Merge branch 'master' of https://github.com/apache/incubator-amaterasu

# Conflicts:
#	common/src/main/kotlin/org/apache/amaterasu/common/configuration/enums/ActionStatus.kt
#	frameworks/spark/dispatcher/src/main/scala/org/apache/amaterasu/frameworks/spark/dispatcher/SparkSetupProvider.scala
#	leader/src/main/scala/org/apache/amaterasu/leader/execution/JobManager.scala
#	leader/src/main/scala/org/apache/amaterasu/leader/mesos/schedulers/JobScheduler.scala
diff --git a/README.md b/README.md
index bbc54e1..4a9fd12 100755
--- a/README.md
+++ b/README.md
@@ -14,7 +14,10 @@
   ~ See the License for the specific language governing permissions and
   ~ limitations under the License.
   -->
-# Apache Amaterasu [![Build Status](https://travis-ci.org/apache/incubator-amaterasu.svg?branch=master)](https://travis-ci.org/apache/incubator-amaterasu)
+# Apache Amaterasu 
+
+[![Build Status](https://travis-ci.org/apache/incubator-amaterasu.svg?branch=master)](https://travis-ci.org/apache/incubator-amaterasu)
+[![License](http://img.shields.io/:license-Apache%202-blue.svg)](http://www.apache.org/licenses/LICENSE-2.0.txt)
 
                                                /\
                                               /  \ /\
diff --git a/build.gradle b/build.gradle
index d452377..dc5aa95 100644
--- a/build.gradle
+++ b/build.gradle
@@ -15,7 +15,7 @@
  * limitations under the License.
  */
 buildscript {
-    ext.kotlin_version = '1.3.0'
+    ext.kotlin_version = '1.3.21'
 
     repositories {
         mavenCentral()
@@ -23,6 +23,7 @@
 
     dependencies {
         classpath "org.jetbrains.kotlin:kotlin-gradle-plugin:$kotlin_version"
+        classpath 'org.junit.platform:junit-platform-gradle-plugin:1.1.0'
     }
 }
 
@@ -35,7 +36,7 @@
 apply plugin: 'distribution'
 apply plugin: 'kotlin'
 apply plugin: 'project-report'
-
+apply plugin: 'org.junit.platform.gradle.plugin'
+
 htmlDependencyReport {
     projects = project.allprojects
 }
@@ -92,4 +93,36 @@
 
 dependencies {
     compile "org.jetbrains.kotlin:kotlin-stdlib:$kotlin_version"
+
+    testImplementation(
+            'org.junit.jupiter:junit-jupiter-api:5.2.0'
+    )
+    testRuntimeOnly(
+            'org.junit.jupiter:junit-jupiter-engine:5.2.0',
+            'org.junit.vintage:junit-vintage-engine:5.2.0',
+            'org.junit.platform:junit-platform-launcher:1.1.0',
+            'org.junit.platform:junit-platform-runner:1.1.0'
+    )
+
+}
+
+
+compileTestKotlin {
+    kotlinOptions {
+        freeCompilerArgs = ["-Xjsr305=strict"]
+        jvmTarget = "1.8"
+    }
+}
+
+test {
+    useJUnitPlatform()
 }
\ No newline at end of file
diff --git a/common/build.gradle b/common/build.gradle
index 9a456ce..bd30444 100644
--- a/common/build.gradle
+++ b/common/build.gradle
@@ -34,6 +34,14 @@
     mavenCentral()
 }
 
+junitPlatform {
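+    // Only run tests from the Spek engine on the JUnit Platform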
+    filters {
+        engines {
+            include 'spek'
+        }
+    }
+}
+
 configurations {
     provided
     compile.extendsFrom provided
@@ -45,20 +53,35 @@
     compile group: 'org.slf4j', name: 'slf4j-api', version: '1.7.9'
     compile group: 'org.slf4j', name: 'slf4j-simple', version: '1.7.9'
     compile group: 'com.fasterxml.jackson.core', name: 'jackson-annotations', version: '2.9.4'
-
+    compile group: 'com.fasterxml.jackson.module', name: 'jackson-module-kotlin', version: '2.9.8'
+    compile group: 'commons-validator', name: 'commons-validator', version: '1.6'
+    compile group: 'software.amazon.awssdk', name: 's3', version: '2.5.23'
 
     compile "org.jetbrains.kotlin:kotlin-stdlib-jdk8"
     compile "org.jetbrains.kotlin:kotlin-reflect"
 
     // currently we have to use this specific mesos version to prevent from
     // clashing with spark
-    compile('org.apache.mesos:mesos:0.22.2:shaded-protobuf') {
+    compile('org.apache.mesos:mesos:1.7.0:shaded-protobuf') {
         exclude group: 'com.google.protobuf', module: 'protobuf-java'
     }
-    provided group: 'org.apache.hadoop', name: 'hadoop-yarn-client', version: '2.7.3'
-    provided group: 'org.apache.hadoop', name: 'hadoop-common', version: '2.7.3'
-    provided group: 'org.apache.hadoop', name: 'hadoop-yarn-api', version: '2.7.3'
-    provided group: 'org.apache.hadoop', name: 'hadoop-hdfs', version: '2.7.3'
+
+    compile('com.jcabi:jcabi-aether:0.10.1') {
+        exclude group: 'org.jboss.netty'
+    }
+
+    compile('org.apache.activemq:activemq-client:5.15.2') {
+        exclude group: 'org.jboss.netty'
+    }
+
+    compile group: 'org.apache.maven', name: 'maven-core', version: '3.0.5'
+    compile group: 'net.liftweb', name: 'lift-json_2.11', version: '3.2.0'
+
+    provided group: 'org.apache.hadoop', name: 'hadoop-yarn-client', version: '2.8.4'
+    provided group: 'org.apache.hadoop', name: 'hadoop-common', version: '2.8.4'
+    provided group: 'org.apache.hadoop', name: 'hadoop-yarn-api', version: '2.8.4'
+    provided group: 'org.apache.hadoop', name: 'hadoop-hdfs', version: '2.8.4'
 
     testCompile "gradle.plugin.com.github.maiflai:gradle-scalatest:0.14"
     testRuntime 'org.pegdown:pegdown:1.1.0'
diff --git a/common/src/main/java/org/apache/amaterasu/common/logging/Logging.java b/common/src/main/java/org/apache/amaterasu/common/logging/Logging.java
index 3f3413f..7a11ddb 100644
--- a/common/src/main/java/org/apache/amaterasu/common/logging/Logging.java
+++ b/common/src/main/java/org/apache/amaterasu/common/logging/Logging.java
@@ -1,10 +1,23 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 package org.apache.amaterasu.common.logging;
 
 import org.slf4j.Logger;
 
-/**
- * Created by Eran Bartenstein (p765790) on 5/11/18.
- */
 public abstract class Logging extends KLogging {
     protected Logger log = getLog();
 }
diff --git a/common/src/main/kotlin/org/apache/amaterasu/common/configuration/enums/ActionStatus.kt b/common/src/main/kotlin/org/apache/amaterasu/common/configuration/enums/ActionStatus.kt
index 5a445ac..be96305 100644
--- a/common/src/main/kotlin/org/apache/amaterasu/common/configuration/enums/ActionStatus.kt
+++ b/common/src/main/kotlin/org/apache/amaterasu/common/configuration/enums/ActionStatus.kt
@@ -1,14 +1,26 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 package org.apache.amaterasu.common.configuration.enums
 
-/**
- * Created by Eran Bartenstein on 21/10/18.
- */
 enum class ActionStatus (val value: String) {
-    pending("pending"),
-    queued("queued"),
-    started("started"),
-    running("running"),
-    complete("complete"),
-    failed("failed"),
-    canceled("canceled")
+    Pending("Pending"),
+    Queued("Queued"),
+    Started("Started"),
+    Complete("Complete"),
+    Failed("Failed"),
+    Canceled("Canceled")
 }
diff --git a/common/src/main/kotlin/org/apache/amaterasu/common/dataobjects/ActionData.kt b/common/src/main/kotlin/org/apache/amaterasu/common/dataobjects/ActionData.kt
index 7e19db2..32a4b6a 100644
--- a/common/src/main/kotlin/org/apache/amaterasu/common/dataobjects/ActionData.kt
+++ b/common/src/main/kotlin/org/apache/amaterasu/common/dataobjects/ActionData.kt
@@ -18,19 +18,27 @@
 
 import org.apache.amaterasu.common.configuration.enums.ActionStatus
 
-
-
 /*
     Adding default values just for the sake of Scala
  */
-data class ActionData(var status: ActionStatus = ActionStatus.pending,
+data class ActionData(var status: ActionStatus = ActionStatus.Pending,
                       var name: String= "",
                       var src: String= "",
+                      var config: String= "",
                       var groupId: String= "",
                       var typeId: String= "",
                       var id: String= "",
                       var exports: Map<String, String> = mutableMapOf(),
-                      var nextActionIds: List<String> = listOf()) {
-    lateinit var errorActionId: String
+                      var nextActionIds: MutableList<String> = mutableListOf()) {
 
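+    // Optional fields, populated only when the action defines them (see the has* properties below)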
+    lateinit var errorActionId: String
+    lateinit var artifact: Artifact
+    lateinit var repo: Repo
+    lateinit var entryClass: String
+
+    val hasErrorAction: Boolean
+        get() = ::errorActionId.isInitialized
+
+    val hasArtifact: Boolean
+        get() = ::artifact.isInitialized
 }
diff --git a/leader/src/main/scala/org/apache/amaterasu/leader/package.scala b/common/src/main/kotlin/org/apache/amaterasu/common/dataobjects/Artifact.kt
old mode 100755
new mode 100644
similarity index 79%
copy from leader/src/main/scala/org/apache/amaterasu/leader/package.scala
copy to common/src/main/kotlin/org/apache/amaterasu/common/dataobjects/Artifact.kt
index b7b0407..f43c77a
--- a/leader/src/main/scala/org/apache/amaterasu/leader/package.scala
+++ b/common/src/main/kotlin/org/apache/amaterasu/common/dataobjects/Artifact.kt
@@ -6,7 +6,7 @@
  * (the "License"); you may not use this file except in compliance with
  * the License.  You may obtain a copy of the License at
  *
- *      http://www.apache.org/licenses/LICENSE-2.0
+ *    http://www.apache.org/licenses/LICENSE-2.0
  *
  * Unless required by applicable law or agreed to in writing, software
  * distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,8 +14,6 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.amaterasu
+package org.apache.amaterasu.common.dataobjects
 
-package object leader {
-
-}
+data class Artifact(val groupId: String, val artifactId: String, val version: String)
\ No newline at end of file
diff --git a/common/src/main/scala/org/apache/amaterasu/common/dataobjects/ExecData.scala b/common/src/main/kotlin/org/apache/amaterasu/common/dataobjects/ExecData.kt
similarity index 74%
copy from common/src/main/scala/org/apache/amaterasu/common/dataobjects/ExecData.scala
copy to common/src/main/kotlin/org/apache/amaterasu/common/dataobjects/ExecData.kt
index d16d6f8..763c28e 100644
--- a/common/src/main/scala/org/apache/amaterasu/common/dataobjects/ExecData.scala
+++ b/common/src/main/kotlin/org/apache/amaterasu/common/dataobjects/ExecData.kt
@@ -16,7 +16,8 @@
  */
 package org.apache.amaterasu.common.dataobjects
 
-import org.apache.amaterasu.common.execution.dependencies.{Dependencies, PythonDependencies}
+import org.apache.amaterasu.common.execution.dependencies.Dependencies
+import org.apache.amaterasu.common.execution.dependencies.PythonDependencies
 import org.apache.amaterasu.common.runtime.Environment
 
-case class ExecData(env: Environment, deps: Dependencies, pyDeps: PythonDependencies, configurations: Map[String, Map[String, Any]])
+data class ExecData(val env: Environment, val deps: Dependencies?, val pyDeps: PythonDependencies?, val configurations: Map<String, Map<String, Any>>)
diff --git a/leader/src/main/scala/org/apache/amaterasu/leader/package.scala b/common/src/main/kotlin/org/apache/amaterasu/common/dataobjects/Repo.kt
old mode 100755
new mode 100644
similarity index 81%
copy from leader/src/main/scala/org/apache/amaterasu/leader/package.scala
copy to common/src/main/kotlin/org/apache/amaterasu/common/dataobjects/Repo.kt
index b7b0407..2aa9db8
--- a/leader/src/main/scala/org/apache/amaterasu/leader/package.scala
+++ b/common/src/main/kotlin/org/apache/amaterasu/common/dataobjects/Repo.kt
@@ -6,7 +6,7 @@
  * (the "License"); you may not use this file except in compliance with
  * the License.  You may obtain a copy of the License at
  *
- *      http://www.apache.org/licenses/LICENSE-2.0
+ *    http://www.apache.org/licenses/LICENSE-2.0
  *
  * Unless required by applicable law or agreed to in writing, software
  * distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,8 +14,6 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.amaterasu
+package org.apache.amaterasu.common.dataobjects
 
-package object leader {
-
-}
+data class Repo(val id: String, val type: String, val url: String)
\ No newline at end of file
diff --git a/common/src/main/scala/org/apache/amaterasu/common/dataobjects/ExecData.scala b/common/src/main/kotlin/org/apache/amaterasu/common/dataobjects/TaskData.kt
similarity index 79%
rename from common/src/main/scala/org/apache/amaterasu/common/dataobjects/ExecData.scala
rename to common/src/main/kotlin/org/apache/amaterasu/common/dataobjects/TaskData.kt
index d16d6f8..53a5e20 100644
--- a/common/src/main/scala/org/apache/amaterasu/common/dataobjects/ExecData.scala
+++ b/common/src/main/kotlin/org/apache/amaterasu/common/dataobjects/TaskData.kt
@@ -16,7 +16,6 @@
  */
 package org.apache.amaterasu.common.dataobjects
 
-import org.apache.amaterasu.common.execution.dependencies.{Dependencies, PythonDependencies}
 import org.apache.amaterasu.common.runtime.Environment
 
-case class ExecData(env: Environment, deps: Dependencies, pyDeps: PythonDependencies, configurations: Map[String, Map[String, Any]])
+data class TaskData(val src: String, val env: Environment, val groupId: String, val typeId: String, val exports: Map<String, String>)
diff --git a/common/src/main/scala/org/apache/amaterasu/common/runtime/Environment.scala b/common/src/main/kotlin/org/apache/amaterasu/common/execution/actions/Notification.kt
old mode 100755
new mode 100644
similarity index 70%
copy from common/src/main/scala/org/apache/amaterasu/common/runtime/Environment.scala
copy to common/src/main/kotlin/org/apache/amaterasu/common/execution/actions/Notification.kt
index f49d8ad..eb0ac29
--- a/common/src/main/scala/org/apache/amaterasu/common/runtime/Environment.scala
+++ b/common/src/main/kotlin/org/apache/amaterasu/common/execution/actions/Notification.kt
@@ -14,17 +14,9 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.amaterasu.common.runtime
+package org.apache.amaterasu.common.execution.actions
 
-case class Environment() {
+import org.apache.amaterasu.common.execution.actions.enums.NotificationLevel
+import org.apache.amaterasu.common.execution.actions.enums.NotificationType
 
-  var name: String = ""
-  var master: String = ""
-
-  var inputRootPath: String = ""
-  var outputRootPath: String = ""
-  var workingDir: String = ""
-
-  var configuration: Map[String, String] = _
-
-}
\ No newline at end of file
+data class Notification(val line: String, val msg: String, val notType: NotificationType, val notLevel: NotificationLevel)
diff --git a/common/src/main/scala/org/apache/amaterasu/common/runtime/Environment.scala b/common/src/main/kotlin/org/apache/amaterasu/common/execution/actions/Notifier.kt
old mode 100755
new mode 100644
similarity index 74%
copy from common/src/main/scala/org/apache/amaterasu/common/runtime/Environment.scala
copy to common/src/main/kotlin/org/apache/amaterasu/common/execution/actions/Notifier.kt
index f49d8ad..a9b6dfa
--- a/common/src/main/scala/org/apache/amaterasu/common/runtime/Environment.scala
+++ b/common/src/main/kotlin/org/apache/amaterasu/common/execution/actions/Notifier.kt
@@ -14,17 +14,16 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.amaterasu.common.runtime
+package org.apache.amaterasu.common.execution.actions
 
-case class Environment() {
+import org.apache.amaterasu.common.logging.KLogging
 
-  var name: String = ""
-  var master: String = ""
+abstract class Notifier : KLogging() {
 
-  var inputRootPath: String = ""
-  var outputRootPath: String = ""
-  var workingDir: String = ""
+    abstract fun info(msg: String)
 
-  var configuration: Map[String, String] = _
+    abstract fun success(line: String)
+
+    abstract fun error(line: String, msg: String)
 
 }
\ No newline at end of file
diff --git a/leader/src/main/scala/org/apache/amaterasu/leader/package.scala b/common/src/main/kotlin/org/apache/amaterasu/common/execution/actions/enums/NotificationLevel.kt
old mode 100755
new mode 100644
similarity index 76%
copy from leader/src/main/scala/org/apache/amaterasu/leader/package.scala
copy to common/src/main/kotlin/org/apache/amaterasu/common/execution/actions/enums/NotificationLevel.kt
index b7b0407..c77c638
--- a/leader/src/main/scala/org/apache/amaterasu/leader/package.scala
+++ b/common/src/main/kotlin/org/apache/amaterasu/common/execution/actions/enums/NotificationLevel.kt
@@ -6,7 +6,7 @@
  * (the "License"); you may not use this file except in compliance with
  * the License.  You may obtain a copy of the License at
  *
- *      http://www.apache.org/licenses/LICENSE-2.0
+ *    http://www.apache.org/licenses/LICENSE-2.0
  *
  * Unless required by applicable law or agreed to in writing, software
  * distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,8 +14,10 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.amaterasu
+package org.apache.amaterasu.common.execution.actions.enums
 
-package object leader {
-
+enum class NotificationLevel(val value: String) {
+    Execution("Execution"),
+    Code("Code"),
+    None("None")
 }
diff --git a/leader/src/main/scala/org/apache/amaterasu/leader/package.scala b/common/src/main/kotlin/org/apache/amaterasu/common/execution/actions/enums/NotificationType.kt
old mode 100755
new mode 100644
similarity index 76%
copy from leader/src/main/scala/org/apache/amaterasu/leader/package.scala
copy to common/src/main/kotlin/org/apache/amaterasu/common/execution/actions/enums/NotificationType.kt
index b7b0407..226090f
--- a/leader/src/main/scala/org/apache/amaterasu/leader/package.scala
+++ b/common/src/main/kotlin/org/apache/amaterasu/common/execution/actions/enums/NotificationType.kt
@@ -6,7 +6,7 @@
  * (the "License"); you may not use this file except in compliance with
  * the License.  You may obtain a copy of the License at
  *
- *      http://www.apache.org/licenses/LICENSE-2.0
+ *    http://www.apache.org/licenses/LICENSE-2.0
  *
  * Unless required by applicable law or agreed to in writing, software
  * distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,8 +14,10 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.amaterasu
+package org.apache.amaterasu.common.execution.actions.enums
 
-package object leader {
-
-}
+enum class NotificationType(val value: String) {
+    Success("Success"),
+    Error("Error"),
+    Info("Info")
+}
\ No newline at end of file
diff --git a/common/src/main/scala/org/apache/amaterasu/common/runtime/Environment.scala b/common/src/main/kotlin/org/apache/amaterasu/common/execution/dependencies/Dependencies.kt
old mode 100755
new mode 100644
similarity index 74%
copy from common/src/main/scala/org/apache/amaterasu/common/runtime/Environment.scala
copy to common/src/main/kotlin/org/apache/amaterasu/common/execution/dependencies/Dependencies.kt
index f49d8ad..f12fd9a
--- a/common/src/main/scala/org/apache/amaterasu/common/runtime/Environment.scala
+++ b/common/src/main/kotlin/org/apache/amaterasu/common/execution/dependencies/Dependencies.kt
@@ -14,17 +14,9 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.amaterasu.common.runtime
+package org.apache.amaterasu.common.execution.dependencies
 
-case class Environment() {
+import org.apache.amaterasu.common.dataobjects.Artifact
+import org.apache.amaterasu.common.dataobjects.Repo
 
-  var name: String = ""
-  var master: String = ""
-
-  var inputRootPath: String = ""
-  var outputRootPath: String = ""
-  var workingDir: String = ""
-
-  var configuration: Map[String, String] = _
-
-}
\ No newline at end of file
+data class Dependencies(val repos: List<Repo>, val artifacts: List<Artifact>)
\ No newline at end of file
diff --git a/common/src/main/kotlin/org/apache/amaterasu/common/execution/dependencies/PythonDependencies.kt b/common/src/main/kotlin/org/apache/amaterasu/common/execution/dependencies/PythonDependencies.kt
new file mode 100644
index 0000000..b6bd7d3
--- /dev/null
+++ b/common/src/main/kotlin/org/apache/amaterasu/common/execution/dependencies/PythonDependencies.kt
@@ -0,0 +1,19 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.amaterasu.common.execution.dependencies
+
+data class PythonDependencies(val packages: List<PythonPackage>)
\ No newline at end of file
diff --git a/common/src/main/kotlin/org/apache/amaterasu/common/execution/dependencies/PythonPackage.kt b/common/src/main/kotlin/org/apache/amaterasu/common/execution/dependencies/PythonPackage.kt
new file mode 100644
index 0000000..2d8d7ab
--- /dev/null
+++ b/common/src/main/kotlin/org/apache/amaterasu/common/execution/dependencies/PythonPackage.kt
@@ -0,0 +1,19 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.amaterasu.common.execution.dependencies
+
+data class PythonPackage(val packageId: String, val index: String? = null, val channel: String? = null)
\ No newline at end of file
diff --git a/common/src/main/kotlin/org/apache/amaterasu/common/logging/KLogging.kt b/common/src/main/kotlin/org/apache/amaterasu/common/logging/KLogging.kt
index 2b4e411..cba04ec 100644
--- a/common/src/main/kotlin/org/apache/amaterasu/common/logging/KLogging.kt
+++ b/common/src/main/kotlin/org/apache/amaterasu/common/logging/KLogging.kt
@@ -1,10 +1,23 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 package org.apache.amaterasu.common.logging
 
 import org.slf4j.LoggerFactory
 
-/**
- * Created by Eran Bartenstein on 5/11/18.
- */
 abstract class KLogging {
     protected var log = LoggerFactory.getLogger(this.javaClass.name)
 }
diff --git a/common/src/main/scala/org/apache/amaterasu/common/runtime/Environment.scala b/common/src/main/kotlin/org/apache/amaterasu/common/runtime/Environment.kt
old mode 100755
new mode 100644
similarity index 75%
rename from common/src/main/scala/org/apache/amaterasu/common/runtime/Environment.scala
rename to common/src/main/kotlin/org/apache/amaterasu/common/runtime/Environment.kt
index f49d8ad..b0a5252
--- a/common/src/main/scala/org/apache/amaterasu/common/runtime/Environment.scala
+++ b/common/src/main/kotlin/org/apache/amaterasu/common/runtime/Environment.kt
@@ -16,15 +16,11 @@
  */
 package org.apache.amaterasu.common.runtime
 
-case class Environment() {
+data class Environment(
+        var name: String = "",
+        var master: String = "",
+        var inputRootPath: String = "",
+        var outputRootPath: String = "",
+        var workingDir: String = "",
 
-  var name: String = ""
-  var master: String = ""
-
-  var inputRootPath: String = ""
-  var outputRootPath: String = ""
-  var workingDir: String = ""
-
-  var configuration: Map[String, String] = _
-
-}
\ No newline at end of file
+        var configuration: Map<String, Any> = mapOf())
\ No newline at end of file
diff --git a/common/src/main/kotlin/org/apache/amaterasu/common/utils/ActiveNotifier.kt b/common/src/main/kotlin/org/apache/amaterasu/common/utils/ActiveNotifier.kt
new file mode 100644
index 0000000..947c7a8
--- /dev/null
+++ b/common/src/main/kotlin/org/apache/amaterasu/common/utils/ActiveNotifier.kt
@@ -0,0 +1,81 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.amaterasu.common.utils
+
+import org.apache.activemq.ActiveMQConnectionFactory
+import org.apache.amaterasu.common.execution.actions.Notification
+import org.apache.amaterasu.common.execution.actions.Notifier
+import org.apache.amaterasu.common.execution.actions.enums.NotificationLevel
+import org.apache.amaterasu.common.execution.actions.enums.NotificationType
+import org.codehaus.jackson.map.ObjectMapper
+import javax.jms.DeliveryMode
+import javax.jms.MessageProducer
+import javax.jms.Session
+
+class ActiveNotifier(address: String) : Notifier() {
+
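+    // Jackson (codehaus) ObjectMapper used to serialize Notification objects to JSON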
+    private val mapper = ObjectMapper()
+
+    private var producer: MessageProducer
+    private var session: Session
+
+    init {
+        log.info("report address $address")
+
+        // setting up activeMQ connection
+        val connectionFactory = ActiveMQConnectionFactory(address)
+        val connection = connectionFactory.createConnection()
+        connection.start()
+        session = connection.createSession(false, Session.AUTO_ACKNOWLEDGE)
+        val destination = session.createTopic("JOB.REPORT")
+        producer = session.createProducer(destination)
+        producer.setDeliveryMode(DeliveryMode.NON_PERSISTENT)
+
+        //mapper.registerModule(KotlinModule())
+    }
+
+    override fun info(msg: String) {
+
+        val notification = Notification("", msg, NotificationType.Info, NotificationLevel.Execution)
+        val notificationJson = mapper.writeValueAsString(notification)
+
+        log.info(notificationJson)
+        val message = session.createTextMessage(notificationJson)
+        producer.send(message)
+
+    }
+
+    override fun error(line: String, msg: String) {
+
+        println("Error executing line: $line message: $msg")
+
+        val notification = Notification(line, msg, NotificationType.Error, NotificationLevel.Code)
+        val notificationJson = mapper.writeValueAsString(notification)
+        val message = session.createTextMessage(notificationJson)
+        producer.send(message)
+    }
+
+    override fun success(line: String) {
+
+        log.info("successfully executed line: $line")
+
+        val notification = Notification(line, "", NotificationType.Success, NotificationLevel.Code)
+        val notificationJson = mapper.writeValueAsString(notification)
+        val message = session.createTextMessage(notificationJson)
+        producer.send(message)
+    }
+}
\ No newline at end of file
diff --git a/common/src/main/kotlin/org/apache/amaterasu/common/utils/ArtifactUtil.kt b/common/src/main/kotlin/org/apache/amaterasu/common/utils/ArtifactUtil.kt
new file mode 100644
index 0000000..6e19e31
--- /dev/null
+++ b/common/src/main/kotlin/org/apache/amaterasu/common/utils/ArtifactUtil.kt
@@ -0,0 +1,56 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.amaterasu.common.utils
+
+import com.jcabi.aether.Aether
+import org.apache.amaterasu.common.dataobjects.Artifact
+import org.apache.amaterasu.common.dataobjects.Repo
+import org.sonatype.aether.repository.RemoteRepository
+import org.sonatype.aether.util.artifact.DefaultArtifact
+import org.sonatype.aether.util.artifact.JavaScopes
+import java.io.File
+
+class ArtifactUtil(repos: List<Repo> = listOf(), jobId: String) {
+
+    private var repo: File
+    private val remoteRepos: MutableList<RemoteRepository> = mutableListOf()
+
+    init {
+        val jarFile = File(this.javaClass.protectionDomain.codeSource.location.path)
+        val amaHome = File(jarFile.parent).parent
+        repo = File("$amaHome/dist/$jobId")
+
+        addRepos(repos)
+    }
+
+    fun addRepo(repo: Repo) {
+        remoteRepos.add(RemoteRepository(repo.id, repo.type, repo.url))
+    }
+
+    fun addRepos(repos: List<Repo>) {
+        repos.forEach { addRepo(it) }
+    }
+
+    fun getLocalArtifacts(artifact: Artifact): List<File> {
+
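+        // Resolve the artifact and its transitive runtime-scope dependencies from the remote repos into the job's local repo directory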
+        val aether = Aether(remoteRepos, repo)
+        val resolvedArtifacts = aether.resolve(DefaultArtifact(artifact.groupId, artifact.artifactId, "", "jar", artifact.version),
+                JavaScopes.RUNTIME)
+
+        return resolvedArtifacts.map { it.file }
+    }
+}
\ No newline at end of file
diff --git a/common/src/main/kotlin/org/apache/amaterasu/common/utils/FileUtil.kt b/common/src/main/kotlin/org/apache/amaterasu/common/utils/FileUtil.kt
new file mode 100644
index 0000000..626c7f4
--- /dev/null
+++ b/common/src/main/kotlin/org/apache/amaterasu/common/utils/FileUtil.kt
@@ -0,0 +1,100 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.amaterasu.common.utils
+
+import org.apache.commons.io.FilenameUtils
+import software.amazon.awssdk.services.s3.S3Client
+import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider
+import org.apache.commons.validator.routines.UrlValidator
+import software.amazon.awssdk.services.s3.model.S3Exception
+import software.amazon.awssdk.auth.credentials.AwsBasicCredentials
+import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider
+import software.amazon.awssdk.auth.credentials.InstanceProfileCredentialsProvider
+import software.amazon.awssdk.core.sync.ResponseTransformer
+import software.amazon.awssdk.regions.Region
+import software.amazon.awssdk.services.s3.model.GetObjectRequest
+import java.io.FileNotFoundException
+import java.io.IOException
+import java.lang.IllegalArgumentException
+import java.net.URL
+import java.nio.file.Paths
+
+class FileUtil(accessKeyId: String = "", secretAccessKey: String = "") {
+
+    private val schemes = arrayOf("http", "https", "s3", "s3a")
+    private val urlValidator = UrlValidator(schemes)
+
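+    // Prefer explicit static credentials when an access key is supplied; otherwise fall back to the instance profile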
+    private var credentials: AwsCredentialsProvider = if (accessKeyId.isNotEmpty()) {
+        StaticCredentialsProvider.create(AwsBasicCredentials.create(accessKeyId, secretAccessKey))
+    } else {
+        InstanceProfileCredentialsProvider.builder().build()
+    }
+
+    fun downloadFile(remote: String): String {
+
+        assert(isSupportedUrl(remote))
+        val url = URL(remote)
+        var result = ""
+
+        try {
+
+            // https://s3-ap-southeast-2.amazonaws.com/amaterasu/BugBounty-TestUpload.txt
+            val scheme = url.protocol //http
+            if (scheme !in schemes) {
+                throw IllegalArgumentException("${url.protocol} not supported")
+            }
+
+            val host = url.host // s3-ap-southeast-2.amazonaws.com
+            val region: String = if (host == "s3.amazonaws.com") {
+                "us-east-1" //N.Virginia
+            } else {
+                host.removePrefix("s3-").removeSuffix(".amazonaws.com")
+            }
+
+            val path = url.path.removePrefix("/") // /amaterasu/testfile.txt
+            val split = path.split("/")
+            val bucket = split[0]
+            val key = split.subList(1, split.size).joinToString("/")
+
+            val s3 = S3Client.builder()
+                    .credentialsProvider(credentials)
+                    .region(Region.of(region))
+                    .build()
+
+            val request = GetObjectRequest.builder()
+                    .bucket(bucket)
+                    .key(key)
+                    .build()
+
+            s3.getObject(request, ResponseTransformer.toFile(Paths.get(FilenameUtils.getName(URL(remote).file))))
+            result = FilenameUtils.getName(URL(remote).file)
+
+        } catch (e: S3Exception) {
+            System.err.println(e.message)
+        } catch (e: FileNotFoundException) {
+            System.err.println(e.message)
+        } catch (e: IOException) {
+            System.err.println(e.message)
+        }
+        return result
+    }
+
+    fun isSupportedUrl(string: String): Boolean {
+        return urlValidator.isValid(string)
+    }
+
+}
\ No newline at end of file
diff --git a/common/src/main/scala/org/apache/amaterasu/common/configuration/ClusterConfig.scala b/common/src/main/scala/org/apache/amaterasu/common/configuration/ClusterConfig.scala
index 3661b48..a9d0d63 100755
--- a/common/src/main/scala/org/apache/amaterasu/common/configuration/ClusterConfig.scala
+++ b/common/src/main/scala/org/apache/amaterasu/common/configuration/ClusterConfig.scala
@@ -40,7 +40,7 @@
   var distLocation: String = "local"
   var workingFolder: String = ""
   // TODO: get rid of hard-coded version
-  var pysparkPath: String = "spark-2.2.1-bin-hadoop2.7/bin/spark-submit"
+  var pysparkPath: String = _
   var Jar: String = _
   var JarName: String = _
   // the additionalClassPath is currently for testing purposes, when amaterasu is
@@ -91,8 +91,7 @@
 
   }
 
-
-  val YARN = new YARN()
+  val yarn = new YARN()
 
   class Spark {
     var home: String = ""
@@ -111,7 +110,6 @@
     }
   }
 
-
   object Webserver {
     var Port: String = ""
     var Root: String = ""
@@ -125,7 +123,7 @@
     }
   }
 
-  object Jobs {
+  class Jobs {
 
     var cpus: Double = 1
     var mem: Long = 1024
@@ -137,10 +135,10 @@
       if (props.containsKey("jobs.mem")) mem = props.getProperty("jobs.mem").toLong
       if (props.containsKey("jobs.repoSize")) repoSize = props.getProperty("jobs.repoSize").toLong
 
-      Tasks.load(props)
+      tasks.load(props)
     }
 
-    object Tasks {
+    class Tasks {
 
       var attempts: Int = 3
       var cpus: Int = 1
@@ -155,8 +153,12 @@
       }
     }
 
+    val tasks = new Tasks()
+
   }
 
+  val jobs = new Jobs()
+
   object AWS {
 
     var accessKeyId: String = ""
@@ -209,15 +211,18 @@
     if (props.containsKey("timeout")) timeout = props.getProperty("timeout").asInstanceOf[Double]
     if (props.containsKey("mode")) mode = props.getProperty("mode")
     if (props.containsKey("workingFolder")) workingFolder = props.getProperty("workingFolder", s"/user/$user")
-    if (props.containsKey("pysparkPath")) pysparkPath = props.getProperty("pysparkPath")
+    if (props.containsKey("pysparkPath")) pysparkPath = props.getProperty("pysparkPath") else pysparkPath = s"spark-${props.getProperty("spark.version")}/bin/spark-submit"
     // TODO: rethink this
     Jar = this.getClass.getProtectionDomain.getCodeSource.getLocation.toURI.getPath
     JarName = Paths.get(this.getClass.getProtectionDomain.getCodeSource.getLocation.getPath).getFileName.toString
 
-    Jobs.load(props)
     Webserver.load(props)
-    YARN.load(props)
+    yarn.load(props)
     spark.load(props)
+    jobs.load(props)
 
     distLocation match {
 
diff --git a/common/src/main/scala/org/apache/amaterasu/common/dataobjects/ActionDataHelper.scala b/common/src/main/scala/org/apache/amaterasu/common/dataobjects/ActionDataHelper.scala
deleted file mode 100644
index 75be6e7..0000000
--- a/common/src/main/scala/org/apache/amaterasu/common/dataobjects/ActionDataHelper.scala
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.amaterasu.common.dataobjects
-
-import com.google.gson.Gson
-
-/*
-object ActionDataHelper {
-  private val gson = new Gson
-  def toJsonString(actionData: ActionData): String = {
-    gson.toJson(actionData)
-  }
-
-  def fromJsonString(jsonString: String) : ActionData = {
-    gson.fromJson[ActionData](jsonString, new ActionData().getClass)
-  }
-}
-*/
\ No newline at end of file
diff --git a/common/src/main/scala/org/apache/amaterasu/common/dataobjects/TaskData.scala b/common/src/main/scala/org/apache/amaterasu/common/dataobjects/TaskData.scala
deleted file mode 100644
index a745581..0000000
--- a/common/src/main/scala/org/apache/amaterasu/common/dataobjects/TaskData.scala
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.amaterasu.common.dataobjects
-
-import org.apache.amaterasu.common.runtime.Environment
-
-
-/* TODO: Future eyal and yaniv - The TaskData class should support overriding configurations for execData configurations
-// more specifiably, if execData holds configurations for spark setup (vcores/memory) a task should be able to override those
-*/
-case class TaskData(src: String, env: Environment, groupId: String, typeId: String, exports: Map[String, String])
diff --git a/common/src/main/scala/org/apache/amaterasu/common/execution/actions/Notifier.scala b/common/src/main/scala/org/apache/amaterasu/common/execution/actions/Notifier.scala
deleted file mode 100755
index ee71f85..0000000
--- a/common/src/main/scala/org/apache/amaterasu/common/execution/actions/Notifier.scala
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.amaterasu.common.execution.actions
-
-import com.fasterxml.jackson.annotation.JsonProperty
-import org.apache.amaterasu.common.execution.actions.NotificationLevel.NotificationLevel
-import org.apache.amaterasu.common.execution.actions.NotificationType.NotificationType
-import org.apache.amaterasu.common.logging.Logging
-
-abstract class Notifier extends Logging {
-
-  def info(msg: String)
-
-  def success(line: String)
-
-  def error(line: String, msg: String)
-
-}
-
-
-object NotificationType extends Enumeration {
-
-  type NotificationType = Value
-  val success: NotificationType.Value = Value("success")
-  val error: NotificationType.Value = Value("error")
-  val info: NotificationType.Value = Value("info")
-
-}
-
-object NotificationLevel extends Enumeration {
-
-  type NotificationLevel = Value
-  val execution: NotificationLevel.Value = Value("execution")
-  val code: NotificationLevel.Value = Value("code")
-  val none: NotificationLevel.Value = Value("none")
-
-}
-
-case class Notification(@JsonProperty("line") line: String,
-                        @JsonProperty("msg") msg: String,
-                        @JsonProperty("notType") notType: NotificationType,
-                        @JsonProperty("notLevel") notLevel: NotificationLevel)
diff --git a/common/src/main/scala/org/apache/amaterasu/common/execution/dependencies/Dependencies.scala b/common/src/main/scala/org/apache/amaterasu/common/execution/dependencies/Dependencies.scala
deleted file mode 100755
index 855262b..0000000
--- a/common/src/main/scala/org/apache/amaterasu/common/execution/dependencies/Dependencies.scala
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.amaterasu.common.execution.dependencies
-
-import scala.collection.mutable.ListBuffer
-
-case class Dependencies(repos: ListBuffer[Repo], artifacts: List[Artifact])
-case class PythonDependencies(packages: List[PythonPackage])
-case class Repo(id: String, `type`: String, url: String)
-case class Artifact(groupId: String, artifactId: String, version: String)
-case class PythonPackage(packageId: String, index: Option[String] = None, channel: Option[String] = None) // Not really sure about this, basically I want default values but the ObjectMapper apparently doesn't support them
\ No newline at end of file
diff --git a/common/src/test/kotlin/org/apache/amaterasu/common/utils/FileTestUtils.kt b/common/src/test/kotlin/org/apache/amaterasu/common/utils/FileTestUtils.kt
new file mode 100644
index 0000000..2e0c5c2
--- /dev/null
+++ b/common/src/test/kotlin/org/apache/amaterasu/common/utils/FileTestUtils.kt
@@ -0,0 +1,25 @@
+//package org.apache.amaterasu.common.utils
+//
+//import org.jetbrains.spek.api.Spek
+//import org.jetbrains.spek.api.dsl.given
+//import org.jetbrains.spek.api.dsl.it
+//import org.jetbrains.spek.api.dsl.on
+//
+//import org.junit.Assert.*
+//import java.io.File
+//
+//class FileUtilTest: Spek({
+//
+//    given("an s3 url") {
+//        val url = "https://s3-ap-southeast-2.amazonaws.com/amaterasu/testfile.txt"
+//        val util =  FileUtil("", "")
+//        on("downloading file from s3") {
+//            val result: Boolean = util.downloadFile(url,"testfile.txt")
+//            it("is successful") {
+//                val resultFile = File("testfile.txt")
+//                assert(resultFile.exists())
+//            }
+//        }
+//    }
+//
+//})
\ No newline at end of file
diff --git a/docs/docs/config.md b/docs/docs/config.md
new file mode 100644
index 0000000..f49186a
--- /dev/null
+++ b/docs/docs/config.md
@@ -0,0 +1,79 @@
+<!--
+  ~ Licensed to the Apache Software Foundation (ASF) under one or more
+  ~ contributor license agreements.  See the NOTICE file distributed with
+  ~ this work for additional information regarding copyright ownership.
+  ~ The ASF licenses this file to You under the Apache License, Version 2.0
+  ~ (the "License"); you may not use this file except in compliance with
+  ~ the License.  You may obtain a copy of the License at
+  ~
+  ~      http://www.apache.org/licenses/LICENSE-2.0
+  ~
+  ~ Unless required by applicable law or agreed to in writing, software
+  ~ distributed under the License is distributed on an "AS IS" BASIS,
+  ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  ~ See the License for the specific language governing permissions and
+  ~ limitations under the License.
+  -->
+
+## Overview
+
+One of the core capabilities of Apache Amaterasu is configuration management for data pipelines. Configurations are stored in environments. By default, environments are defined in folders named `env`: an `env` folder at the root of the Amaterasu repo applies to all the actions in the repo, while environment folders under `src/{action_name}/{env}/` are available only to that specific action.
+
+**Note:** When the same configuration value is defined both at the root and for an action, the action-level definition overrides the global configuration.
+
+The following repo structure defines three environments (`dev`, `test` and `prod`) both at the root and for the `start` action:
+ 
+```
+repo
++-- env/
+|   +-- dev/
+|   |   +-- job.yaml
+|   |   +-- spark.yaml
+|   +-- test/
+|   |   +-- job.yaml
+|   |   +-- spark.yaml
+|   +-- prod/
+|       +-- job.yaml
+|       +-- spark.yaml
++-- src/
+|   +-- start/
+|       +-- dev/
+|       |   +-- job.yaml
+|       |   +-- spark.yaml
+|       +-- test/
+|       |   +-- job.yaml
+|       |   +-- spark.yaml
+|       +-- prod/
+|           +-- job.yaml
+|           +-- spark.yaml
++-- maki.yaml 
+
+```
+
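+For example (the values below are illustrative), a root-level `env/dev/job.yaml` and an action-level `src/start/dev/job.yaml` could look as follows. When the `start` action is deployed to the `dev` environment, it gets the action-level `outputRootPath`, while `master` is inherited from the root environment:
+
+```yaml
+# env/dev/job.yaml - applies to every action in the repo
+name: dev
+master: yarn
+outputRootPath: /user/amaterasu/output
+```
+
+```yaml
+# src/start/dev/job.yaml - applies only to the start action
+outputRootPath: /user/amaterasu/start-output
+```
+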
+## Custom configuration locations
+
+Additional configuration paths can be added for both global and action configurations by specifying the `config` element in the `maki.yaml`, as shown in the following example:
+
+```yaml
+config: myconfig/{env}/
+job-name:    amaterasu-test
+flow:
+    - name: start
+      config: cfg/start/{env}/
+      runner:
+          group: spark
+          type: python        
+      file: start.py
+
+```
+## Configuration Types
+
+Amaterasu allows the configuration of three main areas:
+
+### Frameworks
+
+Every framework has its own configuration. Apache Amaterasu allows each framework to define its configuration per environment, thereby controlling how actions are configured when they are deployed.
+
+For more information about specific framework configuration options, look at the [frameworks](frameworks/) section of this documentation.
+
+### Datasets 
+### Custom Configuration
\ No newline at end of file
diff --git a/docs/docs/deployments.md b/docs/docs/deployments.md
new file mode 100644
index 0000000..4377da3
--- /dev/null
+++ b/docs/docs/deployments.md
@@ -0,0 +1,165 @@
+<!--
+  ~ Licensed to the Apache Software Foundation (ASF) under one or more
+  ~ contributor license agreements.  See the NOTICE file distributed with
+  ~ this work for additional information regarding copyright ownership.
+  ~ The ASF licenses this file to You under the Apache License, Version 2.0
+  ~ (the "License"); you may not use this file except in compliance with
+  ~ the License.  You may obtain a copy of the License at
+  ~
+  ~      http://www.apache.org/licenses/LICENSE-2.0
+  ~
+  ~ Unless required by applicable law or agreed to in writing, software
+  ~ distributed under the License is distributed on an "AS IS" BASIS,
+  ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  ~ See the License for the specific language governing permissions and
+  ~ limitations under the License.
+  -->
+
+## Overview
+
+Amaterasu deployments are a combination of a YAML deployment definition (usually defined in the `maki.yml` or `maki.yaml` file), the environment configuration (described in the configuration section) and the artifacts to be deployed.
+
+### The Deployments DSL
+The deployment DSL allows developers to define the actions to be deployed, and their order of deployment and execution, using a simple YAML definition, as in the following example:
+
+```yaml
+job-name:    amaterasu-test
+seq:
+    - name: start
+      runner:
+          group: spark
+          type: jar        
+      artifact: 
+          groupId: io.shinto
+          artifactId: amaterasu-simple-spark
+          version: 0.3
+      repo:
+          id: packagecloud
+          type: default
+          url: https://packagecloud.io/yanivr/amaterasu-demo/maven2
+      class: DataGenerator
+    - name: step2
+      runner:
+          group: spark
+          type: pyspark
+      file: file.py
+      
+```
+
+The above deployment defines two actions that run sequentially. Each action defines a [framework](../frameworks/) runner to be used and an executable to be run.
+
+### Executables
+Currently, Amaterasu actions can define two types of executables:
+
+ - **Files** 
+   
+File executables can be located inside the Amaterasu repo, under the `src` folder. For example, in the following action definition:
+ 
+ 
+```yaml
+job-name:    amaterasu-test
+seq:     
+    - name: step1
+      runner:
+         group: spark
+         type: pyspark
+      file: file.py
+```
+     
+The executable `file.py` would be located under the `src` folder as follows:
+
+```yaml
+repo/
++-- src/
+|   +-- file.py
++-- maki.yaml
+
+```          
+
+Files can also be specified as URLs; currently the `http`, `https` and `s3a` schemes are supported. For example:
+
+```yaml
+job-name:    amaterasu-test
+seq:     
+    - name: step1
+      runner:
+         group: spark
+         type: pyspark
+      file: s3a://my-source-bucket/file.py
+```
+
+ - **Artifacts** 
+ 
+Currently, the artifact directive supports only artifacts stored in Maven repositories. In addition to the artifact details, you need to specify the details of the repository where the artifact is available. The following example fetches an artifact to be submitted as a Spark job:
+
+```yaml
+job-name:    amaterasu-test
+seq:
+    - name: start
+      runner:
+          group: spark
+          type: jar        
+      artifact: 
+          groupId: io.shinto
+          artifactId: amaterasu-simple-spark
+          version: 0.3
+      repo:
+          id: packagecloud
+          type: default
+          url: https://packagecloud.io/yanivr/amaterasu-demo/maven2
+      class: DataGenerator
+```
+#### Error Handling Actions
+
+When an action fails, Amaterasu re-queues it for execution a configurable number of times. If the action continues to fail, Amaterasu allows the definition of error-handling actions that execute when an action fails repeatedly. The following deployment defines an error-handling action:
+
+```yaml
+job-name:    amaterasu-sample
+flow:
+    - name: start
+      runner:
+         group: spark
+         type: pyspark
+      file: file.py
+      error:        
+         name: error
+         runner:
+            group: spark
+            type: pyspark
+         file: error.py        
+
+```
+ 
+## Dependencies
+
+In addition to defining executables, Amaterasu jobs and actions can define dependencies to be deployed in the containers and used at runtime. Dependencies can be defined either globally for a job, under the `deps` folder, or per action in the action folder:
+ 
+```
+repo
++-- deps/
+|   +-- jars.yaml          # contains global dependencies, deployed in all action containers
++-- src/
+|   +-- start/
+|   |   +-- jars.yaml      # contains dependencies deployed only in the start action container
+|   +-- file.py
++-- maki.yaml 
+
+```
+
+ - Java/JVM Dependencies
+ 
+JVM dependencies are defined in the `jars.yaml` file, which defines a set of dependencies and the repositories where those dependencies are available. The following example shows a simple `jars.yaml` file:
+
+```yaml
+repos:
+  - id: maven-central
+    type: default
+    url: http://central.maven.org/maven2/
+artifacts:  
+  - groupId: com.flyberrycapital
+    artifactId: scala-slack_2.10
+    version: 0.3.0
+
+```
+ 
+ - Python Dependencies
\ No newline at end of file
diff --git a/docs/docs/frameworks.md b/docs/docs/frameworks.md
new file mode 100644
index 0000000..3ee07c5
--- /dev/null
+++ b/docs/docs/frameworks.md
@@ -0,0 +1,53 @@
+<!--
+  ~ Licensed to the Apache Software Foundation (ASF) under one or more
+  ~ contributor license agreements.  See the NOTICE file distributed with
+  ~ this work for additional information regarding copyright ownership.
+  ~ The ASF licenses this file to You under the Apache License, Version 2.0
+  ~ (the "License"); you may not use this file except in compliance with
+  ~ the License.  You may obtain a copy of the License at
+  ~
+  ~      http://www.apache.org/licenses/LICENSE-2.0
+  ~
+  ~ Unless required by applicable law or agreed to in writing, software
+  ~ distributed under the License is distributed on an "AS IS" BASIS,
+  ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  ~ See the License for the specific language governing permissions and
+  ~ limitations under the License.
+  -->
+# Overview
+
+Amaterasu supports the execution of different processing frameworks. For each supported framework, Amaterasu provides two main components for integrating with it:
+
+ - **Dispatcher** 
+ 
+   The dispatcher is in charge of creating and configuring the containers for actions of a specific framework. It makes sure that the executable and any dependencies are available in the container, along with the environment configuration files, and sets the command to be executed.
+   
+ - **Runtime Library**
+   
+   The runtime library provides an easy way to consume environment configuration and share data between actions. The main entry point for doing so is the Amaterasu Context object. Amaterasu Context exposes the following functionality:
+   
+   **Note:** Each runtime (Java, Python, etc.) and framework has a slightly different implementation of the Amaterasu context. To develop using a specific framework, please consult the framework's documentation below.
+   
+   - **Env**
+      
+    The env object contains the configuration for the current environment.
+
+   - **Datasets and Dataset configuration**
+    
+    While [datasets](config/#datasets/) are configured under an environment, Amaterasu datasets are treated differently from other configurations, as they provide the integration point between different actions. Datasets can either be consumed as configuration or be loaded directly into an appropriate data structure for the specific framework and runtime, as sketched below.
+
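+    For instance, with the Spark Scala runtime, a dataset exported by a previous action can be loaded directly into a DataFrame through the Amaterasu context. The following is a minimal sketch (the `getDataFrame` call follows the Spark runtime samples; the import path and the action/dataset names are illustrative):
+
+    ```scala
+    // assumed import path for the Spark runtime's Amaterasu context
+    import org.apache.amaterasu.runtime.AmaContext
+
+    // load the "odd" dataset exported by the "start" action as a DataFrame
+    val oddDf = AmaContext.getDataFrame("start", "odd")
+    oddDf.show()
+    ```
+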
+# Amaterasu Frameworks
+
+## Apache Spark
+
+### Spark Configuration
+
+### Scala
+### PySpark
+
+## Python 
+
+## Java and JVM programs
\ No newline at end of file
diff --git a/docs/docs/images/amaterasu-logo-web.png b/docs/docs/images/amaterasu-logo-web.png
new file mode 100644
index 0000000..4f60d19
--- /dev/null
+++ b/docs/docs/images/amaterasu-logo-web.png
Binary files differ
diff --git a/docs/docs/index.md b/docs/docs/index.md
new file mode 100755
index 0000000..3d92cfb
--- /dev/null
+++ b/docs/docs/index.md
@@ -0,0 +1,88 @@
+<!--
+  ~ Licensed to the Apache Software Foundation (ASF) under one or more
+  ~ contributor license agreements.  See the NOTICE file distributed with
+  ~ this work for additional information regarding copyright ownership.
+  ~ The ASF licenses this file to You under the Apache License, Version 2.0
+  ~ (the "License"); you may not use this file except in compliance with
+  ~ the License.  You may obtain a copy of the License at
+  ~
+  ~      http://www.apache.org/licenses/LICENSE-2.0
+  ~
+  ~ Unless required by applicable law or agreed to in writing, software
+  ~ distributed under the License is distributed on an "AS IS" BASIS,
+  ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  ~ See the License for the specific language governing permissions and
+  ~ limitations under the License.
+  -->
+
+# Apache Amaterasu (incubating) [![Build Status](https://travis-ci.org/apache/incubator-amaterasu.svg?branch=master)](https://travis-ci.org/apache/incubator-amaterasu)
+
+![Apache Amaterasu](images/amaterasu-logo-web.png)                                                        
+
+Apache Amaterasu is an open-source framework providing configuration management and deployment of containerized data pipelines. Amaterasu allows developers and data scientists to write, collaborate on, and easily deploy data pipelines to different cluster environments, and to manage configuration and dependencies for each environment.
+
+## Main concepts
+
+### Repo
+
+Amaterasu jobs are defined within an Amaterasu repository. A repository is a filesystem structure stored in a git repository that contains definitions for the following components: 
+ 
+#### Actions
+
+Put simply, an action is a process that is managed by Amaterasu. In order to deploy and manage an action, Amaterasu creates a container with the action, its dependencies and configuration, and deploys it on a cluster (currently only Apache Mesos and YARN clusters are supported, with Kubernetes planned for a later version).
+
+#### Frameworks
+
+Apache Amaterasu is able to configure and interact with different data processing frameworks. Supported frameworks can be easily configured for deployment, and also integrate seamlessly with custom APIs. 
+For more information about supported frameworks and how to support additional frameworks, see our [Frameworks](frameworks/) section.
+
+#### Configuration and Environments  
+
+One of the main objectives of Amaterasu is to manage [configuration](config/) for data pipelines. Amaterasu configurations are stored per environment, allowing the same pipeline to be deployed with a configuration that fits its environment.
+
+#### Deployments
+
+Amaterasu [deployments](deployments/) are stored in a `maki.yml` or `maki.yaml` file in the root of the Amaterasu repository. The deployment definition contains the different actions and their order of deployment and execution.
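+
+As a minimal illustration (adapted from the deployment samples in this documentation), a `maki.yml` defining a single PySpark action looks like this:
+
+```yaml
+job-name:    amaterasu-sample
+flow:
+    - name: start
+      runner:
+         group: spark
+         type: pyspark
+      file: file.py
+```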
+
+## Setting up Amaterasu  
+
+### Download
+
+Amaterasu is available on the [downloads](http://amaterasu.incubator.apache.org/downloads.html) page.
+Download Amaterasu and extract it onto a node in the cluster. Once you do that, you are just a couple of easy steps away from running your first job.
+
+### Configuration
+
+Configuring Amaterasu is simply done by editing the `amaterasu.properties` file in the top-level Amaterasu directory.
+
+Because Amaterasu supports several cluster environments (currently Apache Mesos and Apache YARN), the properties you need to set depend on the cluster manager in use:
+
+#### Apache Mesos
+
+| property   | Description                    | Value          |
+| ---------- | ------------------------------ | -------------- |
+| Mode       | The cluster manager to be used | mesos          |
+| zk         | The ZooKeeper connection<br> string to be used by<br> Amaterasu | The address of a ZooKeeper node  |
+| master     | The clusters' Mesos master | The address of the Mesos Master    |
+| user       | The user that will be used<br> to run Amaterasu | root          |
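+
+For example, a minimal `amaterasu.properties` for a Mesos cluster might look like the following sketch (values are illustrative; the lowercase `mode` key is an assumption based on the project's sample properties file):
+
+```
+mode=mesos
+zk=127.0.0.1
+master=192.168.33.11
+user=root
+```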
+
+#### Apache YARN
+
+**Note:**  Different Hadoop distributions need different variations of the YARN configuration. Amaterasu is currently tested regularly with HDP and Amazon EMR. 
+
+
+| property   | Description                    | Value          |
+| ---------- | ------------------------------ | -------------- |
+| Mode       | The cluster manager to be used | yarn           |
+| zk         | The ZooKeeper connection<br> string to be used by<br> Amaterasu | The address of a ZooKeeper node  |
+
+
+## Running a Job
+
+To run an Amaterasu job, run the following command in the top-level Amaterasu directory:
+
+```
+ama-start.sh --repo="https://github.com/shintoio/amaterasu-job-sample.git" --branch="master" --env="test" --report="code" 
+```
+
+We recommend you either fork or clone the job sample repo and use that as a starting point for creating your first job.
\ No newline at end of file
diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml
new file mode 100644
index 0000000..0036a81
--- /dev/null
+++ b/docs/mkdocs.yml
@@ -0,0 +1,25 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+site_name: Apache Amaterasu Documentation
+nav:
+    - Home: index.md
+    - Defining Deployments: deployments.md
+    - Configuration Management: config.md
+    - Frameworks: frameworks.md
+theme: readthedocs
\ No newline at end of file
diff --git a/executor/build.gradle b/executor/build.gradle
index 2cdc35a..443fc38 100644
--- a/executor/build.gradle
+++ b/executor/build.gradle
@@ -58,18 +58,15 @@
     compile group: 'org.apache.commons', name: 'commons-lang3', version: '3.5'
     compile group: 'org.apache.maven', name: 'maven-core', version: '3.0.5'
     compile group: 'org.reflections', name: 'reflections', version: '0.9.10'
-    compile group: 'com.fasterxml.jackson.core', name: 'jackson-core', version: '2.6.5'
-    compile group: 'com.fasterxml.jackson.core', name: 'jackson-annotations', version: '2.6.5'
-    compile group: 'com.fasterxml.jackson.core', name: 'jackson-databind', version: '2.6.5'
-    compile group: 'com.fasterxml.jackson.module', name: 'jackson-module-scala_2.11', version: '2.6.5'
-    compile group: 'net.liftweb', name: 'lift-json_2.11', version: '3.2.0'
+    compile group: 'com.fasterxml.jackson.core', name: 'jackson-core', version: '2.9.8'
+    compile group: 'com.fasterxml.jackson.core', name: 'jackson-annotations', version: '2.9.8'
+    compile group: 'com.fasterxml.jackson.core', name: 'jackson-databind', version: '2.9.8'
+    compile group: 'com.fasterxml.jackson.module', name: 'jackson-module-scala_2.11', version: '2.9.8'
 
     compile('com.jcabi:jcabi-aether:0.10.1') {
         exclude group: 'org.jboss.netty'
     }
-    compile('org.apache.activemq:activemq-client:5.15.2') {
-        exclude group: 'org.jboss.netty'
-    }
+
 
     compile project(':common')
     compile project(':amaterasu-sdk')
diff --git a/executor/src/main/scala/org/apache/amaterasu/executor/common/executors/ActiveNotifier.scala b/executor/src/main/scala/org/apache/amaterasu/executor/common/executors/ActiveNotifier.scala
deleted file mode 100644
index 90e624b..0000000
--- a/executor/src/main/scala/org/apache/amaterasu/executor/common/executors/ActiveNotifier.scala
+++ /dev/null
@@ -1,88 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.amaterasu.executor.common.executors
-
-import javax.jms.{DeliveryMode, MessageProducer, Session}
-
-
-import net.liftweb.json._
-import net.liftweb.json.Serialization.write
-import org.apache.activemq.ActiveMQConnectionFactory
-import org.apache.amaterasu.common.execution.actions.{Notification, NotificationLevel, NotificationType, Notifier}
-import org.apache.amaterasu.common.logging.Logging
-
-class ActiveNotifier extends Notifier {
-
-  var producer: MessageProducer = _
-  var session: Session = _
-
-  implicit val formats = DefaultFormats
-
-  override def info(message: String): Unit = {
-
-    log.info(message)
-
-    val notification = Notification("", message, NotificationType.info, NotificationLevel.execution)
-    val notificationJson = write(notification)
-    val msg = session.createTextMessage(notificationJson)
-    producer.send(msg)
-
-  }
-
-  override def success(line: String): Unit = {
-
-    log.info(s"successfully executed line: $line")
-
-    val notification = Notification(line, "", NotificationType.success, NotificationLevel.code)
-    val notificationJson = write(notification)
-    val msg = session.createTextMessage(notificationJson)
-    producer.send(msg)
-
-  }
-
-  override def error(line: String, message: String): Unit = {
-
-    log.error(s"Error executing line: $line message: $message")
-
-    val notification = Notification(line, message, NotificationType.error, NotificationLevel.code)
-    val notificationJson = write(notification)
-    val msg = session.createTextMessage(notificationJson)
-    producer.send(msg)
-
-  }
-}
-
-object ActiveNotifier extends Logging {
-  def apply(address: String): ActiveNotifier = {
-
-    // setting up activeMQ connection
-    val connectionFactory = new ActiveMQConnectionFactory(address)
-    val connection = connectionFactory.createConnection()
-    connection.start()
-    val session = connection.createSession(false, Session.AUTO_ACKNOWLEDGE)
-    val destination = session.createTopic("JOB.REPORT")
-    val producer = session.createProducer(destination)
-    producer.setDeliveryMode(DeliveryMode.NON_PERSISTENT)
-
-    // creating notifier
-    val notifier = new ActiveNotifier
-    notifier.session = session
-    notifier.producer = producer
-
-    notifier
-  }
-}
\ No newline at end of file
diff --git a/executor/src/main/scala/org/apache/amaterasu/executor/mesos/executors/MesosActionsExecutor.scala b/executor/src/main/scala/org/apache/amaterasu/executor/mesos/executors/MesosActionsExecutor.scala
index ab5bc50..f204db8 100755
--- a/executor/src/main/scala/org/apache/amaterasu/executor/mesos/executors/MesosActionsExecutor.scala
+++ b/executor/src/main/scala/org/apache/amaterasu/executor/mesos/executors/MesosActionsExecutor.scala
@@ -106,11 +106,11 @@
         .setTaskId(taskInfo.getTaskId)
         .setState(TaskState.TASK_RUNNING).build()
       driver.sendStatusUpdate(status)
-      val runner = providersFactory.getRunner(taskData.groupId, taskData.typeId)
+      val runner = providersFactory.getRunner(taskData.getGroupId, taskData.getTypeId)
       runner match {
-        case Some(r) => r.executeSource(taskData.src, actionName, taskData.exports.asJava)
+        case Some(r) => r.executeSource(taskData.getSrc, actionName, taskData.getExports)
         case None =>
-          notifier.error("", s"Runner not found for group: ${taskData.groupId}, type ${taskData.typeId}. Please verify the tasks")
+          notifier.error("", s"Runner not found for group: ${taskData.getGroupId}, type ${taskData.getTypeId}. Please verify the tasks")
           None
       }
 
@@ -119,7 +119,7 @@
     task onComplete {
 
       case Failure(t) =>
-        println(s"launching task failed: ${t.getMessage}")
+        println(s"launching task Failed: ${t.getMessage}")
         System.exit(1)
 
       case Success(ts) =>
@@ -127,7 +127,7 @@
         driver.sendStatusUpdate(TaskStatus.newBuilder()
           .setTaskId(taskInfo.getTaskId)
           .setState(TaskState.TASK_FINISHED).build())
-        notifier.info(s"complete task: ${taskInfo.getTaskId.getValue}")
+        notifier.info(s"Complete task: ${taskInfo.getTaskId.getValue}")
 
     }
 
diff --git a/executor/src/main/scala/org/apache/amaterasu/executor/mesos/executors/MesosNotifier.scala b/executor/src/main/scala/org/apache/amaterasu/executor/mesos/executors/MesosNotifier.scala
index fcb453a..b256386 100755
--- a/executor/src/main/scala/org/apache/amaterasu/executor/mesos/executors/MesosNotifier.scala
+++ b/executor/src/main/scala/org/apache/amaterasu/executor/mesos/executors/MesosNotifier.scala
@@ -18,8 +18,8 @@
 
 import com.fasterxml.jackson.databind.ObjectMapper
 import com.fasterxml.jackson.module.scala.DefaultScalaModule
-import org.apache.amaterasu.common.execution.actions.{Notification, NotificationLevel, NotificationType, Notifier}
-import org.apache.amaterasu.common.logging.Logging
+import org.apache.amaterasu.common.execution.actions.enums.{NotificationLevel, NotificationType}
+import org.apache.amaterasu.common.execution.actions.{Notification, Notifier}
 import org.apache.mesos.ExecutorDriver
 
 
@@ -30,9 +30,10 @@
 
   override def success(line: String): Unit = {
 
-    log.info(s"successfully executed line: $line")
 
-    val notification = Notification(line, "", NotificationType.success, NotificationLevel.code)
+    getLog.info(s"successfully executed line: $line")
+
+    val notification = new Notification(line, "", NotificationType.Success, NotificationLevel.Code)
     val msg = mapper.writeValueAsBytes(notification)
 
     driver.sendFrameworkMessage(msg)
@@ -41,9 +42,9 @@
 
   override def error(line: String, message: String): Unit = {
 
-    log.error(s"Error executing line: $line message: $message")
+    getLog.error(s"Error executing line: $line message: $message")
 
-    val notification = Notification(line, message, NotificationType.error, NotificationLevel.code)
+    val notification = new Notification(line, message, NotificationType.Error, NotificationLevel.Code)
     val msg = mapper.writeValueAsBytes(notification)
 
     driver.sendFrameworkMessage(msg)
@@ -52,9 +53,9 @@
 
   override def info(message: String): Unit = {
 
-    log.info(message)
+    getLog.info(message)
 
-    val notification = Notification("", message, NotificationType.info, NotificationLevel.execution)
+    val notification = new Notification("", message, NotificationType.Info, NotificationLevel.Execution)
     val msg = mapper.writeValueAsBytes(notification)
 
     driver.sendFrameworkMessage(msg)
diff --git a/executor/src/main/scala/org/apache/amaterasu/executor/yarn/executors/ActionsExecutor.scala b/executor/src/main/scala/org/apache/amaterasu/executor/yarn/executors/ActionsExecutor.scala
index 282de68..cda6351 100644
--- a/executor/src/main/scala/org/apache/amaterasu/executor/yarn/executors/ActionsExecutor.scala
+++ b/executor/src/main/scala/org/apache/amaterasu/executor/yarn/executors/ActionsExecutor.scala
@@ -23,7 +23,8 @@
 import com.fasterxml.jackson.module.scala.DefaultScalaModule
 import org.apache.amaterasu.common.dataobjects.{ExecData, TaskData}
 import org.apache.amaterasu.common.logging.Logging
-import org.apache.amaterasu.executor.common.executors.{ActiveNotifier, ProvidersFactory}
+import org.apache.amaterasu.common.utils.ActiveNotifier
+import org.apache.amaterasu.executor.common.executors.ProvidersFactory
 
 import scala.collection.JavaConverters._
 
@@ -38,11 +39,11 @@
   var providersFactory: ProvidersFactory = _
 
   def execute(): Unit = {
-    val runner = providersFactory.getRunner(taskData.groupId, taskData.typeId)
+    val runner = providersFactory.getRunner(taskData.getGroupId, taskData.getTypeId)
     runner match {
       case Some(r) => {
         try {
-          r.executeSource(taskData.src, actionName, taskData.exports.asJava)
+          r.executeSource(taskData.getSrc, actionName, taskData.getExports)
           log.info("Completed action")
           System.exit(0)
         } catch {
@@ -53,19 +54,17 @@
         }
       }
       case None =>
-        log.error("", s"Runner not found for group: ${taskData.groupId}, type ${taskData.typeId}. Please verify the tasks")
+        log.error("", s"Runner not found for group: ${taskData.getGroupId}, type ${taskData.getTypeId}. Please verify the tasks")
         System.exit(101)
     }
   }
 }
 
-// launched with args:
-//s"'${jobManager.jobId}' '${config.master}' '${actionData.name}' '${URLEncoder.encode(gson.toJson(taskData), "UTF-8")}' '${URLEncoder.encode(gson.toJson(execData), "UTF-8")}' '${actionData.id}-${container.getId.getContainerId}'"
 object ActionsExecutorLauncher extends Logging with App {
 
   val hostName = InetAddress.getLocalHost.getHostName
 
-  log.info(s"Hostname resolved to: $hostName")
+  println(s"Hostname resolved to: $hostName")
   val mapper = new ObjectMapper()
   mapper.registerModule(DefaultScalaModule)
 
@@ -90,9 +89,8 @@
 
   log.info("Setup executor")
   val baos = new ByteArrayOutputStream()
-  val notifier = ActiveNotifier(notificationsAddress)
+  val notifier = new ActiveNotifier(notificationsAddress)
 
-  log.info("Setup notifier")
   actionsExecutor.providersFactory = ProvidersFactory(execData, jobId, baos, notifier, taskIdAndContainerId, hostName, propFile = "./amaterasu.properties")
   actionsExecutor.execute()
 }
diff --git a/executor/src/main/scala/org/apache/amaterasu/executor/yarn/executors/YarnNotifier.scala b/executor/src/main/scala/org/apache/amaterasu/executor/yarn/executors/YarnNotifier.scala
deleted file mode 100644
index 831cfc8..0000000
--- a/executor/src/main/scala/org/apache/amaterasu/executor/yarn/executors/YarnNotifier.scala
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.amaterasu.executor.yarn.executors
-
-import org.apache.amaterasu.common.execution.actions.Notifier
-import org.apache.amaterasu.common.logging.Logging
-import org.apache.hadoop.yarn.conf.YarnConfiguration
-import org.apache.hadoop.yarn.ipc.YarnRPC
-
-class YarnNotifier(conf: YarnConfiguration) extends Notifier {
-
-  var rpc: YarnRPC = YarnRPC.create(conf)
-
-  override def info(msg: String): Unit = {
-    log.info(s"""-> ${msg}""")
-  }
-
-  override def success(line: String): Unit = {
-    log.info(s"""SUCCESS: ${line}""")
-  }
-
-  override def error(line: String, msg: String): Unit = {
-    log.error(s"""ERROR: ${line}: ${msg}""")
-  }
-}
diff --git a/frameworks/spark/dispatcher/src/main/scala/org/apache/amaterasu/frameworks/spark/dispatcher/SparkSetupProvider.scala b/frameworks/spark/dispatcher/src/main/scala/org/apache/amaterasu/frameworks/spark/dispatcher/SparkSetupProvider.scala
index c400e6b..103b18d 100644
--- a/frameworks/spark/dispatcher/src/main/scala/org/apache/amaterasu/frameworks/spark/dispatcher/SparkSetupProvider.scala
+++ b/frameworks/spark/dispatcher/src/main/scala/org/apache/amaterasu/frameworks/spark/dispatcher/SparkSetupProvider.scala
@@ -24,6 +24,7 @@
 import org.apache.amaterasu.leader.common.utilities.{DataLoader, MemoryFormatParser}
 import org.apache.amaterasu.sdk.frameworks.configuration.DriverConfiguration
 import org.apache.amaterasu.sdk.frameworks.{FrameworkSetupProvider, RunnerSetupProvider}
+import org.apache.commons.lang.StringUtils
 
 import scala.collection.mutable
 import collection.JavaConversions._
@@ -38,12 +39,14 @@
 
   private def loadSparkConfig: mutable.Map[String, Any] = {
 
+    println(s"===> env=$env")
+
     val execData = DataLoader.getExecutorData(env, conf)
-    val sparkExecConfiguration = execData.configurations.get("spark")
+    val sparkExecConfiguration = execData.getConfigurations.get("spark")
     if (sparkExecConfiguration.isEmpty) {
       throw new Exception(s"Spark configuration files could not be loaded for the environment $env")
     }
-    collection.mutable.Map(sparkExecConfiguration.get.toSeq: _*)
+    collection.mutable.Map(sparkExecConfiguration.toSeq: _*)
 
   }
 
@@ -51,8 +54,9 @@
     this.env = env
     this.conf = conf
 
+//    this.sparkExecConfigurations = loadSparkConfig
     runnerProviders += ("scala" -> SparkScalaRunnerProvider(conf))
-    runnerProviders += ("scala-shell" -> SparkShellScalaRunnerProvider(conf))
+    runnerProviders += ("jar" -> SparkSubmitScalaRunnerProvider(conf))
     runnerProviders += ("pyspark" -> PySparkRunnerProvider(conf))
 
   }
@@ -60,16 +64,15 @@
   override def getGroupIdentifier: String = "spark"
 
   override def getGroupResources: Array[File] = conf.mode match {
-      case "mesos" => Array[File](new File(s"spark-${conf.Webserver.sparkVersion}.tgz"), new File(s"spark-runner-${conf.version}-all.jar"), new File(s"spark-runtime-${conf.version}.jar"))
-      case "yarn" => new File(conf.spark.home).listFiles
-      case _ => Array[File]()
-    }
+    case "mesos" => Array[File](new File(s"spark-${conf.Webserver.sparkVersion}.tgz"), new File(s"spark-runner-${conf.version}-all.jar"), new File(s"spark-runtime-${conf.version}.jar"))
+    case "yarn" => Array[File](new File(s"spark-runner-${conf.version}-all.jar"), new File(s"spark-runtime-${conf.version}.jar"), new File(s"executor-${conf.version}-all.jar"), new File(conf.spark.home))
+    case _ => Array[File]()
+  }
 
 
   override def getEnvironmentVariables: util.Map[String, String] = conf.mode match {
-    case "mesos" => Map[String, String](s"SPARK_HOME" -> s"spark-${conf.Webserver.sparkVersion}",
-      s"MESOS_NATIVE_JAVA_LIBRARY" -> s"/opt/mesosphere/libmesos-bundle/lib/libmesos.so")
-    case "yarn" => Map[String, String]("SPARK_HOME" -> "spark")
+    case "mesos" => Map[String, String]("SPARK_HOME" -> s"spark-${conf.Webserver.sparkVersion}", "SPARK_HOME_DOCKER" -> "/opt/spark/")
+    case "yarn" => Map[String, String]("SPARK_HOME" -> StringUtils.stripStart(conf.spark.home, "/"))
     case _ => Map[String, String]()
   }
 
@@ -83,22 +86,22 @@
       cpu = conf.spark.opts("yarn.am.cores").toInt
     } else if (conf.spark.opts.contains("driver.cores")) {
       cpu = conf.spark.opts("driver.cores").toInt
-    } else if (conf.YARN.Worker.cores > 0) {
-      cpu = conf.YARN.Worker.cores
+    } else if (conf.yarn.Worker.cores > 0) {
+      cpu = conf.yarn.Worker.cores
     } else {
       cpu = 1
     }
     var mem: Int = 0
     if (sparkExecConfigurations.get("spark.yarn.am.memory").isDefined) {
       mem = MemoryFormatParser.extractMegabytes(sparkExecConfigurations("spark.yarn.am.memory").toString)
-    } else if (sparkExecConfigurations.get("spark.driver.memory").isDefined) {
-      mem = MemoryFormatParser.extractMegabytes(sparkExecConfigurations("spark.driver.memory").toString)
+    } else if (sparkExecConfigurations.get("spark.driver.memeory").isDefined) {
+      mem = MemoryFormatParser.extractMegabytes(sparkExecConfigurations("spark.driver.memeory").toString)
     } else if (conf.spark.opts.contains("yarn.am.memory")) {
       mem = MemoryFormatParser.extractMegabytes(conf.spark.opts("yarn.am.memory"))
     } else if (conf.spark.opts.contains("driver.memory")) {
       mem = MemoryFormatParser.extractMegabytes(conf.spark.opts("driver.memory"))
-    } else if (conf.YARN.Worker.memoryMB > 0) {
-      mem = conf.YARN.Worker.memoryMB
+    } else if (conf.yarn.Worker.memoryMB > 0) {
+      mem = conf.yarn.Worker.memoryMB
     } else if (conf.taskMem > 0) {
       mem = conf.taskMem
     } else {
diff --git a/frameworks/spark/dispatcher/src/main/scala/org/apache/amaterasu/frameworks/spark/dispatcher/runners/providers/PySparkRunnerProvider.scala b/frameworks/spark/dispatcher/src/main/scala/org/apache/amaterasu/frameworks/spark/dispatcher/runners/providers/PySparkRunnerProvider.scala
index ce3edb9..d0a2442 100644
--- a/frameworks/spark/dispatcher/src/main/scala/org/apache/amaterasu/frameworks/spark/dispatcher/runners/providers/PySparkRunnerProvider.scala
+++ b/frameworks/spark/dispatcher/src/main/scala/org/apache/amaterasu/frameworks/spark/dispatcher/runners/providers/PySparkRunnerProvider.scala
@@ -6,6 +6,7 @@
 import org.apache.amaterasu.common.dataobjects.ActionData
 import org.apache.amaterasu.leader.common.utilities.DataLoader
 import org.apache.amaterasu.sdk.frameworks.RunnerSetupProvider
+import org.apache.commons.lang.StringUtils
 import org.apache.hadoop.yarn.api.ApplicationConstants
 
 class PySparkRunnerProvider extends RunnerSetupProvider {
@@ -19,11 +20,11 @@
       s"java -cp executor-${conf.version}-all.jar:spark-runner-${conf.version}-all.jar:spark-runtime-${conf.version}.jar:spark-${conf.Webserver.sparkVersion}/jars/* " +
       s"-Dscala.usejavacp=true -Djava.library.path=$libPath org.apache.amaterasu.executor.mesos.executors.MesosActionsExecutor $jobId ${conf.master} ${actionData.getName}.stripMargin"
     case "yarn" => "/bin/bash ./miniconda.sh -b -p $PWD/miniconda && " +
-      s"/bin/bash spark/bin/load-spark-env.sh && " +
-      s"java -cp spark/jars/*:executor.jar:spark-runner.jar:spark-runtime.jar:spark/conf/:${conf.YARN.hadoopHomeDir}/conf/ " +
+      s"/bin/bash ${StringUtils.stripStart(conf.spark.home,"/")}/conf/load-spark.sh && " +
+      s"java -cp ${StringUtils.stripStart(conf.spark.home,"/")}/jars/*:executor-${conf.version}-all.jar:spark-runner-${conf.version}-all.jar:spark-runtime-${conf.version}.jar:${StringUtils.stripStart(conf.spark.home,"/")}/conf/:${conf.yarn.hadoopHomeDir}/conf/ " +
       "-Xmx2G " +
       "-Dscala.usejavacp=true " +
-      "-Dhdp.version=2.6.1.0-129 " +
+      "-Dhdp.version=2.6.5.0-292 " +
       "org.apache.amaterasu.executor.yarn.executors.ActionsExecutorLauncher " +
       s"'$jobId' '${conf.master}' '${actionData.getName}' '${URLEncoder.encode(DataLoader.getTaskDataString(actionData, env), "UTF-8")}' '${URLEncoder.encode(DataLoader.getExecutorDataString(env, conf), "UTF-8")}' '$executorId' '$callbackAddress' " +
       s"1> ${ApplicationConstants.LOG_DIR_EXPANSION_VAR}/stdout " +
@@ -34,11 +35,14 @@
   override def getRunnerResources: Array[String] =
     Array[String]("miniconda.sh", "spark_intp.py", "runtime.py", "codegen.py")
 
-  def getActionResources(jobId: String, actionData: ActionData): Array[String] =
+  override def getActionUserResources(jobId: String, actionData: ActionData): Array[String] =
     Array[String]()
 
   override def getActionDependencies(jobId: String, actionData: ActionData): Array[String] =
     Array[String]()
+
+  override def getHasExecutor: Boolean = true
+
 }
 
 object PySparkRunnerProvider {
diff --git a/frameworks/spark/dispatcher/src/main/scala/org/apache/amaterasu/frameworks/spark/dispatcher/runners/providers/SparkScalaRunnerProvider.scala b/frameworks/spark/dispatcher/src/main/scala/org/apache/amaterasu/frameworks/spark/dispatcher/runners/providers/SparkScalaRunnerProvider.scala
index cca7cda..618b95c 100644
--- a/frameworks/spark/dispatcher/src/main/scala/org/apache/amaterasu/frameworks/spark/dispatcher/runners/providers/SparkScalaRunnerProvider.scala
+++ b/frameworks/spark/dispatcher/src/main/scala/org/apache/amaterasu/frameworks/spark/dispatcher/runners/providers/SparkScalaRunnerProvider.scala
@@ -22,6 +22,7 @@
 import org.apache.amaterasu.common.dataobjects.ActionData
 import org.apache.amaterasu.leader.common.utilities.DataLoader
 import org.apache.amaterasu.sdk.frameworks.RunnerSetupProvider
+import org.apache.commons.lang.StringUtils
 import org.apache.hadoop.yarn.api.ApplicationConstants
 
 class SparkScalaRunnerProvider extends RunnerSetupProvider {
@@ -36,11 +37,12 @@
       s"java -cp /mnt/mesos/sandbox/executor-${conf.version}-all.jar:/mnt/mesos/sandbox/spark-runner-${conf.version}-all.jar:/mnt/mesos/sandbox/spark-runtime-${conf.version}.jar:/mnt/mesos/sandbox/spark-${conf.Webserver.sparkVersion}/jars/* " +
       s"-Dscala.usejavacp=true -Djava.library.path=$libPath:/opt/mesosphere/libmesos-bundle/lib/ " +
       s"org.apache.amaterasu.executor.mesos.executors.MesosActionsExecutor $jobId ${conf.master} ${actionData.getName}".stripMargin
-    case "yarn" => s"/bin/bash spark/bin/load-spark-env.sh && " +
-      s"java -cp spark/jars/*:executor.jar:spark-runner.jar:spark-runtime.jar:spark/conf/:${conf.YARN.hadoopHomeDir}/conf/ " +
+    case "yarn" =>
+      s"/bin/bash ${StringUtils.stripStart(conf.spark.home,"/")}/conf/spark-env.sh && " +
+      s"java -cp ${StringUtils.stripStart(conf.spark.home,"/")}/jars/*:executor-${conf.version}-all.jar:spark-runner-${conf.version}-all.jar:spark-runtime-${conf.version}.jar:${StringUtils.stripStart(conf.spark.home,"/")}/conf/:${conf.yarn.hadoopHomeDir}/conf/ " +
       "-Xmx2G " +
       "-Dscala.usejavacp=true " +
-      "-Dhdp.version=2.6.1.0-129 " +
+      "-Dhdp.version=2.6.5.0-292 " +
       "org.apache.amaterasu.executor.yarn.executors.ActionsExecutorLauncher " +
       s"'$jobId' '${conf.master}' '${actionData.getName}' '${URLEncoder.encode(DataLoader.getTaskDataString(actionData, env), "UTF-8")}' '${URLEncoder.encode(DataLoader.getExecutorDataString(env, conf), "UTF-8")}' '$executorId' '$callbackAddress' " +
       s"1> ${ApplicationConstants.LOG_DIR_EXPANSION_VAR}/stdout " +
@@ -52,11 +54,15 @@
     Array[String]()
 
 
-  def getActionResources(jobId: String, actionData: ActionData): Array[String] =
+  def getActionUserResources(jobId: String, actionData: ActionData): Array[String] =
     Array[String]()
 
   override def getActionDependencies(jobId: String, actionData: ActionData): Array[String] =
     Array[String]()
+
+  override def getHasExecutor: Boolean = true
+
+
 }
 
 object SparkScalaRunnerProvider {
diff --git a/frameworks/spark/dispatcher/src/main/scala/org/apache/amaterasu/frameworks/spark/dispatcher/runners/providers/SparkShellScalaRunnerProvider.scala b/frameworks/spark/dispatcher/src/main/scala/org/apache/amaterasu/frameworks/spark/dispatcher/runners/providers/SparkShellScalaRunnerProvider.scala
deleted file mode 100644
index 5d566a0..0000000
--- a/frameworks/spark/dispatcher/src/main/scala/org/apache/amaterasu/frameworks/spark/dispatcher/runners/providers/SparkShellScalaRunnerProvider.scala
+++ /dev/null
@@ -1,30 +0,0 @@
-package org.apache.amaterasu.frameworks.spark.dispatcher.runners.providers
-
-import org.apache.amaterasu.common.configuration.ClusterConfig
-import org.apache.amaterasu.common.dataobjects.ActionData
-import org.apache.amaterasu.sdk.frameworks.RunnerSetupProvider
-
-class SparkShellScalaRunnerProvider extends RunnerSetupProvider {
-
-  private var conf: ClusterConfig = _
-
-  override def getCommand(jobId: String, actionData: ActionData, env: String, executorId: String, callbackAddress: String): String =
-    s"$$SPARK_HOME/bin/spark-shell ${actionData.getSrc} --jars spark-runtime-${conf.version}.jar"
-
-  override def getRunnerResources: Array[String] =
-    Array[String]()
-
-  def getActionResources(jobId: String, actionData: ActionData): Array[String] =
-    Array[String](s"$jobId/${actionData.getName}/${actionData.getSrc}")
-
-  override def getActionDependencies(jobId: String, actionData: ActionData): Array[String] =  Array[String]()
-
-}
-
-object SparkShellScalaRunnerProvider {
-  def apply(conf: ClusterConfig): SparkShellScalaRunnerProvider = {
-    val result = new SparkShellScalaRunnerProvider
-    result.conf = conf
-    result
-  }
-}
\ No newline at end of file
diff --git a/frameworks/spark/dispatcher/src/main/scala/org/apache/amaterasu/frameworks/spark/dispatcher/runners/providers/SparkSubmitScalaRunnerProvider.scala b/frameworks/spark/dispatcher/src/main/scala/org/apache/amaterasu/frameworks/spark/dispatcher/runners/providers/SparkSubmitScalaRunnerProvider.scala
new file mode 100644
index 0000000..1ceec51
--- /dev/null
+++ b/frameworks/spark/dispatcher/src/main/scala/org/apache/amaterasu/frameworks/spark/dispatcher/runners/providers/SparkSubmitScalaRunnerProvider.scala
@@ -0,0 +1,49 @@
+package org.apache.amaterasu.frameworks.spark.dispatcher.runners.providers
+
+import java.io.File
+
+import org.apache.amaterasu.common.configuration.ClusterConfig
+import org.apache.amaterasu.common.dataobjects.ActionData
+import org.apache.amaterasu.common.utils.ArtifactUtil
+import org.apache.amaterasu.sdk.frameworks.RunnerSetupProvider
+
+import scala.collection.JavaConverters._
+
+class SparkSubmitScalaRunnerProvider extends RunnerSetupProvider {
+
+  private var conf: ClusterConfig = _
+  val jarFile = new File(this.getClass.getProtectionDomain.getCodeSource.getLocation.getPath)
+  val amaDist = new File(s"${new File(jarFile.getParent).getParent}/dist")
+  val amalocation = new File(s"${new File(jarFile.getParent).getParent}")
+
+  override def getCommand(jobId: String, actionData: ActionData, env: String, executorId: String, callbackAddress: String): String = {
+
+    val util = new ArtifactUtil(List(actionData.repo).asJava, jobId)
+    val classParam = if (actionData.getHasArtifact) s" --class ${actionData.entryClass}" else ""
+    s"$$SPARK_HOME/bin/spark-submit $classParam ${util.getLocalArtifacts(actionData.getArtifact).get(0).getName} --deploy-mode client --jars spark-runtime-${conf.version}.jar >&1"
+  }
+
+  override def getRunnerResources: Array[String] =
+    Array[String]()
+
+  override def getActionUserResources(jobId: String, actionData: ActionData): Array[String] =
+    Array[String]()
+
+
+  override def getActionDependencies(jobId: String, actionData: ActionData): Array[String] =
+    Array[String]()
+
+
+  override def getHasExecutor: Boolean = false
+
+
+}
+
+object SparkSubmitScalaRunnerProvider {
+  def apply(conf: ClusterConfig): SparkSubmitScalaRunnerProvider = {
+    val result = new SparkSubmitScalaRunnerProvider
+
+    result.conf = conf
+    result
+  }
+}
\ No newline at end of file
diff --git a/frameworks/spark/runner/build.gradle b/frameworks/spark/runner/build.gradle
index ffe7a10..3a612f2 100644
--- a/frameworks/spark/runner/build.gradle
+++ b/frameworks/spark/runner/build.gradle
@@ -71,9 +71,6 @@
         exclude group: 'org.jboss.netty'
     }
 
-    //compile project(':common')
-    //compile project(':amaterasu-sdk')
-
     //runtime dependency for spark
     provided('org.apache.spark:spark-repl_2.11:2.2.1')
     provided('org.apache.spark:spark-core_2.11:2.2.1')
diff --git a/frameworks/spark/runner/src/main/java/org/apache/amaterasu/frameworks/spark/runner/pyspark/PySparkEntryPoint.java b/frameworks/spark/runner/src/main/java/org/apache/amaterasu/frameworks/spark/runner/pyspark/PySparkEntryPoint.java
index 6df5ef3..491d771 100755
--- a/frameworks/spark/runner/src/main/java/org/apache/amaterasu/frameworks/spark/runner/pyspark/PySparkEntryPoint.java
+++ b/frameworks/spark/runner/src/main/java/org/apache/amaterasu/frameworks/spark/runner/pyspark/PySparkEntryPoint.java
@@ -31,7 +31,7 @@
 
 public class PySparkEntryPoint {
 
-    //private static Boolean started = false;
+    //private static Boolean Started = false;
     private static  PySparkExecutionQueue queue = new PySparkExecutionQueue();
     private static ConcurrentHashMap<String, ResultQueue> resultQueues = new ConcurrentHashMap<>();
 
diff --git a/frameworks/spark/runner/src/main/scala/org/apache/amaterasu/frameworks/spark/runner/SparkRunnersProvider.scala b/frameworks/spark/runner/src/main/scala/org/apache/amaterasu/frameworks/spark/runner/SparkRunnersProvider.scala
index a48aaa0..e78bf95 100644
--- a/frameworks/spark/runner/src/main/scala/org/apache/amaterasu/frameworks/spark/runner/SparkRunnersProvider.scala
+++ b/frameworks/spark/runner/src/main/scala/org/apache/amaterasu/frameworks/spark/runner/SparkRunnersProvider.scala
@@ -24,7 +24,6 @@
 import org.apache.amaterasu.common.execution.actions.Notifier
 import org.apache.amaterasu.common.execution.dependencies.{Dependencies, PythonDependencies, PythonPackage}
 import org.apache.amaterasu.common.logging.Logging
-import org.apache.amaterasu.frameworks.spark.runner.repl.{SparkRunnerHelper, SparkScalaRunner}
 import org.apache.amaterasu.frameworks.spark.runner.sparksql.SparkSqlRunner
 import org.apache.amaterasu.frameworks.spark.runner.pyspark.PySparkRunner
 import org.apache.amaterasu.frameworks.spark.runner.repl.{SparkRunnerHelper, SparkScalaRunner}
@@ -46,8 +45,8 @@
     (e: String) => log.error(e)
 
   )
-  private var conf: Option[Map[String, Any]] = _
-  private var executorEnv: Option[Map[String, Any]] = _
+  private var conf: Option[Map[String, AnyRef]] = None
+  private var executorEnv: Option[Map[String, AnyRef]] = None
   private var clusterConfig: ClusterConfig = _
 
   override def init(execData: ExecData,
@@ -65,24 +64,29 @@
     clusterConfig = config
     var jars = Seq.empty[String]
 
-    if (execData.deps != null) {
-      jars ++= getDependencies(execData.deps)
+    if (execData.getDeps != null) {
+      jars ++= getDependencies(execData.getDeps)
     }
 
-    if (execData.pyDeps != null &&
-      execData.pyDeps.packages.nonEmpty) {
-      loadPythonDependencies(execData.pyDeps, notifier)
+    if (execData.getPyDeps != null &&
+      execData.getPyDeps.getPackages.nonEmpty) {
+      loadPythonDependencies(execData.getPyDeps, notifier)
     }
 
-    conf = execData.configurations.get("spark")
-    executorEnv = execData.configurations.get("spark_exec_env")
+    if(execData.getConfigurations.containsKey("spark")){
+      conf = Some(execData.getConfigurations.get("spark").toMap)
+    }
+
+    if (execData.getConfigurations.containsKey("spark_exec_env")) {
+      executorEnv = Some( execData.getConfigurations.get("spark_exec_env").toMap)
+    }
     val sparkAppName = s"job_${jobId}_executor_$executorId"
 
     SparkRunnerHelper.notifier = notifier
-    val spark = SparkRunnerHelper.createSpark(execData.env, sparkAppName, jars, conf, executorEnv, config, hostName)
+    val spark = SparkRunnerHelper.createSpark(execData.getEnv, sparkAppName, jars, conf, executorEnv, config, hostName)
 
-    lazy val sparkScalaRunner = SparkScalaRunner(execData.env, jobId, spark, outStream, notifier, jars)
-    sparkScalaRunner.initializeAmaContext(execData.env)
+    lazy val sparkScalaRunner = SparkScalaRunner(execData.getEnv, jobId, spark, outStream, notifier, jars)
+    sparkScalaRunner.initializeAmaContext(execData.getEnv)
 
     runners.put(sparkScalaRunner.getIdentifier, sparkScalaRunner)
     var pypath = ""
@@ -93,19 +97,19 @@
       case "mesos" =>
         pypath = s"${new File(".").getAbsolutePath}/miniconda/pkgs:${new File(".").getAbsolutePath}"
     }
-    lazy val pySparkRunner = PySparkRunner(execData.env, jobId, notifier, spark, pypath, execData.pyDeps, config)
+    lazy val pySparkRunner = PySparkRunner(execData.getEnv, jobId, notifier, spark, pypath, execData.getPyDeps, config)
     runners.put(pySparkRunner.getIdentifier, pySparkRunner)
 
-    lazy val sparkSqlRunner = SparkSqlRunner(execData.env, jobId, notifier, spark)
+    lazy val sparkSqlRunner = SparkSqlRunner(execData.getEnv, jobId, notifier, spark)
     runners.put(sparkSqlRunner.getIdentifier, sparkSqlRunner)
   }
 
   private def installAnacondaPackage(pythonPackage: PythonPackage): Unit = {
-    val channel = pythonPackage.channel.getOrElse("anaconda")
+    val channel = pythonPackage.getChannel
     if (channel == "anaconda") {
-      Seq("bash", "-c", s"export HOME=$$PWD && ./miniconda/bin/python -m conda install -y ${pythonPackage.packageId}") ! shellLoger
+      Seq("bash", "-c", s"export HOME=$$PWD && ./miniconda/bin/python -m conda install -y ${pythonPackage.getPackageId}") ! shellLoger
     } else {
-      Seq("bash", "-c", s"export HOME=$$PWD && ./miniconda/bin/python -m conda install -y -c $channel ${pythonPackage.packageId}") ! shellLoger
+      Seq("bash", "-c", s"export HOME=$$PWD && ./miniconda/bin/python -m conda install -y -c $channel ${pythonPackage.getPackageId}") ! shellLoger
     }
   }
 
@@ -124,12 +128,12 @@
   private def loadPythonDependencies(deps: PythonDependencies, notifier: Notifier): Unit = {
     notifier.info("loading anaconda evn")
     installAnacondaOnNode()
-    val codegenPackage = PythonPackage("codegen", channel = Option("auto"))
+    val codegenPackage = new PythonPackage("codegen", "", "auto")
     installAnacondaPackage(codegenPackage)
     try {
       // notifier.info("loadPythonDependencies #5")
-      deps.packages.foreach(pack => {
-        pack.index.getOrElse("anaconda").toLowerCase match {
+      deps.getPackages.foreach(pack => {
+        pack.getIndex.toLowerCase match {
           case "anaconda" => installAnacondaPackage(pack)
           // case "pypi" => installPyPiPackage(pack)
         }
@@ -157,18 +161,18 @@
     // adding a local repo because Aether needs one
     val repo = new File(System.getProperty("java.io.tmpdir"), "ama-repo")
 
-    val remotes = deps.repos.map(r =>
+    val remotes = deps.getRepos.map(r =>
       new RemoteRepository(
-        r.id,
-        r.`type`,
-        r.url
+        r.getId,
+        r.getType,
+        r.getUrl
       )).toList.asJava
 
     val aether = new Aether(remotes, repo)
 
-    deps.artifacts.flatMap(a => {
+    deps.getArtifacts.flatMap(a => {
       aether.resolve(
-        new DefaultArtifact(a.groupId, a.artifactId, "", "jar", a.version),
+        new DefaultArtifact(a.getGroupId, a.getArtifactId, "", "jar", a.getVersion),
         JavaScopes.RUNTIME
       ).map(a => a)
     }).map(x => x.getFile.getAbsolutePath)
diff --git a/frameworks/spark/runner/src/main/scala/org/apache/amaterasu/frameworks/spark/runner/pyspark/PySparkRunner.scala b/frameworks/spark/runner/src/main/scala/org/apache/amaterasu/frameworks/spark/runner/pyspark/PySparkRunner.scala
index a60c827..06c8b86 100644
--- a/frameworks/spark/runner/src/main/scala/org/apache/amaterasu/frameworks/spark/runner/pyspark/PySparkRunner.scala
+++ b/frameworks/spark/runner/src/main/scala/org/apache/amaterasu/frameworks/spark/runner/pyspark/PySparkRunner.scala
@@ -25,6 +25,7 @@
 import org.apache.amaterasu.common.logging.Logging
 import org.apache.amaterasu.common.runtime.Environment
 import org.apache.amaterasu.sdk.AmaterasuRunner
+import org.apache.commons.lang.StringUtils
 import org.apache.spark.SparkEnv
 import org.apache.spark.sql.SparkSession
 
@@ -49,7 +50,7 @@
     PySparkEntryPoint.getExecutionQueue.setForExec((source, actionName, exports))
     val resQueue = PySparkEntryPoint.getResultQueue(actionName)
 
-    notifier.info(s"================= started action $actionName =================")
+    notifier.info(s"================= Started action $actionName =================")
 
     var res: PySparkResult = null
 
@@ -101,8 +102,8 @@
     PySparkEntryPoint.start(spark, jobId, env, SparkEnv.get)
     val port = PySparkEntryPoint.getPort
     var intpPath = ""
-    if (env.configuration.contains("cwd")) {
-      val cwd = new File(env.configuration("cwd"))
+    if (env.getConfiguration.containsKey("cwd")) {
+      val cwd = new File(env.getConfiguration.get("cwd").toString)
       intpPath = s"${cwd.getAbsolutePath}/spark_intp.py" // This is to support test environment
     } else {
       intpPath = s"spark_intp.py"
@@ -114,7 +115,7 @@
     var sparkCmd: Seq[String] = Seq()
     config.mode match {
       case "yarn" =>
-        pysparkPath = s"spark/bin/spark-submit"
+        pysparkPath = s"${StringUtils.stripStart(config.spark.home,"/")}/bin/spark-submit"
         sparkCmd = Seq(pysparkPath, "--py-files", condaPkgs, "--master", "yarn", intpPath, port.toString)
         val proc = Process(sparkCmd, None,
           "PYTHONPATH" -> pypath,
@@ -132,7 +133,7 @@
         var pysparkPython = "/usr/bin/python"
 
         if (pyDeps != null &&
-          pyDeps.packages.nonEmpty) {
+          !pyDeps.getPackages.isEmpty) {
           pysparkPython = "./miniconda/bin/python"
         }
         val proc = Process(sparkCmd, None,
diff --git a/frameworks/spark/runner/src/main/scala/org/apache/amaterasu/frameworks/spark/runner/repl/SparkRunnerHelper.scala b/frameworks/spark/runner/src/main/scala/org/apache/amaterasu/frameworks/spark/runner/repl/SparkRunnerHelper.scala
index acb5981..84714e2 100644
--- a/frameworks/spark/runner/src/main/scala/org/apache/amaterasu/frameworks/spark/runner/repl/SparkRunnerHelper.scala
+++ b/frameworks/spark/runner/src/main/scala/org/apache/amaterasu/frameworks/spark/runner/repl/SparkRunnerHelper.scala
@@ -24,6 +24,7 @@
 import org.apache.amaterasu.common.logging.Logging
 import org.apache.amaterasu.common.runtime.Environment
 import org.apache.amaterasu.common.utils.FileUtils
+import org.apache.commons.lang.StringUtils
 import org.apache.spark.SparkConf
 import org.apache.spark.sql.SparkSession
 
@@ -128,24 +129,24 @@
       .set("spark.submit.pyFiles", pyfiles.mkString(","))
 
 
-    val master: String = if (env.master.isEmpty) {
+    val master: String = if (env.getMaster.isEmpty) {
       "yarn"
     } else {
-      env.master
+      env.getMaster
     }
 
     config.mode match {
 
       case "mesos" =>
-        conf.set("spark.executor.uri", s"http://$getNode:${config.Webserver.Port}/spark-2.2.1-bin-hadoop2.7.tgz")
+        conf.set("spark.executor.uri", s"http://$getNode:${config.Webserver.Port}/spark-${config.Webserver.sparkVersion}.tgz")
           .setJars(jars)
-          .set("spark.master", env.master)
-          .set("spark.home", s"${scala.reflect.io.File(".").toCanonical.toString}/spark-2.2.1-bin-hadoop2.7")
+          .set("spark.master", env.getMaster)
+          .set("spark.home", s"${scala.reflect.io.File(".").toCanonical.toString}/spark-${config.Webserver.sparkVersion}")
 
       case "yarn" =>
-        conf.set("spark.home", config.spark.home)
+        conf.set("spark.home", StringUtils.stripStart(config.spark.home,"/"))
           // TODO: parameterize those
-          .setJars(Seq("executor.jar", "spark-runner.jar", "spark-runtime.jar") ++ jars)
+          .setJars(Seq(s"executor-${config.version}-all.jar", s"spark-runner-${config.version}-all.jar", s"spark-runtime-${config.version}.jar") ++ jars)
           .set("spark.history.kerberos.keytab", "/etc/security/keytabs/spark.headless.keytab")
           .set("spark.driver.extraLibraryPath", "/usr/hdp/current/hadoop-client/lib/native:/usr/hdp/current/hadoop-client/lib/native/Linux-amd64-64")
           .set("spark.yarn.queue", "default")
@@ -153,12 +154,12 @@
 
           .set("spark.master", master)
           .set("spark.executor.instances", config.spark.opts.getOrElse("executor.instances", "1"))
-          .set("spark.yarn.jars", s"spark/jars/*")
+          .set("spark.yarn.jars", s"${StringUtils.stripStart(config.spark.home,"/")}/jars/*")
           .set("spark.executor.memory", config.spark.opts.getOrElse("executor.memory", "1g"))
           .set("spark.dynamicAllocation.enabled", "false")
           .set("spark.eventLog.enabled", "false")
           .set("spark.history.fs.logDirectory", "hdfs:///spark2-history/")
-          .set("hadoop.home.dir", config.YARN.hadoopHomeDir)
+          .set("hadoop.home.dir", config.yarn.hadoopHomeDir)
 
       case _ => throw new Exception(s"mode ${config.mode} is not legal.")
     }
@@ -182,21 +183,22 @@
     }
 
     // setting the executor env from spark_exec.yml
-    executorEnv match {
-      case Some(env) => {
-        for (c <- env) {
-          if (c._2.isInstanceOf[String])
-            conf.setExecutorEnv(c._1, c._2.toString)
+    if (executorEnv != null) {
+      executorEnv match {
+        case Some(env) => {
+          for (c <- env) {
+            if (c._2.isInstanceOf[String])
+              conf.setExecutorEnv(c._1, c._2.toString)
+          }
         }
+        case None =>
       }
-      case None =>
     }
-
     conf.set("spark.repl.class.outputDir", outputDir.getAbsolutePath)
 
     sparkSession = SparkSession.builder
       .appName(sparkAppName)
-      .master(env.master)
+      .master(env.getMaster)
 
       //.enableHiveSupport()
       .config(conf).getOrCreate()
diff --git a/frameworks/spark/runner/src/main/scala/org/apache/amaterasu/frameworks/spark/runner/repl/SparkScalaRunner.scala b/frameworks/spark/runner/src/main/scala/org/apache/amaterasu/frameworks/spark/runner/repl/SparkScalaRunner.scala
index f660939..7e089b5 100755
--- a/frameworks/spark/runner/src/main/scala/org/apache/amaterasu/frameworks/spark/runner/repl/SparkScalaRunner.scala
+++ b/frameworks/spark/runner/src/main/scala/org/apache/amaterasu/frameworks/spark/runner/repl/SparkScalaRunner.scala
@@ -56,7 +56,7 @@
 
   def interpretSources(source: Source, actionName: String, exports: Map[String, String]): Unit = {
 
-    notifier.info(s"================= started action $actionName =================")
+    notifier.info(s"================= Started action $actionName =================")
     //notifier.info(s"exports is: $exports")
 
     for (line <- source.getLines()) {
@@ -71,7 +71,6 @@
           interpreter.interpret(line)
         }
         else {
-
           val intresult = interpreter.interpret(line)
 
           val result = interpreter.prevRequestList.last.lineRep.call("$result")
@@ -87,9 +86,6 @@
 
               val resultName = interpreter.prevRequestList.last.termNames.last
 
-              //notifier.info(s" result name ${resultName.toString}")
-              //notifier.info(s" exist in exports: ${exports.contains(resultName.toString)}")
-
               if (exports.contains(resultName.toString)) {
 
                 val format = exports(resultName.toString)
@@ -99,12 +95,12 @@
                   result match {
                     case ds: Dataset[_] =>
                       log.debug(s"persisting DataFrame: $resultName")
-                      val writeLine = s"""$resultName.write.mode(SaveMode.Overwrite).format("$format").save("${env.workingDir}/$jobId/$actionName/$resultName")"""
+                      val writeLine = s"""$resultName.write.mode(SaveMode.Overwrite).format("$format").save("${env.getWorkingDir}/$jobId/$actionName/$resultName")"""
                       val writeResult = interpreter.interpret(writeLine)
                       if (writeResult != Results.Success) {
                         val err = outStream.toString
                         notifier.error(writeLine, err)
-                        log.error(s"error persisting dataset: $writeLine failed with: $err")
+                        log.error(s"error persisting dataset: $writeLine Failed with: $err")
                         //throw new Exception(err)
                       }
                       log.debug(s"persisted DataFrame: $resultName")
diff --git a/frameworks/spark/runner/src/main/scala/org/apache/amaterasu/frameworks/spark/runner/sparksql/SparkSqlRunner.scala b/frameworks/spark/runner/src/main/scala/org/apache/amaterasu/frameworks/spark/runner/sparksql/SparkSqlRunner.scala
index 1696174..9230bca 100644
--- a/frameworks/spark/runner/src/main/scala/org/apache/amaterasu/frameworks/spark/runner/sparksql/SparkSqlRunner.scala
+++ b/frameworks/spark/runner/src/main/scala/org/apache/amaterasu/frameworks/spark/runner/sparksql/SparkSqlRunner.scala
@@ -48,7 +48,7 @@
    */
   override def executeSource(actionSource: String, actionName: String, exports: util.Map[String, String]): Unit = {
 
-    notifier.info(s"================= started action $actionName =================")
+    notifier.info(s"================= Started action $actionName =================")
 
     if (!actionSource.isEmpty) {
 
@@ -120,7 +120,7 @@
         val exportName = exportsBuff.head._1
         val exportFormat = exportsBuff.head._2
         //notifier.info(s"exporting to -> ${env.workingDir}/$jobId/$actionName/$exportName")
-        result.write.mode(SaveMode.Overwrite).format(exportFormat).save(s"${env.workingDir}/$jobId/$actionName/$exportName")
+        result.write.mode(SaveMode.Overwrite).format(exportFormat).save(s"${env.getWorkingDir}/$jobId/$actionName/$exportName")
       }
       notifier.info(s"================= finished action $actionName =================")
     }
diff --git a/frameworks/spark/runner/src/test/resources/amaterasu.properties b/frameworks/spark/runner/src/test/resources/amaterasu.properties
index d402fed..e95df02 100755
--- a/frameworks/spark/runner/src/test/resources/amaterasu.properties
+++ b/frameworks/spark/runner/src/test/resources/amaterasu.properties
@@ -1,3 +1,17 @@
+#  Licensed to the Apache Software Foundation (ASF) under one or more
+#  contributor license agreements.  See the NOTICE file distributed with
+#  this work for additional information regarding copyright ownership.
+#  The ASF licenses this file to You under the Apache License, Version 2.0
+#  (the "License"); you may not use this file except in compliance with
+#  the License.  You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
 zk=127.0.0.1
 version=0.2.0-incubating
 master=192.168.33.11
diff --git a/frameworks/spark/runner/src/test/scala/org/apache/amaterasu/frameworks/spark/runner/SparkTestsSuite.scala b/frameworks/spark/runner/src/test/scala/org/apache/amaterasu/frameworks/spark/runner/SparkTestsSuite.scala
index 6de3643..80fc13c 100644
--- a/frameworks/spark/runner/src/test/scala/org/apache/amaterasu/frameworks/spark/runner/SparkTestsSuite.scala
+++ b/frameworks/spark/runner/src/test/scala/org/apache/amaterasu/frameworks/spark/runner/SparkTestsSuite.scala
@@ -18,7 +18,7 @@
 
 import java.io.{ByteArrayOutputStream, File}
 
-import org.apache.amaterasu.common.dataobjects.ExecData
+import org.apache.amaterasu.common.dataobjects.{Artifact, ExecData, Repo}
 import org.apache.amaterasu.common.execution.dependencies._
 import org.apache.amaterasu.common.runtime.Environment
 import org.apache.amaterasu.executor.common.executors.ProvidersFactory
@@ -30,10 +30,10 @@
 import org.scalatest._
 
 import scala.collection.mutable.ListBuffer
-
+import scala.collection.JavaConverters._
 
 class SparkTestsSuite extends Suites(
-  new PySparkRunnerTests,
+  //new PySparkRunnerTests,
   new RunnersLoadingTests,
   new SparkSqlRunnerTests,
   new SparkScalaRunnerTests
@@ -54,26 +54,26 @@
     val resources = new File(getClass.getResource("/spark_intp.py").getPath).getParent
     val workDir = new File(resources).getParentFile.getParent
 
-    env = Environment()
-    env.workingDir = s"file://$workDir"
+    env = new Environment()
+    env.setWorkingDir(s"file://$workDir")
 
-    env.master = "local[1]"
-    if (env.configuration != null) env.configuration ++ "pysparkPath" -> "/usr/bin/python" else env.configuration = Map(
+    env.setMaster("local[1]")
+    if (env.getConfiguration != null) env.setConfiguration(Map("pysparkPath" -> "/usr/bin/python").asJava) else env.setConfiguration( Map(
       "pysparkPath" -> "/usr/bin/python",
       "cwd" -> resources
-    )
+    ).asJava)
 
     val excEnv = Map[String, Any](
       "PYTHONPATH" -> resources
     )
     createTestMiniconda()
-    env.configuration ++ "spark_exec_env" -> excEnv
-    factory = ProvidersFactory(ExecData(env,
-      Dependencies(ListBuffer.empty[Repo], List.empty[Artifact]),
-      PythonDependencies(List.empty[PythonPackage]),
+    env.setConfiguration(Map( "spark_exec_env" -> excEnv).asJava)
+    factory = ProvidersFactory(new ExecData(env,
+      new Dependencies(ListBuffer.empty[Repo].asJava, List.empty[Artifact].asJava),
+      new PythonDependencies(List.empty[PythonPackage].asJava),
       Map(
-        "spark" -> Map.empty[String, Any],
-        "spark_exec_env" -> Map("PYTHONPATH" -> resources))),
+        "spark" -> Map.empty[String, Any].asJava,
+        "spark_exec_env" -> Map("PYTHONPATH" -> resources).asJava).asJava),
       "test",
       new ByteArrayOutputStream(),
       new TestNotifier(),
diff --git a/frameworks/spark/runner/src/test/scala/org/apache/amaterasu/frameworks/spark/runner/repl/SparkScalaRunnerTests.scala b/frameworks/spark/runner/src/test/scala/org/apache/amaterasu/frameworks/spark/runner/repl/SparkScalaRunnerTests.scala
index 90b0122..011c4d9 100755
--- a/frameworks/spark/runner/src/test/scala/org/apache/amaterasu/frameworks/spark/runner/repl/SparkScalaRunnerTests.scala
+++ b/frameworks/spark/runner/src/test/scala/org/apache/amaterasu/frameworks/spark/runner/repl/SparkScalaRunnerTests.scala
@@ -43,7 +43,7 @@
 
     val sparkRunner =factory.getRunner("spark", "scala").get.asInstanceOf[SparkScalaRunner]
     val script = getClass.getResource("/step-2.scala").getPath
-    sparkRunner.env.workingDir = s"${getClass.getResource("/tmp").getPath}"
+    sparkRunner.env.setWorkingDir(s"${getClass.getResource("/tmp").getPath}")
     AmaContext.init(sparkRunner.spark,"job",sparkRunner.env)
     val sourceCode = Source.fromFile(script).getLines().mkString("\n")
     sparkRunner.executeSource(sourceCode, "cont", Map.empty[String, String].asJava)
diff --git a/frameworks/spark/runner/src/test/scala/org/apache/amaterasu/frameworks/spark/runner/sparksql/SparkSqlRunnerTests.scala b/frameworks/spark/runner/src/test/scala/org/apache/amaterasu/frameworks/spark/runner/sparksql/SparkSqlRunnerTests.scala
index f189580..756bed9 100644
--- a/frameworks/spark/runner/src/test/scala/org/apache/amaterasu/frameworks/spark/runner/sparksql/SparkSqlRunnerTests.scala
+++ b/frameworks/spark/runner/src/test/scala/org/apache/amaterasu/frameworks/spark/runner/sparksql/SparkSqlRunnerTests.scala
@@ -52,10 +52,10 @@
     //Prepare test dataset
     val inputDf = spark.read.parquet(getClass.getResource("/SparkSql/parquet").getPath)
 
-    inputDf.write.mode(SaveMode.Overwrite).parquet(s"${env.workingDir}/${sparkSql.jobId}/sparksqldefaultparquetjobaction/sparksqldefaultparquetjobactiontempdf")
+    inputDf.write.mode(SaveMode.Overwrite).parquet(s"${env.getWorkingDir}/${sparkSql.jobId}/sparksqldefaultparquetjobaction/sparksqldefaultparquetjobactiontempdf")
     sparkSql.executeSource("select * FROM AMACONTEXT_sparksqldefaultparquetjobaction_sparksqldefaultparquetjobactiontempdf where age=22", "sql_parquet_test", Map("result" -> "parquet").asJava)
 
-    val outputDf = spark.read.parquet(s"${env.workingDir}/${sparkSql.jobId}/sql_parquet_test/result")
+    val outputDf = spark.read.parquet(s"${env.getWorkingDir}/${sparkSql.jobId}/sql_parquet_test/result")
     println("Output Default Parquet: " + inputDf.count + "," + outputDf.first().getString(1))
     outputDf.first().getString(1) shouldEqual "Michael"
   }
@@ -71,10 +71,10 @@
 
     //Prepare test dataset
     val inputDf = spark.read.parquet(getClass.getResource("/SparkSql/parquet").getPath)
-    inputDf.write.mode(SaveMode.Overwrite).parquet(s"${env.workingDir}/${sparkSql.jobId}/sparksqlparquetjobaction/sparksqlparquetjobactiontempdf")
+    inputDf.write.mode(SaveMode.Overwrite).parquet(s"${env.getWorkingDir}/${sparkSql.jobId}/sparksqlparquetjobaction/sparksqlparquetjobactiontempdf")
     sparkSql.executeSource("select * FROM AMACONTEXT_sparksqlparquetjobaction_sparksqlparquetjobactiontempdf READAS parquet", "sql_parquet_test", Map("result2" -> "parquet").asJava)
 
-    val outputDf = spark.read.parquet(s"${env.workingDir}/${sparkSql.jobId}/sql_parquet_test/result2")
+    val outputDf = spark.read.parquet(s"${env.getWorkingDir}/${sparkSql.jobId}/sql_parquet_test/result2")
     println("Output Parquet: " + inputDf.count + "," + outputDf.count)
     inputDf.first().getString(1) shouldEqual outputDf.first().getString(1)
   }
@@ -92,10 +92,10 @@
     //Prepare test dataset
     val inputDf = spark.read.json(getClass.getResource("/SparkSql/json").getPath)
 
-    inputDf.write.mode(SaveMode.Overwrite).json(s"${env.workingDir}/${sparkSql.jobId}/sparksqljsonjobaction/sparksqljsonjobactiontempdf")
+    inputDf.write.mode(SaveMode.Overwrite).json(s"${env.getWorkingDir}/${sparkSql.jobId}/sparksqljsonjobaction/sparksqljsonjobactiontempdf")
     sparkSql.executeSource("select * FROM AMACONTEXT_sparksqljsonjobaction_sparksqljsonjobactiontempdf  where age='30' READAS json", "sql_json_test", Map("result" -> "json").asJava)
 
-    val outputDf = spark.read.json(s"${env.workingDir}/${sparkSql.jobId}/sql_json_test/result")
+    val outputDf = spark.read.json(s"${env.getWorkingDir}/${sparkSql.jobId}/sql_json_test/result")
     println("Output JSON: " + inputDf.count + "," + outputDf.count)
     outputDf.first().getString(1) shouldEqual "Kirupa"
 
@@ -112,11 +112,11 @@
 
     //Prepare test dataset
     val inputDf = spark.read.csv(getClass.getResource("/SparkSql/csv").getPath)
-    inputDf.write.mode(SaveMode.Overwrite).csv(s"${env.workingDir}/${sparkSql.jobId}/sparksqlcsvjobaction/sparksqlcsvjobactiontempdf")
+    inputDf.write.mode(SaveMode.Overwrite).csv(s"${env.getWorkingDir}/${sparkSql.jobId}/sparksqlcsvjobaction/sparksqlcsvjobactiontempdf")
     sparkSql.executeSource("select * FROM AMACONTEXT_sparksqlcsvjobaction_sparksqlcsvjobactiontempdf READAS csv", "sql_csv_test", Map("result" -> "csv").asJava)
 
 
-    val outputDf = spark.read.csv(s"${env.workingDir}/${sparkSql.jobId}/sql_csv_test/result")
+    val outputDf = spark.read.csv(s"${env.getWorkingDir}/${sparkSql.jobId}/sql_csv_test/result")
     println("Output CSV: " + inputDf.count + "," + outputDf.count)
     inputDf.first().getString(1) shouldEqual outputDf.first().getString(1)
   }
diff --git a/frameworks/spark/runner/src/test/scala/org/apache/amaterasu/utilities/TestNotifier.scala b/frameworks/spark/runner/src/test/scala/org/apache/amaterasu/utilities/TestNotifier.scala
index 430e75a..6a9e9f0 100644
--- a/frameworks/spark/runner/src/test/scala/org/apache/amaterasu/utilities/TestNotifier.scala
+++ b/frameworks/spark/runner/src/test/scala/org/apache/amaterasu/utilities/TestNotifier.scala
@@ -23,14 +23,14 @@
 class TestNotifier extends Notifier  {
 
   override def info(msg: String): Unit = {
-    log.info(msg)
+    getLog.info(msg)
   }
 
   override def success(line: String): Unit = {
-    log.info(s"successfully executed line: $line")
+    getLog.info(s"successfully executed line: $line")
   }
 
   override def error(line: String, msg: String): Unit = {
-    log.error(s"Error executing line: $line message: $msg")
+    getLog.error(s"Error executing line: $line message: $msg")
   }
 }
diff --git a/frameworks/spark/runtime/src/main/scala/org/apache/amaterasu/frameworks/spark/runtime/AmaContext.scala b/frameworks/spark/runtime/src/main/scala/org/apache/amaterasu/frameworks/spark/runtime/AmaContext.scala
index fc9fb94..930927c 100644
--- a/frameworks/spark/runtime/src/main/scala/org/apache/amaterasu/frameworks/spark/runtime/AmaContext.scala
+++ b/frameworks/spark/runtime/src/main/scala/org/apache/amaterasu/frameworks/spark/runtime/AmaContext.scala
@@ -40,7 +40,7 @@
   }
 
   def getDataFrame(actionName: String, dfName: String, format: String = "parquet"): DataFrame = {
-    spark.read.format(format).load(s"${env.workingDir}/$jobId/$actionName/$dfName")
+    spark.read.format(format).load(s"${env.getWorkingDir}/$jobId/$actionName/$dfName")
   }
 
   def getDataset[T: Encoder](actionName: String, dfName: String, format: String = "parquet"): Dataset[T] = {
diff --git a/leader-common/build.gradle b/leader-common/build.gradle
index 6ca9513..4254899 100644
--- a/leader-common/build.gradle
+++ b/leader-common/build.gradle
@@ -66,14 +66,22 @@
     compile 'org.scala-lang:scala-library:2.11.8'
 
     compile project(':common')
+    compile project(':amaterasu-sdk')
 
     compile group: 'com.fasterxml.jackson.module', name: 'jackson-module-scala_2.11', version: '2.6.3'
-    compile group: 'com.fasterxml.jackson.core', name: 'jackson-core', version: '2.6.4'
-    compile group: 'com.fasterxml.jackson.core', name: 'jackson-annotations', version: '2.6.4'
-    compile group: 'com.fasterxml.jackson.core', name: 'jackson-databind', version: '2.6.4'
-    compile group: 'com.fasterxml.jackson.dataformat', name: 'jackson-dataformat-yaml', version: '2.6.4'
-
+    compile group: 'com.fasterxml.jackson.core', name: 'jackson-core', version: '2.9.8'
+    compile group: 'com.fasterxml.jackson.core', name: 'jackson-annotations', version: '2.9.8'
+    compile group: 'com.fasterxml.jackson.core', name: 'jackson-databind', version: '2.9.8'
+    compile group: 'com.fasterxml.jackson.dataformat', name: 'jackson-dataformat-yaml', version: '2.9.8'
+    compile group: 'com.fasterxml.jackson.module', name: 'jackson-module-kotlin', version: '2.9.8'
+    
+    compile group: 'org.reflections', name: 'reflections', version: '0.9.11'
     compile group: 'org.eclipse.jgit', name: 'org.eclipse.jgit', version: '4.2.0.201601211800-r'
+    compile group: 'org.apache.activemq', name: 'activemq-broker', version: '5.15.3'
+    runtime group: 'org.apache.activemq', name: 'activemq-kahadb-store', version: '5.15.3'
+    compile group: 'com.importre', name: 'crayon', version: '0.1.0'
+    compile group: 'com.beust', name: 'klaxon', version: '5.0.1'
+    compile group: 'com.github.ajalt', name: 'clikt', version: '1.6.0'
 
     compile "org.jetbrains.kotlin:kotlin-stdlib-jdk8:$kotlin_version"
     compile "org.jetbrains.kotlin:kotlin-reflect"
@@ -83,9 +91,10 @@
 
     testCompile 'org.jetbrains.spek:spek-api:1.1.5'
     testCompile "org.jetbrains.kotlin:kotlin-test-junit:$kotlin_version"
+    testCompile 'org.apache.curator:curator-test:2.9.1'
     testRuntime 'org.jetbrains.spek:spek-junit-platform-engine:1.1.5'
 
-    // spek requires kotlin-reflect, can be omitted if already in the classpath
+    // Spek requires kotlin-reflect, can be omitted if already in the classpath
     testRuntimeOnly "org.jetbrains.kotlin:kotlin-reflect:$kotlin_version"
 
 }
@@ -94,7 +103,6 @@
     test {
         resources.srcDirs += [file('src/test/resources')]
     }
-
 }
 
 compileKotlin {
diff --git a/leader-common/src/main/kotlin/org/apache/amaterasu/leader/common/configuration/ConfigManager.kt b/leader-common/src/main/kotlin/org/apache/amaterasu/leader/common/configuration/ConfigManager.kt
index 2f98fa6..00b640a 100644
--- a/leader-common/src/main/kotlin/org/apache/amaterasu/leader/common/configuration/ConfigManager.kt
+++ b/leader-common/src/main/kotlin/org/apache/amaterasu/leader/common/configuration/ConfigManager.kt
@@ -18,9 +18,10 @@
 
 import com.uchuhimo.konf.Config
 import com.uchuhimo.konf.source.yaml.toYaml
+import org.apache.amaterasu.common.logging.KLogging
 import java.io.File
 
-class ConfigManager(private val env: String, private val repoPath: String, private val frameworkItems: List<String> = emptyList()) {
+class ConfigManager(private val env: String, private val repoPath: String, private val frameworkItems: List<String> = emptyList()): KLogging() {
 
     private val envFolder = "$repoPath/env/$env"
 
@@ -34,6 +35,7 @@
     }
 
     init {
+        log.info("environment folder is $envFolder")
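+        // e.g. for env "test", every YAML file under $repoPath/env/test is folded into the config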
         for (file in File(envFolder).listFiles()) {
             config = config.from.yaml.file(file)
             println(config.toYaml.toText())
diff --git a/leader-common/src/main/kotlin/org/apache/amaterasu/leader/common/dsl/JobParser.kt b/leader-common/src/main/kotlin/org/apache/amaterasu/leader/common/dsl/JobParser.kt
new file mode 100644
index 0000000..4401251
--- /dev/null
+++ b/leader-common/src/main/kotlin/org/apache/amaterasu/leader/common/dsl/JobParser.kt
@@ -0,0 +1,203 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.amaterasu.leader.common.dsl
+
+import com.fasterxml.jackson.databind.JsonNode
+import com.fasterxml.jackson.databind.ObjectMapper
+import com.fasterxml.jackson.databind.node.ArrayNode
+import com.fasterxml.jackson.dataformat.yaml.YAMLFactory
+import org.apache.amaterasu.common.dataobjects.ActionData
+import org.apache.amaterasu.common.dataobjects.Artifact
+import org.apache.amaterasu.common.dataobjects.Repo
+import org.apache.amaterasu.leader.common.execution.JobManager
+import org.apache.amaterasu.leader.common.execution.actions.Action
+import org.apache.amaterasu.leader.common.execution.actions.ErrorAction
+import org.apache.amaterasu.leader.common.execution.actions.SequentialAction
+import org.apache.curator.framework.CuratorFramework
+import java.io.File
+import java.util.concurrent.BlockingQueue
+
+object JobParser {
+
+    @JvmStatic
+    fun loadMakiFile(): String = File("repo/maki.yml").readText(Charsets.UTF_8)
+
+    /**
+     * Parses the maki.yml string and creates a job manager
+     *
+     * @param jobId the id of the job being created
+     * @param maki a string containing the YAML definition of the job
+     * @param actionsQueue the queue the job's actions are submitted to
+     * @param client the curator (ZooKeeper) client used to coordinate the job state
+     * @param attempts the maximum number of attempts for each action
+     * @return a JobManager coordinating the execution of the parsed job
+     */
+    @JvmStatic
+    fun parse(jobId: String,
+              maki: String,
+              actionsQueue: BlockingQueue<ActionData>,
+              client: CuratorFramework,
+              attempts: Int): JobManager {
+
+        val mapper = ObjectMapper(YAMLFactory())
+
+        val job = mapper.readTree(maki)
+
+        // loading the job details
+        val manager = JobManager(job.path("job-name").asText(), jobId, actionsQueue, client)
+
+        // iterating the flow list and constructing the job's flow
+        val actions = (job.path("flow") as ArrayNode).toList()
+
+        parseActions(actions, manager, actionsQueue, attempts, null)
+
+        return manager
+    }
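+
+    // illustrative usage (a sketch - assumes a started CuratorFramework client;
+    // the job id and attempt count are example values):
+    //   val queue = java.util.concurrent.LinkedBlockingQueue<ActionData>()
+    //   val manager = JobParser.parse("job-0001", loadMakiFile(), queue, client, 3)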
+
+    @JvmStatic
+    fun parseActions(actions: List<JsonNode>,
+                     manager: JobManager,
+                     actionsQueue: BlockingQueue<ActionData>,
+                     attempts: Int,
+                     previous: Action?) {
+
+
+        if (actions.isEmpty())
+            return
+
+        val actionData = actions.first()
+
+        val action = parseSequentialAction(
+                actionData,
+                manager.jobId,
+                actionsQueue,
+                manager.client,
+                attempts
+        )
+
+        //updating the list of frameworks setup
+        manager[action.data.groupId] = action.data.typeId
+
+
+        if (!manager.isInitialized) {
+            manager.head = action
+        }
+
+        previous?.let {
+            previous.data.nextActionIds.add(action.actionId)
+        }
+        manager.registerAction(action)
+
+        val errorNode = actionData.path("error")
+
+        if (!errorNode.isMissingNode) {
+
+            val errorAction = parseErrorAction(
+                    errorNode,
+                    manager.jobId,
+                    action.data.id,
+                    actionsQueue,
+                    manager.client
+            )
+
+            action.data.errorActionId = errorAction.data.id
+            manager.registerAction(errorAction)
+
+            //updating the list of frameworks setup
+            manager[errorAction.data.groupId] = errorAction.data.typeId
+        }
+
+        parseActions(actions.drop(1), manager, actionsQueue, attempts, action)
+
+    }
+
+    @JvmStatic
+    fun parseSequentialAction(action: JsonNode,
+                              jobId: String,
+                              actionsQueue: BlockingQueue<ActionData>,
+                              client: CuratorFramework,
+                              attempts: Int): SequentialAction {
+
+        val result = SequentialAction(action.path("name").asText(),
+                action.path("file").asText(),
+                action.path("config").asText(),
+                action.path("runner").path("group").asText(),
+                action.path("runner").path("type").asText(),
+                action.path("exports").fields().asSequence().map { it.key to it.value.asText() }.toMap(),
+                jobId,
+                actionsQueue,
+                client,
+                attempts)
+
+        if(!action.path("artifact").isMissingNode){
+            result.data.artifact = parseArtifact(action)
+            result.data.entryClass = action.path("class").asText()
+        }
+
+        if(!action.path("repo").isMissingNode){
+            result.data.repo = parseRepo(action)
+        }
+
+        return result
+    }
+
+    private fun parseRepo(action: JsonNode): Repo {
+        return Repo(
+                action.path("repo").path("id").asText(),
+                action.path("repo").path("type").asText(),
+                action.path("repo").path("url").asText())
+    }
+
+    private fun parseArtifact(action: JsonNode): Artifact {
+        return Artifact(
+                action.path("artifact").path("groupId").asText(),
+                action.path("artifact").path("artifactId").asText(),
+                action.path("artifact").path("version").asText())
+    }
+
+    @JvmStatic
+    fun parseErrorAction(action: JsonNode,
+                         jobId: String,
+                         parent: String,
+                         actionsQueue: BlockingQueue<ActionData>,
+                         client: CuratorFramework): ErrorAction {
+
+        val result = ErrorAction(
+                action.path("name").asText(),
+                action.path("file").asText(),
+                parent,
+                action.path("config").asText(),
+                action.path("runner").path("group").asText(),
+                action.path("runner").path("type").asText(),
+
+                jobId,
+                actionsQueue,
+                client
+        )
+
+        if(!action.path("artifact").isMissingNode){
+            result.data.artifact = parseArtifact(action)
+        }
+
+        if(!action.path("repo").isMissingNode){
+            result.data.repo = parseRepo(action)
+        }
+
+        return result
+
+    }
+
+}
diff --git a/leader-common/src/main/kotlin/org/apache/amaterasu/leader/common/execution/JobLoader.kt b/leader-common/src/main/kotlin/org/apache/amaterasu/leader/common/execution/JobLoader.kt
new file mode 100644
index 0000000..f8a9a57
--- /dev/null
+++ b/leader-common/src/main/kotlin/org/apache/amaterasu/leader/common/execution/JobLoader.kt
@@ -0,0 +1,97 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.amaterasu.leader.common.execution
+
+import org.apache.amaterasu.common.configuration.enums.ActionStatus
+import org.apache.amaterasu.common.dataobjects.ActionData
+import org.apache.amaterasu.common.logging.KLogging
+import org.apache.amaterasu.leader.common.dsl.GitUtil
+import org.apache.amaterasu.leader.common.dsl.JobParser
+import org.apache.curator.framework.CuratorFramework
+import org.apache.zookeeper.CreateMode
+import java.util.concurrent.BlockingQueue
+
+object JobLoader : KLogging() {
+
+    fun loadJob(src: String, branch: String, jobId: String, client: CuratorFramework, attempts: Int, actionsQueue: BlockingQueue<ActionData>): JobManager {
+
+        // creating the jobs znode and storing the source repo and branch
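+        // resulting layout (sketch):
+        //   /{jobId}         - persistent znode for the job
+        //   /{jobId}/repo    - the source repository address
+        //   /{jobId}/branch  - the branch to execute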
+        client.create().withMode(CreateMode.PERSISTENT).forPath("/$jobId")
+        client.create().withMode(CreateMode.PERSISTENT).forPath("/$jobId/repo", src.toByteArray())
+        client.create().withMode(CreateMode.PERSISTENT).forPath("/$jobId/branch", branch.toByteArray())
+
+        val maki: String = loadMaki(src, branch)
+
+        return createJobManager(maki, jobId, client, attempts, actionsQueue)
+
+    }
+
+    fun createJobManager(maki: String, jobId: String, client: CuratorFramework, attempts: Int, actionsQueue: BlockingQueue<ActionData>): JobManager {
+
+        return JobParser.parse(
+                jobId,
+                maki,
+                actionsQueue,
+                client,
+                attempts
+        )
+    }
+
+    fun loadMaki(src: String, branch: String): String {
+
+        // cloning the git repo
+        log.debug("getting repo: $src, for branch $branch")
+        GitUtil.cloneRepo(src, branch)
+
+        // loading the maki.yml file that defines the workflow
+        return JobParser.loadMakiFile()
+    }
+
+    fun reloadJob(jobId: String, client: CuratorFramework, attempts: Int, actionsQueue: BlockingQueue<ActionData>): JobManager {
+
+        //val jobState = client.getChildren.forPath(s"/$jobId")
+        val src = String(client.data.forPath("/$jobId/repo"))
+        val branch = String(client.data.forPath("/$jobId/branch"))
+
+        val maki: String = loadMaki(src, branch)
+
+        val jobManager: JobManager = createJobManager(maki, jobId, client, attempts, actionsQueue)
+        restoreJobState(jobManager, jobId, client)
+
+        jobManager.start()
+        return jobManager
+    }
+
+    fun restoreJobState(jobManager: JobManager, jobId: String, client: CuratorFramework): Unit {
+
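+        // any task left Queued or Started by a previous run is requeued for execution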
+        val tasks = client.children.forPath("/$jobId").filter { it.startsWith("task") }
+
+        for (task in tasks) {
+
+            val status = ActionStatus.valueOf(String(client.data.forPath("/$jobId/$task")))
+            if (status == ActionStatus.Queued || status == ActionStatus.Started) {
+                jobManager.reQueueAction(task.substring(task.indexOf("task-") + 5))
+            }
+
+        }
+
+    }
+
+
+}
\ No newline at end of file
diff --git a/leader-common/src/main/kotlin/org/apache/amaterasu/leader/common/execution/JobManager.kt b/leader-common/src/main/kotlin/org/apache/amaterasu/leader/common/execution/JobManager.kt
new file mode 100644
index 0000000..1c69ee7
--- /dev/null
+++ b/leader-common/src/main/kotlin/org/apache/amaterasu/leader/common/execution/JobManager.kt
@@ -0,0 +1,162 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.amaterasu.leader.common.execution
+
+import org.apache.amaterasu.common.configuration.enums.ActionStatus
+import org.apache.amaterasu.common.dataobjects.ActionData
+import org.apache.amaterasu.common.logging.KLogging
+import org.apache.amaterasu.leader.common.execution.actions.Action
+import org.apache.curator.framework.CuratorFramework
+import java.util.concurrent.BlockingQueue
+
+data class JobManager(var name: String = "",
+                      var jobId: String = "",
+                      var executionQueue: BlockingQueue<ActionData>,
+                      var client: CuratorFramework) : KLogging() {
+
+    // TODO: build a meaningful representation of the job; currently an empty string
+    override fun toString(): String = ""
+
+    lateinit var head: Action
+
+    // TODO: this is not private due to tests, fix this!!!
+    val registeredActions = HashMap<String, Action>()
+    private val frameworks = HashMap<String, HashSet<String>>()
+
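+    /**
+     * Registers a runner type under a framework group, e.g.
+     * manager["spark"] = "scala" records that the job uses the spark
+     * group's scala runner
+     */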
+    operator fun set(groupId: String, typeId: String) = frameworks.getOrPut(groupId) { HashSet() }.add(typeId)
+
+    /**
+     * The start method initiates the job execution by executing the first action.
+     * start must be called only once, and only by the JobManager
+     */
+    fun start(): Unit = head.execute()
+
+    val outOfActions: Boolean
+        get() = registeredActions.values.none {
+            it.data.status == ActionStatus.Pending ||
+                    it.data.status == ActionStatus.Queued ||
+                    it.data.status == ActionStatus.Started
+        }
+
+    /**
+     * getNextActionData returns the data of the next action to be executed if such action
+     * exists
+     *
+     * @return the ActionData of the next action, returns null if no such action exists
+     */
+    val nextActionData: ActionData?
+        get() {
+
+            val nextAction: ActionData? = executionQueue.poll()
+
+            if (nextAction != null) {
+                registeredActions[nextAction.id]?.announceStart()
+            }
+
+            return nextAction
+        }
+
+    fun reQueueAction(actionId: String) {
+
+        log.info("requeing action $actionId")
+        registeredActions.forEach { log.info("key ${it.key}") }
+
+        val action: Action = registeredActions[actionId] ?: throw IllegalAccessException()
+        executionQueue.put(action.data)
+        registeredActions[actionId]!!.announceQueued()
+
+    }
+
+    /**
+     * Registers an action with the job
+     *
+     * @param action
+     */
+    fun registerAction(action: Action) {
+        registeredActions[action.actionId] = action
+    }
+
+    /**
+     * Announces the completion of an action and executes the actions that follow it
+     *
+     * @param actionId
+     */
+    fun actionComplete(actionId: String) {
+        val action = registeredActions[actionId]
+        action?.let {
+
+            it.announceComplete()
+
+            action.data.nextActionIds.forEach { id -> registeredActions[id]!!.execute() }
+
+            // we don't need the error action anymore
+            if (it.data.hasErrorAction)
+                registeredActions[action.data.errorActionId]!!.announceCanceled()
+        }
+
+    }
+
+    /**
+     * Handles a failed action by getting the id of the next action to execute,
+     * which can be either the same action (a retry) or its error action,
+     * if such an action exists
+     *
+     * @param actionId
+     */
+    fun actionFailed(actionId: String, message: String) {
+
+        log.warn(message)
+
+        val action = registeredActions[actionId]
+        val id = action!!.handleFailure(message)
+        if (!id.isEmpty())
+            registeredActions[id]?.execute()
+
+        //delete all future actions
+        cancelFutureActions(action)
+    }
+
+    fun cancelFutureActions(action: Action) {
+
+        if (action.data.status != ActionStatus.Failed)
+            action.announceCanceled()
+
+        action.data.nextActionIds.forEach { id ->
+            val registeredAction = registeredActions[id]
+            if (registeredAction != null) {
+                cancelFutureActions(registeredAction)
+            }
+        }
+    }
+
+    /**
+     * announce the start of execution of the action
+     */
+    fun actionStarted(actionId: String) {
+
+        val action = registeredActions[actionId]
+        action?.announceStart()
+
+    }
+
+    fun actionsCount(): Int = executionQueue.size
+
+    val isInitialized: Boolean
+        get() = ::head.isInitialized
+}
+
diff --git a/leader-common/src/main/kotlin/org/apache/amaterasu/leader/common/execution/actions/Action.kt b/leader-common/src/main/kotlin/org/apache/amaterasu/leader/common/execution/actions/Action.kt
index 9b1cac3..34386aa 100644
--- a/leader-common/src/main/kotlin/org/apache/amaterasu/leader/common/execution/actions/Action.kt
+++ b/leader-common/src/main/kotlin/org/apache/amaterasu/leader/common/execution/actions/Action.kt
@@ -19,7 +19,6 @@
 import org.apache.amaterasu.common.dataobjects.ActionData
 import org.apache.amaterasu.common.configuration.enums.ActionStatus
 import org.apache.amaterasu.common.logging.KLogging
-import org.apache.amaterasu.common.logging.Logging
 import org.apache.curator.framework.CuratorFramework
 
 /**
@@ -35,8 +34,8 @@
 
     fun announceStart() {
         log.debug("Starting action ${data.name} of group ${data.groupId} and type ${data.typeId}")
-        client.setData().forPath(actionPath, ActionStatus.started.value.toByteArray())
-        data.status = ActionStatus.started
+        client.setData().forPath(actionPath, ActionStatus.Started.value.toByteArray())
+        data.status = ActionStatus.Started
     }
 
     fun announceRunning() {
@@ -46,21 +45,21 @@
     }
 
     fun announceQueued() {
-        log.debug("Action ${data.name} of group ${data.groupId} and of type ${data.typeId} is queued for execution")
-        client.setData().forPath(actionPath, ActionStatus.queued.value.toByteArray())
-        data.status = ActionStatus.queued
+        log.debug("Action ${data.name} of group ${data.groupId} and of type ${data.typeId} is Queued for execution")
+        client.setData().forPath(actionPath, ActionStatus.Queued.value.toByteArray())
+        data.status = ActionStatus.Queued
     }
 
     fun announceComplete() {
-        log.debug("Action ${data.name} of group ${data.groupId} and of type ${data.typeId} complete")
-        client.setData().forPath(actionPath, ActionStatus.complete.value.toByteArray())
-        data.status = ActionStatus.complete
+        log.debug("Action ${data.name} of group ${data.groupId} and of type ${data.typeId} Complete")
+        client.setData().forPath(actionPath, ActionStatus.Complete.value.toByteArray())
+        data.status = ActionStatus.Complete
     }
 
     fun announceCanceled() {
-        log.debug("Action ${data.name} of group ${data.groupId} and of type ${data.typeId} was canceled")
-        client.setData().forPath(actionPath, ActionStatus.canceled.value.toByteArray())
-        data.status = ActionStatus.canceled
+        log.debug("Action ${data.name} of group ${data.groupId} and of type ${data.typeId} was Canceled")
+        client.setData().forPath(actionPath, ActionStatus.Canceled.value.toByteArray())
+        data.status = ActionStatus.Canceled
     }
 
     protected fun announceFailure() {}
diff --git a/leader-common/src/main/kotlin/org/apache/amaterasu/leader/common/execution/actions/ErrorAction.kt b/leader-common/src/main/kotlin/org/apache/amaterasu/leader/common/execution/actions/ErrorAction.kt
new file mode 100644
index 0000000..4dd67e7
--- /dev/null
+++ b/leader-common/src/main/kotlin/org/apache/amaterasu/leader/common/execution/actions/ErrorAction.kt
@@ -0,0 +1,48 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.amaterasu.leader.common.execution.actions
+
+import org.apache.amaterasu.common.configuration.enums.ActionStatus
+import org.apache.amaterasu.common.dataobjects.ActionData
+import org.apache.curator.framework.CuratorFramework
+import org.apache.zookeeper.CreateMode
+import java.util.concurrent.BlockingQueue
+
+class ErrorAction(name: String,
+                  src: String,
+                  parent: String,
+                  config: String,
+                  groupId: String,
+                  typeId: String,
+                  jobId: String,
+                  queue: BlockingQueue<ActionData>,
+                  zkClient: CuratorFramework) : SequentialActionBase() {
+
+    init {
+        jobsQueue = queue
+
+        // creating a znode for the action
+        client = zkClient
+        actionPath = client.create().withMode(CreateMode.PERSISTENT).forPath("/$jobId/task-$parent-error", ActionStatus.Pending.toString().toByteArray())
+        actionId = actionPath.substring(actionPath.indexOf('-') + 1).replace("/", "-")
+
+        this.jobId = jobId
+        data = ActionData(ActionStatus.Pending, name, src, config, groupId, typeId, actionId)
+    }
+}
\ No newline at end of file
diff --git a/leader-common/src/main/kotlin/org/apache/amaterasu/leader/common/execution/actions/SequentialAction.kt b/leader-common/src/main/kotlin/org/apache/amaterasu/leader/common/execution/actions/SequentialAction.kt
new file mode 100644
index 0000000..30b4998
--- /dev/null
+++ b/leader-common/src/main/kotlin/org/apache/amaterasu/leader/common/execution/actions/SequentialAction.kt
@@ -0,0 +1,51 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.amaterasu.leader.common.execution.actions
+
+import org.apache.amaterasu.common.configuration.enums.ActionStatus
+import org.apache.amaterasu.common.dataobjects.ActionData
+import org.apache.curator.framework.CuratorFramework
+import org.apache.zookeeper.CreateMode
+import java.util.concurrent.BlockingQueue
+
+class SequentialAction(name: String,
+                       src: String,
+                       config: String,
+                       groupId: String,
+                       typeId: String,
+                       exports: Map<String, String>,
+                       jobId: String,
+                       queue: BlockingQueue<ActionData>,
+                       zkClient: CuratorFramework,
+                       attempts: Int): SequentialActionBase() {
+    init {
+        jobsQueue = queue
+
+        // creating a znode for the action
+        client = zkClient
+        actionPath = client.create().withMode(CreateMode.PERSISTENT_SEQUENTIAL).forPath("/$jobId/task-", ActionStatus.Pending.toString().toByteArray())
+        actionId = actionPath.substring(actionPath.indexOf("task-") + 5)
+
+        this.attempts = attempts
+        this.jobId = jobId
+        data = ActionData(ActionStatus.Pending, name, src, config, groupId, typeId, actionId, exports, arrayListOf())
+    }
+}
diff --git a/leader-common/src/main/kotlin/org/apache/amaterasu/leader/common/execution/actions/SequentialActionBase.kt b/leader-common/src/main/kotlin/org/apache/amaterasu/leader/common/execution/actions/SequentialActionBase.kt
new file mode 100644
index 0000000..47d88a4
--- /dev/null
+++ b/leader-common/src/main/kotlin/org/apache/amaterasu/leader/common/execution/actions/SequentialActionBase.kt
@@ -0,0 +1,68 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.amaterasu.leader.common.execution.actions
+
+import org.apache.amaterasu.common.configuration.enums.ActionStatus
+import org.apache.amaterasu.common.dataobjects.ActionData
+import java.util.concurrent.BlockingQueue
+
+open class SequentialActionBase : Action() {
+
+
+    var jobId: String = ""
+    lateinit var jobsQueue: BlockingQueue<ActionData>
+    var attempts: Int = 2
+    private var attempt: Int = 1
+
+    override fun execute() {
+
+        try {
+
+            announceQueued()
+            jobsQueue.add(data)
+
+        }
+        catch(e: Exception) {
+
+            //TODO: this will not invoke the error action
+            e.message?.let{ handleFailure(it) }
+
+        }
+
+    }
+
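+    /**
+     * Handles a failed attempt and returns the id of the action to execute
+     * next: the same action while attempts remain (a retry), the error action
+     * if one is defined, or an empty string when there is nothing left to run
+     */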
+    override fun handleFailure(message: String): String {
+
+        println("Part ${data.name} of group ${data.groupId} and of type ${data.typeId} Failed on attempt $attempt with message: $message")
+        attempt += 1
+
+        var result = ""
+        if (attempt <= attempts) {
+            result = data.id
+        }
+        else {
+            announceFailure()
+            if(data.hasErrorAction) {
+                println("===> moving to err action ${data.errorActionId}")
+                data.status = ActionStatus.Failed
+                result = data.errorActionId
+            }
+        }
+        return result
+    }
+
+}
diff --git a/leader-common/src/main/kotlin/org/apache/amaterasu/leader/common/execution/frameworls/FrameworkProvidersFactory.kt b/leader-common/src/main/kotlin/org/apache/amaterasu/leader/common/execution/frameworls/FrameworkProvidersFactory.kt
new file mode 100644
index 0000000..cf3e10d
--- /dev/null
+++ b/leader-common/src/main/kotlin/org/apache/amaterasu/leader/common/execution/frameworls/FrameworkProvidersFactory.kt
@@ -0,0 +1,45 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.amaterasu.leader.common.execution.frameworls
+
+import org.apache.amaterasu.common.configuration.ClusterConfig
+import org.apache.amaterasu.common.logging.KLogging
+import org.apache.amaterasu.sdk.frameworks.FrameworkSetupProvider
+import org.reflections.Reflections
+
+class FrameworkProvidersFactory(val env: String, val config: ClusterConfig) : KLogging() {
+
+    val providers: Map<String, FrameworkSetupProvider>
+
+    init {
+        // scanning the classpath for every FrameworkSetupProvider implementation
+        val reflections = Reflections(ClassLoader::class.java)
+        val runnerTypes = reflections.getSubTypesOf(FrameworkSetupProvider::class.java)
+
+        // instantiating and initializing a provider per framework group
+        providers = runnerTypes.map {
+            val provider = it.newInstance()
+
+            provider.init(env, config)
+            log.info("a provider for group ${provider.groupIdentifier} was created")
+
+            provider.groupIdentifier to provider
+        }.toMap()
+    }
+}
\ No newline at end of file
diff --git a/leader-common/src/main/kotlin/org/apache/amaterasu/leader/common/launcher/AmaOpts.kt b/leader-common/src/main/kotlin/org/apache/amaterasu/leader/common/launcher/AmaOpts.kt
new file mode 100644
index 0000000..132bd77
--- /dev/null
+++ b/leader-common/src/main/kotlin/org/apache/amaterasu/leader/common/launcher/AmaOpts.kt
@@ -0,0 +1,41 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.amaterasu.leader.common.launcher
+
+data class AmaOpts(
+        var repo: String = "",
+        var branch: String = "master",
+        var env: String = "default",
+        var name: String = "amaterasu-job",
+        var jobId: String = "",
+        var newJobId: String = "",
+        var report: String = "code",
+        var home: String = "") {
+
+    fun toCmdString(): String {
+
+        var cmd = " --repo $repo --branch $branch --env $env --name $name --report $report --home $home"
+        if (jobId.isNotEmpty()) {
+            cmd += " --job-id $jobId"
+        }
+        return cmd
+    }
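+
+    // with the defaults above, toCmdString() yields (note the empty --repo and --home):
+    //   " --repo  --branch master --env default --name amaterasu-job --report code --home "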
+
+    override fun toString(): String {
+        return toCmdString()
+    }
+}
\ No newline at end of file
diff --git a/leader-common/src/main/kotlin/org/apache/amaterasu/leader/common/launcher/ArgsParser.kt b/leader-common/src/main/kotlin/org/apache/amaterasu/leader/common/launcher/ArgsParser.kt
new file mode 100644
index 0000000..c745c4a
--- /dev/null
+++ b/leader-common/src/main/kotlin/org/apache/amaterasu/leader/common/launcher/ArgsParser.kt
@@ -0,0 +1,35 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.amaterasu.leader.common.launcher
+
+import com.github.ajalt.clikt.core.CliktCommand
+import com.github.ajalt.clikt.parameters.options.default
+import com.github.ajalt.clikt.parameters.options.option
+import com.github.ajalt.clikt.parameters.options.prompt
+
+abstract class ArgsParser : CliktCommand() {
+
+     val repo: String by option("--repo", help = "The address of the job repository").prompt("Please provide an Amaterasu Repo")
+     val branch: String by option(help = "The branch to be executed (default is master)").default("master")
+     val env: String by option(help = "The environment to be executed (test, prod, etc.; values from the default env are taken if no env is specified)").default("default")
+     val name: String by option(help = "The name of the job").default("amaterasu-job")
+     val jobId: String by option("--job-id", help = "The jobId - should be passed only when resuming a job").default("")
+     val newJobId: String by option("--new-job-id", help = "The new jobId - should never be passed by a user").default("")
+     val report: String by option(help = "The level of reporting").default("code")
+     val home: String by option(help = "").default("")
+
+}
\ No newline at end of file
diff --git a/leader-common/src/main/kotlin/org/apache/amaterasu/leader/common/utilities/ActiveReportListener.kt b/leader-common/src/main/kotlin/org/apache/amaterasu/leader/common/utilities/ActiveReportListener.kt
new file mode 100644
index 0000000..6bda13c
--- /dev/null
+++ b/leader-common/src/main/kotlin/org/apache/amaterasu/leader/common/utilities/ActiveReportListener.kt
@@ -0,0 +1,62 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.amaterasu.leader.common.utilities
+
+
+import com.beust.klaxon.Klaxon
+import com.importre.crayon.bold
+import com.importre.crayon.brightWhite
+import com.importre.crayon.green
+import com.importre.crayon.red
+import org.apache.amaterasu.common.execution.actions.Notification
+import org.apache.amaterasu.common.execution.actions.enums.NotificationType
+import javax.jms.Message
+import javax.jms.MessageListener
+import javax.jms.TextMessage
+
+class ActiveReportListener : MessageListener {
+
+    override fun onMessage(message: Message): Unit = when (message) {
+        is TextMessage -> try {
+            val notification = Klaxon().parse<Notification>(message.text)
+            notification?.let { printNotification(it) } ?: print("")
+
+        } catch (e: Exception) {
+            println(e.message)
+        }
+        else -> println("===> Unknown message")
+    }
+
+    private fun printNotification(notification: Notification) = when (notification.notType) {
+
+        NotificationType.Info ->
+            println("===> ${notification.msg} ".brightWhite().bold())
+        NotificationType.Success ->
+            println("===> ${notification.line}".green().bold())
+        NotificationType.Error -> {
+            println("===> ${notification.line}".red().bold())
+            println("===> ${notification.msg} ".red().bold())
+
+        }
+
+    }
+
+}
diff --git a/leader-common/src/main/kotlin/org/apache/amaterasu/leader/common/utilities/DataLoader.kt b/leader-common/src/main/kotlin/org/apache/amaterasu/leader/common/utilities/DataLoader.kt
new file mode 100644
index 0000000..dc76c62
--- /dev/null
+++ b/leader-common/src/main/kotlin/org/apache/amaterasu/leader/common/utilities/DataLoader.kt
@@ -0,0 +1,125 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.amaterasu.leader.common.utilities
+
+import java.io.File
+import java.io.FileInputStream
+import java.nio.file.Files
+import java.nio.file.Paths
+
+import com.fasterxml.jackson.databind.ObjectMapper
+import com.fasterxml.jackson.dataformat.yaml.YAMLFactory
+import com.fasterxml.jackson.module.kotlin.KotlinModule
+import com.fasterxml.jackson.module.kotlin.readValue
+import org.apache.amaterasu.common.configuration.ClusterConfig
+import org.apache.amaterasu.common.dataobjects.TaskData
+import org.apache.amaterasu.common.dataobjects.ActionData
+import org.apache.amaterasu.common.dataobjects.ExecData
+
+import org.apache.amaterasu.common.execution.dependencies.Dependencies
+import org.apache.amaterasu.common.execution.dependencies.PythonDependencies
+import org.apache.amaterasu.common.logging.KLogging
+
+import org.apache.amaterasu.common.runtime.Environment
+import org.yaml.snakeyaml.Yaml
+
+
+object DataLoader : KLogging() {
+
+    private val mapper = ObjectMapper()
+
+    private val ymlMapper = ObjectMapper(YAMLFactory())
+
+    init {
+        mapper.registerModule(KotlinModule())
+        ymlMapper.registerModule(KotlinModule())
+    }
+
+    @JvmStatic
+    fun getTaskDataBytes(actionData: ActionData, env: String): ByteArray {
+        return mapper.writeValueAsBytes(getTaskData(actionData, env))
+    }
+
+    @JvmStatic
+    fun getTaskData(actionData: ActionData, env: String): TaskData {
+        val srcFile = actionData.src
+        var src = ""
+
+        if (srcFile.isNotEmpty()) {
+            src = File("repo/src/$srcFile").readText()
+        }
+
+        val envValue = File("repo/env/$env/job.yml").readText()
+
+        val envData = ymlMapper.readValue<Environment>(envValue)
+
+        val exports = actionData.exports
+
+        return TaskData(src, envData, actionData.groupId, actionData.typeId, exports)
+    }
+
+    @JvmStatic
+    fun getTaskDataString(actionData: ActionData, env: String): String {
+        return mapper.writeValueAsString(getTaskData(actionData, env))
+    }
+
+    @JvmStatic
+    fun getExecutorDataBytes(env: String, clusterConf: ClusterConfig): ByteArray {
+        return mapper.writeValueAsBytes(getExecutorData(env, clusterConf))
+    }
+
+    @JvmStatic
+    fun getExecutorData(env: String, clusterConf: ClusterConfig): ExecData {
+
+        // loading the job configuration
+        val envValue = File("repo/env/$env/job.yml").readText() //TODO: change this to YAML
+        val envData = ymlMapper.readValue<Environment>(envValue)
+
+        // loading all additional configurations
+        val files = File("repo/env/$env/").listFiles().filter { it.isFile }.filter { it.name != "job.yml" }
+        val config = files.map { yamlToMap(it) }.toMap()
+
+        // loading the job's dependencies
+        var depsData: Dependencies? = null
+        var pyDepsData: PythonDependencies? = null
+
+        if (Files.exists(Paths.get("repo/deps/jars.yml"))) {
+            val depsValue = File("repo/deps/jars.yml").readText()
+            depsData = ymlMapper.readValue(depsValue)
+        }
+        if (Files.exists(Paths.get("repo/deps/python.yml"))) {
+            val pyDepsValue = File("repo/deps/python.yml").readText()
+            pyDepsData = ymlMapper.readValue(pyDepsValue)
+        }
+
+        return ExecData(envData, depsData, pyDepsData, config)
+    }
+
+    fun yamlToMap(file: File): Pair<String, Map<String, Any>> {
+
+        val yaml = Yaml()
+        val conf = yaml.load<Map<String, Any>>(FileInputStream(file))
+
+        return file.name.replace(".yml", "") to conf
+    }
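+
+    // e.g. a file named spark.yml containing "master: local" is mapped to
+    // the pair ("spark", {master=local})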
+
+    @JvmStatic
+    fun getExecutorDataString(env: String, clusterConf: ClusterConfig): String {
+        return mapper.writeValueAsString(getExecutorData(env, clusterConf))
+    }
+
+}
\ No newline at end of file
diff --git a/leader-common/src/main/kotlin/org/apache/amaterasu/leader/common/utilities/MessagingClientUtil.kt b/leader-common/src/main/kotlin/org/apache/amaterasu/leader/common/utilities/MessagingClientUtil.kt
new file mode 100644
index 0000000..7e21851
--- /dev/null
+++ b/leader-common/src/main/kotlin/org/apache/amaterasu/leader/common/utilities/MessagingClientUtil.kt
@@ -0,0 +1,57 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.amaterasu.leader.common.utilities
+
+import org.apache.activemq.ActiveMQConnectionFactory
+import java.net.InetAddress
+import java.net.ServerSocket
+import javax.jms.MessageConsumer
+import javax.jms.Session
+
+object MessagingClientUtil {
+
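+    /**
+     * Connects to the given ActiveMQ broker and subscribes an ActiveReportListener to
+     * the JOB.REPORT topic, returning the consumer so the caller can keep it alive.
+     */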
+    @JvmStatic
+    fun setupMessaging(brokerURL: String): MessageConsumer {
+
+        val cf = ActiveMQConnectionFactory(brokerURL)
+        val conn = cf.createConnection()
+        conn.start()
+
+        val session = conn.createSession(false, Session.AUTO_ACKNOWLEDGE)
+        //TODO: move to a const in common
+        val destination = session.createTopic("JOB.REPORT")
+
+        val consumer = session.createConsumer(destination)
+        consumer.setMessageListener(ActiveReportListener())
+
+        return consumer
+    }
+
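+    // Binds a ServerSocket on port 0 so the OS picks a free port. Note that the socket
+    // is closed before the broker binds, so another process could in principle grab
+    // the port in the interim.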
+    private fun generatePort(): Int {
+        val socket = ServerSocket(0)
+        val port = socket.localPort
+        socket.close()
+        return port
+    }
+
+    @JvmStatic
+    val brokerAddress: String
+        get() {
+            val host: String = InetAddress.getLocalHost().hostName
+            return "tcp://$host:${generatePort()}"
+        }
+}
\ No newline at end of file
diff --git a/leader/src/main/scala/org/apache/amaterasu/leader/execution/frameworks/FrameworkProvidersFactory.scala b/leader-common/src/main/scala/org/apache/amaterasu/leader/common/execution/frameworks/FrameworkProvidersFactory.scala
similarity index 94%
rename from leader/src/main/scala/org/apache/amaterasu/leader/execution/frameworks/FrameworkProvidersFactory.scala
rename to leader-common/src/main/scala/org/apache/amaterasu/leader/common/execution/frameworks/FrameworkProvidersFactory.scala
index 7630221..a748690 100644
--- a/leader/src/main/scala/org/apache/amaterasu/leader/execution/frameworks/FrameworkProvidersFactory.scala
+++ b/leader-common/src/main/scala/org/apache/amaterasu/leader/common/execution/frameworks/FrameworkProvidersFactory.scala
@@ -14,7 +14,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.amaterasu.leader.execution.frameworks
+package org.apache.amaterasu.leader.common.execution.frameworks
 
 import org.apache.amaterasu.common.configuration.ClusterConfig
 import org.apache.amaterasu.common.logging.Logging
@@ -51,6 +51,7 @@
 
       provider.init(env, config)
       log.info(s"a provider for group ${provider.getGroupIdentifier} was created")
+      log.info(s"config = $config")
       (provider.getGroupIdentifier, provider)
 
     }).toMap
diff --git a/leader-common/src/main/scala/org/apache/amaterasu/leader/common/utilities/DataLoader.scala b/leader-common/src/main/scala/org/apache/amaterasu/leader/common/utilities/DataLoader.scala
deleted file mode 100755
index e417905..0000000
--- a/leader-common/src/main/scala/org/apache/amaterasu/leader/common/utilities/DataLoader.scala
+++ /dev/null
@@ -1,106 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.amaterasu.leader.common.utilities
-import scala.collection.JavaConverters._
-import java.io.{File, FileInputStream}
-import java.nio.file.{Files, Paths}
-
-import com.fasterxml.jackson.databind.ObjectMapper
-import com.fasterxml.jackson.dataformat.yaml.YAMLFactory
-import com.fasterxml.jackson.module.scala.DefaultScalaModule
-import org.apache.amaterasu.common.configuration.ClusterConfig
-import org.apache.amaterasu.common.dataobjects.{ActionData, ExecData, TaskData}
-import org.apache.amaterasu.common.execution.dependencies.{Dependencies, PythonDependencies}
-import org.apache.amaterasu.common.logging.Logging
-import org.apache.amaterasu.common.runtime.Environment
-import org.yaml.snakeyaml.Yaml
-
-import scala.collection.JavaConverters._
-import scala.collection.mutable
-import scala.io.Source
-
-
-object DataLoader extends Logging {
-
-  val mapper = new ObjectMapper()
-  mapper.registerModule(DefaultScalaModule)
-
-  val ymlMapper = new ObjectMapper(new YAMLFactory())
-  ymlMapper.registerModule(DefaultScalaModule)
-
-  def getTaskData(actionData: ActionData, env: String): TaskData = {
-    val srcFile = actionData.getSrc
-    val src = Source.fromFile(s"repo/src/$srcFile").mkString
-    val envValue = Source.fromFile(s"repo/env/$env/job.yml").mkString
-
-    val envData = ymlMapper.readValue(envValue, classOf[Environment])
-
-    val exports = actionData.getExports.asScala.toMap // Kotlin to Scala TODO: Remove me as fast as you can
-
-    TaskData(src, envData, actionData.getGroupId, actionData.getTypeId, exports)
-  }
-
-  def getTaskDataBytes(actionData: ActionData, env: String): Array[Byte] = {
-    mapper.writeValueAsBytes(getTaskData(actionData, env))
-  }
-
-  def getTaskDataString(actionData: ActionData, env: String): String = {
-    mapper.writeValueAsString(getTaskData(actionData, env))
-  }
-
-  def getExecutorData(env: String, clusterConf: ClusterConfig): ExecData = {
-
-    // loading the job configuration
-    val envValue = Source.fromFile(s"repo/env/$env/job.yml").mkString //TODO: change this to YAML
-    val envData = ymlMapper.readValue(envValue, classOf[Environment])
-    // loading all additional configurations
-    val files = new File(s"repo/env/$env/").listFiles().filter(_.isFile).filter(_.getName != "job.yml")
-    val config = files.map(yamlToMap).toMap
-    // loading the job's dependencies
-    var depsData: Dependencies = null
-    var pyDepsData: PythonDependencies = null
-    if (Files.exists(Paths.get("repo/deps/jars.yml"))) {
-      val depsValue = Source.fromFile(s"repo/deps/jars.yml").mkString
-      depsData = ymlMapper.readValue(depsValue, classOf[Dependencies])
-    }
-    if (Files.exists(Paths.get("repo/deps/python.yml"))) {
-      val pyDepsValue = Source.fromFile(s"repo/deps/python.yml").mkString
-      pyDepsData = ymlMapper.readValue(pyDepsValue, classOf[PythonDependencies])
-    }
-    val data = mapper.writeValueAsBytes(ExecData(envData, depsData, pyDepsData, config))
-    ExecData(envData, depsData, pyDepsData, config)
-  }
-
-  def getExecutorDataBytes(env: String, clusterConf: ClusterConfig): Array[Byte] = {
-    mapper.writeValueAsBytes(getExecutorData(env, clusterConf))
-  }
-
-  def getExecutorDataString(env: String, clusterConf: ClusterConfig): String = {
-    mapper.writeValueAsString(getExecutorData(env, clusterConf))
-  }
-
-  def yamlToMap(file: File): (String, Map[String, Any]) = {
-
-    val yaml = new Yaml()
-    val conf = yaml.load(new FileInputStream(file)).asInstanceOf[java.util.Map[String, Any]].asScala.toMap
-
-    (file.getName.replace(".yml",""), conf)
-  }
-
-}
-
-class ConfMap[String,  T <: ConfMap[String, T]] extends mutable.ListMap[String, Either[String, T]]
\ No newline at end of file
diff --git a/leader-common/src/main/scala/org/apache/amaterasu/leader/common/utilities/MemoryFormatParser.scala b/leader-common/src/main/scala/org/apache/amaterasu/leader/common/utilities/MemoryFormatParser.scala
index cabe0e5..c9df942 100644
--- a/leader-common/src/main/scala/org/apache/amaterasu/leader/common/utilities/MemoryFormatParser.scala
+++ b/leader-common/src/main/scala/org/apache/amaterasu/leader/common/utilities/MemoryFormatParser.scala
@@ -24,7 +24,7 @@
     if (lower.contains("mb")) {
       result = lower.replace("mb", "").toInt
     } else if (lower.contains("gb") | lower.contains("g")) {
-      result = lower.replace("g", "").replace("b","").toInt * 1024
+      result = lower.replace("g", "").replace("b", "").toInt * 1024
     } else {
       result = lower.toInt
     }
diff --git a/leader-common/src/test/kotlin/org/apache/amaterasu/leader/common/configuration/ConfigManagerTests.kt b/leader-common/src/test/kotlin/org/apache/amaterasu/leader/common/configuration/ConfigManagerTests.kt
index 1aa729b..0e87cae 100644
--- a/leader-common/src/test/kotlin/org/apache/amaterasu/leader/common/configuration/ConfigManagerTests.kt
+++ b/leader-common/src/test/kotlin/org/apache/amaterasu/leader/common/configuration/ConfigManagerTests.kt
@@ -16,15 +16,16 @@
  */
 package org.apache.amaterasu.leader.common.configuration
 
-
 import org.jetbrains.spek.api.Spek
-import org.jetbrains.spek.api.dsl.*
-import kotlin.test.assertEquals
+import org.jetbrains.spek.api.dsl.given
+import org.jetbrains.spek.api.dsl.it
+import org.jetbrains.spek.api.dsl.on
 import java.io.File
+import kotlin.test.assertEquals
 
-object ConfigManagerTests : Spek({
+class ConfigManagerTests : Spek({
 
-    val marker = this.javaClass.getResource("/maki.yml").path
+    val marker = this.javaClass.getResource("/maki.yml")!!.path
 
     given("a ConfigManager for a job ") {
 
diff --git a/leader-common/src/test/kotlin/org/apache/amaterasu/leader/common/dsl/JobParserArtifactTests.kt b/leader-common/src/test/kotlin/org/apache/amaterasu/leader/common/dsl/JobParserArtifactTests.kt
new file mode 100644
index 0000000..95677c6
--- /dev/null
+++ b/leader-common/src/test/kotlin/org/apache/amaterasu/leader/common/dsl/JobParserArtifactTests.kt
@@ -0,0 +1,69 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.amaterasu.leader.common.dsl
+
+import org.apache.amaterasu.common.dataobjects.ActionData
+import org.apache.curator.framework.CuratorFrameworkFactory
+import org.apache.curator.retry.ExponentialBackoffRetry
+import org.apache.curator.test.TestingServer
+import org.apache.zookeeper.CreateMode
+import org.jetbrains.spek.api.Spek
+import org.jetbrains.spek.api.dsl.given
+import org.jetbrains.spek.api.dsl.it
+import java.util.concurrent.LinkedBlockingQueue
+import kotlin.test.assertTrue
+import kotlin.test.assertEquals
+
+/*
+ * This Spek test verifies how the JobParser handles artifacts and repositories.
+ */
+object JobParserArtifactTests : Spek({
+
+    val retryPolicy = ExponentialBackoffRetry(1000, 3)
+    val server = TestingServer(2182, true)
+    val client = CuratorFrameworkFactory.newClient(server.connectString, retryPolicy)
+    client.start()
+
+    val jobId = "job_${System.currentTimeMillis()}"
+    val yaml = this::class.java.getResource("/artifact-maki.yaml").readText()
+    val queue = LinkedBlockingQueue<ActionData>()
+
+    // this will be performed by the job bootstrapper
+    client.create().withMode(CreateMode.PERSISTENT).forPath("/$jobId")
+
+    given("a valid maki.yaml file ") {
+
+        val job = JobParser.parse(jobId, yaml, queue, client, 1)
+
+        it("loads the amaterasu-artifact-test job") {
+            assertEquals(job.name, "amaterasu-artifact-test")
+        }
+
+        it("also loads the artifact"){
+            assert( job.registeredActions["0000000000"]!!.data.hasArtifact)
+        }
+
+        it("also loads the repo"){
+            val repo = job.registeredActions["0000000000"]!!.data.repo
+
+            //TODO: replace with an ASF repo
+            assertEquals(repo.url, "https://packagecloud.io/yanivr/amaterasu-demo/maven2" )
+        }
+
+    }
+})
\ No newline at end of file
diff --git a/leader-common/src/test/resources/artifact-maki.yaml b/leader-common/src/test/resources/artifact-maki.yaml
new file mode 100644
index 0000000..c44bd7d
--- /dev/null
+++ b/leader-common/src/test/resources/artifact-maki.yaml
@@ -0,0 +1,42 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+---
+job-name: amaterasu-artifact-test
+flow:
+- name: start
+  runner:
+    group: spark
+    type: scala-jar
+  artifact:
+    groupId: io.shonto
+    artifactId: amaterasu-simple-spar
+    version: 1.0-SNAPSHOT
+  repo:
+    id: packagecloud
+    type: default
+    url: https://packagecloud.io/yanivr/amaterasu-demo/maven2
+  class: DataGenerator
+  exports:
+    odd: parquet
+- name: step2
+  runner:
+    group: spark
+    type: scala-jar
+  file: file2.scala
+...
\ No newline at end of file
diff --git a/leader/src/main/scala/org/apache/amaterasu/leader/package.scala b/leader-mesos/build.gradle
old mode 100755
new mode 100644
similarity index 92%
rename from leader/src/main/scala/org/apache/amaterasu/leader/package.scala
rename to leader-mesos/build.gradle
index b7b0407..cb08d6a
--- a/leader/src/main/scala/org/apache/amaterasu/leader/package.scala
+++ b/leader-mesos/build.gradle
@@ -14,8 +14,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.amaterasu
 
-package object leader {
 
-}
+
+
diff --git a/leader-yarn/build.gradle b/leader-yarn/build.gradle
new file mode 100644
index 0000000..dcea40f
--- /dev/null
+++ b/leader-yarn/build.gradle
@@ -0,0 +1,108 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+buildscript {
+
+    repositories {
+        mavenCentral()
+        maven {
+            url 'http://repository.jetbrains.com/all'
+        }
+        maven {
+            url "https://jetbrains.jfrog.io/jetbrains/spek-snapshots"
+        }
+    }
+
+    dependencies {
+        classpath "org.jetbrains.kotlin:kotlin-gradle-plugin:$kotlin_version"
+        classpath 'org.junit.platform:junit-platform-gradle-plugin:1.0.0'
+    }
+}
+
+plugins {
+    id "com.github.johnrengelman.shadow" version "2.0.4"
+    id 'scala'
+}
+
+apply plugin: 'kotlin'
+apply plugin: 'org.junit.platform.gradle.plugin'
+
+junitPlatform {
+    filters {
+        engines {
+            include 'spek'
+        }
+    }
+}
+
+sourceCompatibility = 1.8
+targetCompatibility = 1.8
+
+shadowJar {
+    zip64 true
+}
+
+repositories {
+    maven { url "https://plugins.gradle.org/m2/" }
+    maven { url 'http://repository.jetbrains.com/all' }
+    maven { url "https://jetbrains.jfrog.io/jetbrains/spek-snapshots" }
+    maven { url "http://dl.bintray.com/jetbrains/spek" }
+    maven { url "http://oss.jfrog.org/artifactory/oss-snapshot-local" }
+
+    mavenCentral()
+    jcenter()
+}
+
+dependencies {
+    compile project(':leader-common')
+    compile project(':amaterasu-sdk')
+
+    compile "org.jetbrains.kotlin:kotlin-stdlib-jdk8:$kotlin_version"
+    compile "org.jetbrains.kotlin:kotlin-reflect"
+    compile group: 'org.jetbrains.kotlinx', name: 'kotlinx-coroutines-core', version: '1.1.1'
+
+    testCompile 'org.jetbrains.spek:spek-api:1.1.5'
+    testCompile "org.jetbrains.kotlin:kotlin-test-junit:$kotlin_version"
+    testCompile 'org.apache.curator:curator-test:2.9.1'
+    testRuntime 'org.jetbrains.spek:spek-junit-platform-engine:1.1.5'
+
+    // Spek requires kotlin-reflect, can be omitted if already in the classpath
+    testRuntimeOnly "org.jetbrains.kotlin:kotlin-reflect:$kotlin_version"
+}
+
+task copyToHomeRoot(type: Copy) {
+    from 'src/main/scripts'
+    into '../build/amaterasu/'
+}
+
+task copyToHomeBin(type: Copy) {
+    dependsOn shadowJar
+    from 'build/libs'
+    into '../build/amaterasu/bin'
+}
+
+task copyToHome() {
+    dependsOn copyToHomeRoot
+    dependsOn copyToHomeBin
+}
+
+compileKotlin{
+    kotlinOptions.jvmTarget = "1.8"
+}
+
+compileTestKotlin {
+    kotlinOptions.jvmTarget = "1.8"
+}
diff --git a/leader-yarn/src/main/kotlin/org/apache/amaterasu/leader/yarn/AppMasterArgsParser.kt b/leader-yarn/src/main/kotlin/org/apache/amaterasu/leader/yarn/AppMasterArgsParser.kt
new file mode 100644
index 0000000..112d935
--- /dev/null
+++ b/leader-yarn/src/main/kotlin/org/apache/amaterasu/leader/yarn/AppMasterArgsParser.kt
@@ -0,0 +1,37 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.amaterasu.leader.yarn
+
+import org.apache.amaterasu.leader.common.launcher.AmaOpts
+import org.apache.amaterasu.leader.common.launcher.ArgsParser
+import org.apache.amaterasu.leader.common.utilities.MessagingClientUtil
+
+class AppMasterArgsParser : ArgsParser() {
+
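+    // Builds the parsed launcher options, stands up the AM's embedded ActiveMQ broker
+    // on a free port, and then hands control to ApplicationMaster.execute().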
+    override fun run() {
+
+        val opts = AmaOpts(repo, branch, env, name, jobId, newJobId, report, home)
+
+        val appMaster = ApplicationMaster()
+        appMaster.address = MessagingClientUtil.brokerAddress
+        println("broker address is ${appMaster.address}")
+        appMaster.broker.addConnector(appMaster.address)
+        appMaster.broker.start()
+
+        appMaster.execute(opts)
+    }
+}
diff --git a/leader-yarn/src/main/kotlin/org/apache/amaterasu/leader/yarn/ApplicationMaster.kt b/leader-yarn/src/main/kotlin/org/apache/amaterasu/leader/yarn/ApplicationMaster.kt
new file mode 100644
index 0000000..3e8a7b6
--- /dev/null
+++ b/leader-yarn/src/main/kotlin/org/apache/amaterasu/leader/yarn/ApplicationMaster.kt
@@ -0,0 +1,455 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.amaterasu.leader.yarn
+
+import com.fasterxml.jackson.databind.ObjectMapper
+import com.fasterxml.jackson.dataformat.yaml.YAMLFactory
+import com.fasterxml.jackson.module.kotlin.KotlinModule
+
+import kotlinx.coroutines.async
+import kotlinx.coroutines.runBlocking
+
+import org.apache.activemq.broker.BrokerService
+
+import org.apache.amaterasu.common.configuration.ClusterConfig
+import org.apache.amaterasu.common.dataobjects.ActionData
+import org.apache.amaterasu.common.logging.KLogging
+import org.apache.amaterasu.leader.common.configuration.ConfigManager
+import org.apache.amaterasu.leader.common.execution.JobLoader
+import org.apache.amaterasu.leader.common.execution.JobManager
+import org.apache.amaterasu.leader.common.execution.frameworks.FrameworkProvidersFactory
+import org.apache.amaterasu.leader.common.launcher.AmaOpts
+import org.apache.amaterasu.leader.common.utilities.DataLoader
+import org.apache.amaterasu.leader.common.utilities.MessagingClientUtil
+import org.apache.amaterasu.sdk.frameworks.FrameworkSetupProvider
+import org.apache.amaterasu.sdk.frameworks.RunnerSetupProvider
+
+import org.apache.curator.framework.CuratorFramework
+import org.apache.curator.framework.CuratorFrameworkFactory
+import org.apache.curator.framework.recipes.barriers.DistributedBarrier
+import org.apache.curator.retry.ExponentialBackoffRetry
+
+import org.apache.hadoop.fs.FileSystem
+import org.apache.hadoop.fs.Path
+import org.apache.hadoop.io.DataOutputBuffer
+import org.apache.hadoop.security.UserGroupInformation
+import org.apache.hadoop.yarn.api.records.*
+import org.apache.hadoop.yarn.client.api.AMRMClient
+import org.apache.hadoop.yarn.client.api.async.AMRMClientAsync
+import org.apache.hadoop.yarn.client.api.async.NMClientAsync
+import org.apache.hadoop.yarn.client.api.async.impl.NMClientAsyncImpl
+import org.apache.hadoop.yarn.conf.YarnConfiguration
+import org.apache.hadoop.yarn.exceptions.YarnException
+import org.apache.hadoop.yarn.security.AMRMTokenIdentifier
+import org.apache.hadoop.yarn.util.Records
+
+import org.apache.zookeeper.CreateMode
+
+import java.io.File
+import java.io.FileInputStream
+import java.io.IOException
+import java.nio.ByteBuffer
+import java.util.concurrent.ConcurrentHashMap
+import java.util.concurrent.ConcurrentLinkedQueue
+import java.util.concurrent.LinkedBlockingQueue
+
+import javax.jms.MessageConsumer
+
+class ApplicationMaster : KLogging(), AMRMClientAsync.CallbackHandler {
+
+    lateinit var address: String
+
+    val broker: BrokerService = BrokerService()
+    private val conf = YarnConfiguration()
+
+    private val nmClient: NMClientAsync = NMClientAsyncImpl(YarnNMCallbackHandler())
+    private val actionsBuffer = ConcurrentLinkedQueue<ActionData>()
+    private val completedContainersAndTaskIds = ConcurrentHashMap<Long, String>()
+    private val containersIdsToTask = ConcurrentHashMap<Long, ActionData>()
+    private val yamlMapper = ObjectMapper(YAMLFactory())
+
+    private lateinit var propPath: String
+    private lateinit var props: FileInputStream
+    private lateinit var jobManager: JobManager
+    private lateinit var zkClient: CuratorFramework
+    private lateinit var env: String
+    private lateinit var rmClient: AMRMClientAsync<AMRMClient.ContainerRequest>
+    private lateinit var frameworkFactory: FrameworkProvidersFactory
+    private lateinit var config: ClusterConfig
+    private lateinit var fs: FileSystem
+    private lateinit var consumer: MessageConsumer
+    private lateinit var configManager: ConfigManager
+
+    init {
+        yamlMapper.registerModule(KotlinModule())
+    }
+
+    fun execute(opts: AmaOpts) {
+
+        propPath = System.getenv("PWD") + "/amaterasu.properties"
+        props = FileInputStream(File(propPath))
+
+        // no need for HDFS double check (nod to Aaron Rodgers)
+        // jars on HDFS should have been verified by the YARN zkClient
+        config = ClusterConfig.apply(props)
+        fs = FileSystem.get(conf)
+
+        initJob(opts)
+
+        // now that the job was initiated, the curator zkClient is Started and we can
+        // register the broker's address
+        zkClient.create().withMode(CreateMode.PERSISTENT).forPath("/${jobManager.jobId}/broker")
+        zkClient.setData().forPath("/${jobManager.jobId}/broker", address.toByteArray())
+
+        // once the broker is registered, we can remove the barrier so clients can connect
+        log.info("/${jobManager.jobId}-report-barrier")
+        val barrier = DistributedBarrier(zkClient, "/${jobManager.jobId}-report-barrier")
+        barrier.removeBarrier()
+
+        consumer = MessagingClientUtil.setupMessaging(address)
+
+        log.info("number of messages ${broker.adminView.totalMessageCount}")
+
+        // Initialize clients to ResourceManager and NodeManagers
+        nmClient.init(conf)
+        nmClient.start()
+
+        rmClient = startRMClient()
+
+        val items = mutableListOf<FrameworkSetupProvider>()
+
+        for (p in frameworkFactory.providers().values()) {
+            items.add(p)
+        }
+        val configItems = items.flatMap { it.configurationItems.asIterable() }
+        configManager = ConfigManager(env, "repo", configItems)
+
+        val registrationResponse = rmClient.registerApplicationMaster("", 0, "")
+        val maxMem = registrationResponse.maximumResourceCapability.memorySize
+        val maxVCores = registrationResponse.maximumResourceCapability.virtualCores
+
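+        // Request one container per action until the job runs out of actions, capping
+        // each request at the cluster's maximum memory and vcore capability.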
+        while (!jobManager.outOfActions) {
+            val capability = Records.newRecord(Resource::class.java)
+
+            val actionData = jobManager.nextActionData
+            if (actionData != null) {
+
+                val frameworkProvider = frameworkFactory.getFramework(actionData.groupId)
+                val driverConfiguration = frameworkProvider.driverConfiguration
+
+                var mem: Long = driverConfiguration.memory.toLong()
+                mem = Math.min(mem, maxMem)
+                capability.memorySize = mem
+
+                var cpu = driverConfiguration.cpus
+                cpu = Math.min(cpu, maxVCores)
+                capability.virtualCores = cpu
+
+                createTaskConfiguration(actionData)
+                requestContainer(actionData, capability)
+
+            }
+        }
+
+        log.info("Finished requesting containers")
+        readLine()
+    }
+
+    private fun initJob(opts: AmaOpts) {
+
+        this.env = opts.env
+        frameworkFactory = FrameworkProvidersFactory.apply(env, config)
+
+        try {
+            val retryPolicy = ExponentialBackoffRetry(1000, 3)
+            zkClient = CuratorFrameworkFactory.newClient(config.zk(), retryPolicy)
+            zkClient.start()
+        } catch (e: Exception) {
+            log.error("Error connecting to zookeeper", e)
+            throw e
+        }
+
+        val zkPath = zkClient.checkExists().forPath("/${opts.newJobId}")
+
+        log.info("zkPath is $zkPath")
+        if (zkPath != null) {
+            log.info("resuming job" + opts.newJobId)
+            jobManager = JobLoader.reloadJob(
+                    opts.newJobId,
+                    zkClient,
+                    config.Jobs().tasks().attempts(),
+                    LinkedBlockingQueue<ActionData>())
+
+        } else {
+            log.info("new job is being created")
+            try {
+
+                jobManager = JobLoader.loadJob(
+                        opts.repo,
+                        opts.branch,
+                        opts.newJobId,
+                        zkClient,
+                        config.Jobs().tasks().attempts(),
+                        LinkedBlockingQueue<ActionData>())
+            } catch (e: Exception) {
+                log.error("Error creating JobManager.", e)
+                throw e
+            }
+
+        }
+
+        jobManager.start()
+        log.info("Started jobManager")
+    }
+
+    override fun onContainersAllocated(containers: MutableList<Container>?): Unit = runBlocking {
+        containers?.let {
+            for (container in it) {
+
+                log.info("container ${container.id} allocated")
+                if (actionsBuffer.isNotEmpty()) {
+                    val actionData = actionsBuffer.poll()
+                    async {
+
+                        val framework = frameworkFactory.getFramework(actionData.groupId)
+                        val runnerProvider = framework.getRunnerProvider(actionData.typeId)
+                        val ctx = Records.newRecord(ContainerLaunchContext::class.java)
+                        val commands: List<String> = listOf(runnerProvider.getCommand(jobManager.jobId, actionData, env, "${actionData.id}-${container.id.containerId}", address))
+
+                        log.info("container command ${commands.joinToString(prefix = " ", postfix = " ")}")
+                        ctx.commands = commands
+                        ctx.tokens = allTokens()
+                        ctx.localResources = setupContainerResources(framework, runnerProvider, actionData)
+                        ctx.environment = framework.environmentVariables
+
+                        nmClient.startContainerAsync(container, ctx)
+
+                        jobManager.actionStarted(actionData.id)
+                        containersIdsToTask[container.id.containerId] = actionData
+                        log.info("launching container succeeded: ${container.id.containerId}; task: ${actionData.id}")
+                    }
+                }
+            }
+        }
+    }
+
+    private fun allTokens(): ByteBuffer {
+        // creating the credentials for container execution
+        val credentials = UserGroupInformation.getCurrentUser().credentials
+        val dob = DataOutputBuffer()
+        credentials.writeTokenStorageToStream(dob)
+
+        // removing the AM->RM token so that containers cannot access it.
+        val iter = credentials.allTokens.iterator()
+        log.info("Executing with tokens:")
+        for (token in iter) {
+            log.info(token.toString())
+            if (token.kind == AMRMTokenIdentifier.KIND_NAME) iter.remove()
+        }
+        return ByteBuffer.wrap(dob.data, 0, dob.length)
+    }
+
+    /**
+     * Creates the map of resources to be copied into the container.
+     * @param framework the FrameworkSetupProvider for the action
+     * @param runnerProvider the action's runner provider
+     * @param actionData the action being launched
+     */
+    private fun setupContainerResources(framework: FrameworkSetupProvider, runnerProvider: RunnerSetupProvider, actionData: ActionData): Map<String, LocalResource> {
+
+        val yarnJarPath = Path(config.yarn().hdfsJarsPath())
+
+        // Getting framework (group) resources
+        val result = framework.groupResources.map { it.path to createLocalResourceFromPath(Path.mergePaths(yarnJarPath, createDistPath(it.path))) }.toMap().toMutableMap()
+
+        // Getting runner resources
+        result.putAll(runnerProvider.runnerResources.map { it to createLocalResourceFromPath(Path.mergePaths(yarnJarPath, createDistPath(it))) }.toMap())
+
+        // getting the action specific resources
+        result.putAll(runnerProvider.getActionResources(jobManager.jobId, actionData).map { it.removePrefix("${jobManager.jobId}/${actionData.name}/") to createLocalResourceFromPath(Path.mergePaths(yarnJarPath, createDistPath(it))) })
+
+        // getting the action specific dependencies
+        runnerProvider.getActionDependencies(jobManager.jobId, actionData).forEach { distributeFile(it, "${jobManager.jobId}/${actionData.name}/") }
+        result.putAll(runnerProvider.getActionDependencies(jobManager.jobId, actionData).map { File(it).name to createLocalResourceFromPath(Path.mergePaths(yarnJarPath, createDistPath("${jobManager.jobId}/${actionData.name}/$it"))) })
+
+        // Adding the Amaterasu configuration files
+        result["amaterasu.properties"] = createLocalResourceFromPath(Path.mergePaths(yarnJarPath, Path("/amaterasu.properties")))
+        result["log4j.properties"] = createLocalResourceFromPath(Path.mergePaths(yarnJarPath, Path("/log4j.properties")))
+
+        // getting the action executable
+        val executable = runnerProvider.getActionExecutable(jobManager.jobId, actionData)
+
+        // setting the action executable
+        distributeFile(executable, "${jobManager.jobId}/${actionData.name}/")
+        result[File(executable).name] = createLocalResourceFromPath(Path.mergePaths(yarnJarPath, createDistPath("${jobManager.jobId}/${actionData.name}/$executable")))
+
+        result.forEach { log.debug("entry ${it.key} with value ${it.value}") }
+        return result.map { x -> x.key.removePrefix("/") to x.value }.toMap()
+    }
+
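+    // Writes the per-action configuration trio (env.yaml, datastores.yaml and
+    // runtime.yaml) into the job's dist directory on HDFS so the container can read it.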
+    private fun createTaskConfiguration(actionData: ActionData) {
+
+        // setting up the configuration files for the container
+        val envYaml = configManager.getActionConfigContent(actionData.name, actionData.config)
+        writeConfigFile(envYaml, jobManager.jobId, actionData.name, "env.yaml")
+
+        val dataStores = DataLoader.getTaskData(actionData, env).exports
+        val dataStoresYaml = yamlMapper.writeValueAsString(dataStores)
+
+        writeConfigFile(dataStoresYaml, jobManager.jobId, actionData.name, "datastores.yaml")
+
+        writeConfigFile("jobId: ${jobManager.jobId}\nactionName: ${actionData.name}", jobManager.jobId, actionData.name, "runtime.yaml")
+
+    }
+
+    private fun writeConfigFile(content: String, jobId: String, actionName: String, fileName: String) {
+
+        val actionDistPath = createDistPath("$jobId/$actionName/$fileName")
+        val yarnJarPath = Path(config.yarn().hdfsJarsPath())
+        val targetPath = Path.mergePaths(yarnJarPath, actionDistPath)
+
+        val outputStream = fs.create(targetPath)
+        outputStream.writeUTF(content)
+        outputStream.close()
+        log.info("written file $targetPath")
+
+    }
+
+    private fun distributeFile(file: String, distributionPath: String) {
+
+        log.info("copying file $file, file status ${File(file).exists()}")
+
+        val actionDistPath = createDistPath("$distributionPath/$file")
+        val yarnJarPath = Path(config.yarn().hdfsJarsPath())
+        val targetPath = Path.mergePaths(yarnJarPath, actionDistPath)
+
+        log.info("target is $targetPath")
+
+        fs.copyFromLocalFile(false, true, Path(file), targetPath)
+
+    }
+
+    private fun createDistPath(path: String): Path = Path("/dist/$path")
+
+    private fun startRMClient(): AMRMClientAsync<AMRMClient.ContainerRequest> {
+        val client = AMRMClientAsync.createAMRMClientAsync<AMRMClient.ContainerRequest>(1000, this)
+        client.init(conf)
+        client.start()
+        return client
+    }
+
+    private fun createLocalResourceFromPath(path: Path): LocalResource {
+
+        val stat = fs.getFileStatus(path)
+        val fileResource = Records.newRecord(LocalResource::class.java)
+
+        fileResource.shouldBeUploadedToSharedCache = true
+        fileResource.visibility = LocalResourceVisibility.PUBLIC
+        fileResource.resource = URL.fromPath(path)
+        fileResource.size = stat.len
+        fileResource.timestamp = stat.modificationTime
+        fileResource.type = LocalResourceType.FILE
+        fileResource.visibility = LocalResourceVisibility.PUBLIC
+        return fileResource
+
+    }
+
+    private fun requestContainer(actionData: ActionData, capability: Resource) {
+
+        actionsBuffer.add(actionData)
+        log.info("About to ask container for action ${actionData.id} with mem ${capability.memory} and cores ${capability.virtualCores}. Action buffer size is: ${actionsBuffer.size}")
+
+        // we have an action to schedule, let's request a container
+        val priority: Priority = Records.newRecord(Priority::class.java)
+        priority.priority = 1
+        val containerReq = AMRMClient.ContainerRequest(capability, null, null, priority)
+        rmClient.addContainerRequest(containerReq)
+        log.info("Asked container for action ${actionData.id}")
+
+    }
+
+    override fun onNodesUpdated(updatedNodes: MutableList<NodeReport>?) {
+        log.info("Nodes change. Nothing to report.")
+    }
+
+    override fun onShutdownRequest() {
+        log.error("Shutdown requested.")
+        stopApplication(FinalApplicationStatus.KILLED, "Shutdown requested")
+    }
+
+    override fun getProgress(): Float {
+        // report the fraction of registered actions that have completed, guarding
+        // against division by zero before any actions are registered
+        if (jobManager.registeredActions.isEmpty()) return 0f
+        return completedContainersAndTaskIds.size.toFloat() / jobManager.registeredActions.size
+    }
+
+    override fun onError(e: Throwable?) {
+        log.error("Error on AM", e)
+        stopApplication(FinalApplicationStatus.FAILED, "Error on AM")
+    }
+
+    override fun onContainersCompleted(statuses: MutableList<ContainerStatus>?) {
+        for (status in statuses!!) {
+            if (status.state == ContainerState.COMPLETE) {
+
+                val containerId = status.containerId.containerId
+                val task = containersIdsToTask[containerId]
+                rmClient.releaseAssignedContainer(status.containerId)
+
+                val taskId = task!!.id
+                if (status.exitStatus == 0) {
+
+                    completedContainersAndTaskIds[containerId] = task.id
+                    jobManager.actionComplete(taskId)
+                    log.info("Container $containerId Complete with task $taskId with success.")
+                } else {
+                    // TODO: Check the getDiagnostics value and see if appropriate
+                    jobManager.actionFailed(taskId, status.diagnostics)
+                    log.warn("Container $containerId Complete with task $taskId with Failed status code (${status.exitStatus})")
+                }
+            }
+        }
+
+        if (jobManager.outOfActions) {
+            log.info("Finished all tasks successfully! Wow!")
+            jobManager.actionsCount()
+            stopApplication(FinalApplicationStatus.SUCCEEDED, "SUCCESS")
+        } else {
+            log.info("jobManager.registeredActions.size: ${jobManager.registeredActions.size}; completedContainersAndTaskIds.size: ${completedContainersAndTaskIds.size}")
+        }
+    }
+
+    private fun stopApplication(finalApplicationStatus: FinalApplicationStatus, appMessage: String) {
+
+        try {
+            rmClient.unregisterApplicationMaster(finalApplicationStatus, appMessage, null)
+        } catch (ex: YarnException) {
+
+            log.error("Failed to unregister application", ex)
+        } catch (e: IOException) {
+            log.error("Failed to unregister application", e)
+        }
+        rmClient.stop()
+        nmClient.stop()
+    }
+
+    companion object {
+        @JvmStatic
+        fun main(args: Array<String>) = AppMasterArgsParser().main(args)
+
+    }
+}
diff --git a/leader-yarn/src/main/kotlin/org/apache/amaterasu/leader/yarn/Client.kt b/leader-yarn/src/main/kotlin/org/apache/amaterasu/leader/yarn/Client.kt
new file mode 100644
index 0000000..3c6845b
--- /dev/null
+++ b/leader-yarn/src/main/kotlin/org/apache/amaterasu/leader/yarn/Client.kt
@@ -0,0 +1,316 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.amaterasu.leader.yarn
+
+import org.apache.activemq.ActiveMQConnectionFactory
+import org.apache.amaterasu.common.configuration.ClusterConfig
+import org.apache.amaterasu.leader.common.launcher.AmaOpts
+import org.apache.amaterasu.leader.common.execution.frameworks.FrameworkProvidersFactory
+import org.apache.amaterasu.leader.common.utilities.ActiveReportListener
+import org.apache.amaterasu.leader.common.utilities.MessagingClientUtil
+import org.apache.curator.framework.CuratorFrameworkFactory
+import org.apache.curator.framework.recipes.barriers.DistributedBarrier
+import org.apache.curator.retry.ExponentialBackoffRetry
+import org.apache.hadoop.fs.*
+import org.apache.hadoop.security.UserGroupInformation
+import org.apache.hadoop.yarn.api.ApplicationConstants
+import org.apache.hadoop.yarn.api.records.*
+import org.apache.hadoop.yarn.client.api.YarnClient
+import org.apache.hadoop.yarn.client.api.YarnClientApplication
+import org.apache.hadoop.yarn.conf.YarnConfiguration
+import org.apache.hadoop.yarn.exceptions.YarnException
+import org.apache.hadoop.yarn.util.Apps
+import org.apache.hadoop.yarn.util.ConverterUtils
+import org.apache.hadoop.yarn.util.Records
+import org.apache.log4j.LogManager
+import org.slf4j.LoggerFactory
+
+import javax.jms.*
+import java.io.File
+import java.io.FileInputStream
+import java.io.IOException
+import java.util.*
+
+import java.lang.System.exit
+
+class Client {
+    private val conf = YarnConfiguration()
+    private var fs: FileSystem? = null
+    private lateinit var consumer: MessageConsumer
+
+    @Throws(IOException::class)
+    private fun setLocalResourceFromPath(path: Path): LocalResource {
+
+        val stat = fs!!.getFileStatus(path)
+        val fileResource = Records.newRecord(LocalResource::class.java)
+        fileResource.resource = ConverterUtils.getYarnUrlFromPath(path)
+        fileResource.size = stat.len
+        fileResource.timestamp = stat.modificationTime
+        fileResource.type = LocalResourceType.FILE
+        fileResource.visibility = LocalResourceVisibility.PUBLIC
+        return fileResource
+    }
+
+    @Throws(Exception::class)
+    fun run(opts: AmaOpts, args: Array<String>) {
+
+        LogManager.resetConfiguration()
+        val config = ClusterConfig()
+        config.load(FileInputStream(opts.home + "/amaterasu.properties"))
+
+        // Create yarnClient
+        val yarnClient = YarnClient.createYarnClient()
+        yarnClient.init(conf)
+        yarnClient.start()
+
+        // Create application via yarnClient
+        var app: YarnClientApplication? = null
+        try {
+            app = yarnClient.createApplication()
+        } catch (e: YarnException) {
+            LOGGER.error("Error initializing yarn application with yarn client.", e)
+            exit(1)
+        } catch (e: IOException) {
+            LOGGER.error("Error initializing yarn application with yarn client.", e)
+            exit(2)
+        }
+
+        // Setup jars on hdfs
+        try {
+            fs = FileSystem.get(conf)
+        } catch (e: IOException) {
+            LOGGER.error("Eror creating HDFS client isntance.", e)
+            exit(3)
+        }
+
+        val jarPath = Path(config.yarn().hdfsJarsPath())
+        val jarPathQualified = fs!!.makeQualified(jarPath)
+        val distPath = Path.mergePaths(jarPathQualified, Path("/dist/"))
+
+        val appContext = app!!.applicationSubmissionContext
+
+        var newId = ""
+
+        val newIdVal = appContext.applicationId.toString() + "-" + UUID.randomUUID().toString()
+        if (opts.jobId.isEmpty()) {
+            newId = "--new-job-id=" + newIdVal
+        }
+
+
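+        // The AM launch command: run the ApplicationMaster JVM inside the container,
+        // forwarding the client's CLI args (plus a generated job id for new jobs) and
+        // redirecting stdout/stderr into YARN's log directory.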
+        val commands = listOf("env AMA_NODE=" + System.getenv("AMA_NODE") +
+                " env HADOOP_USER_NAME=" + UserGroupInformation.getCurrentUser().userName +
+                " \$JAVA_HOME/bin/java" +
+                " -Dscala.usejavacp=false" +
+                " -Xmx2G" +
+                " org.apache.amaterasu.leader.yarn.ApplicationMaster " +
+                joinStrings(args) +
+                newId +
+                " 1>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stdout" +
+                " 2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stderr")
+
+        // Set up the container launch context for the application master
+        val amContainer = Records.newRecord(ContainerLaunchContext::class.java)
+        amContainer.commands = commands
+
+        // Setup local ama folder on hdfs.
+        try {
+
+            if (!fs!!.exists(jarPathQualified)) {
+                val home = File(opts.home)
+                fs!!.mkdirs(jarPathQualified)
+
+                for (f in home.listFiles()) {
+                    fs!!.copyFromLocalFile(false, true, Path(f.getAbsolutePath()), jarPathQualified)
+                }
+
+                // setup frameworks
+                val frameworkFactory = FrameworkProvidersFactory.apply(opts.env, config)
+                for (group in frameworkFactory.groups()) {
+                    val framework = frameworkFactory.getFramework(group)
+
+                    for (file in framework.groupResources) {
+                        if (file.exists())
+                            file.let {
+                                val target = Path.mergePaths(distPath, Path(it.path))
+                                fs!!.copyFromLocalFile(false, true, Path(file.path), target)
+                            }
+                    }
+                }
+            }
+
+        } catch (e: IOException) {
+            LOGGER.error("Error uploading ama folder to HDFS.", e)
+            exit(3)
+        } catch (ne: NullPointerException) {
+            LOGGER.error("No files in home dir.", ne)
+            exit(4)
+        }
+
+        // get version of build
+        val version = config.version()
+
+        // get local resources pointers that will be set on the master container env
+        val leaderJarPath = String.format("/bin/leader-%s-all.jar", version)
+        LOGGER.info("Leader Jar path is: {}", leaderJarPath)
+        val mergedPath = Path.mergePaths(jarPath, Path(leaderJarPath))
+
+        LOGGER.info("Leader merged jar path is: {}", mergedPath)
+        var propFile: LocalResource? = null
+        var log4jPropFile: LocalResource? = null
+
+        try {
+            propFile = setLocalResourceFromPath(Path.mergePaths(jarPath, Path("/amaterasu.properties")))
+            log4jPropFile = setLocalResourceFromPath(Path.mergePaths(jarPath, Path("/log4j.properties")))
+        } catch (e: IOException) {
+            LOGGER.error("Error initializing yarn local resources.", e)
+            exit(4)
+        }
+
+        // set local resource on master container
+        val localResources = HashMap<String, LocalResource>()
+
+        // making the bin folder's content available to the appMaster
+        val bin = fs!!.listFiles(Path.mergePaths(jarPath, Path("/bin")), true)
+
+        while (bin.hasNext()) {
+            val binFile = bin.next()
+            localResources[binFile.path.name] = setLocalResourceFromPath(binFile.path)
+        }
+
+        localResources["amaterasu.properties"] = propFile!!
+        localResources["log4j.properties"] = log4jPropFile!!
+        amContainer.localResources = localResources
+
+        // Setup CLASSPATH for ApplicationMaster
+        val appMasterEnv = HashMap<String, String>()
+        setupAppMasterEnv(appMasterEnv)
+        appMasterEnv["AMA_CONF_PATH"] = String.format("%s/amaterasu.properties", config.YARN().hdfsJarsPath())
+        amContainer.environment = appMasterEnv
+
+        // Set up resource type requirements for ApplicationMaster
+        val capability = Records.newRecord(Resource::class.java)
+        capability.memory = config.YARN().master().memoryMB()
+        capability.virtualCores = config.YARN().master().cores()
+
+        // Finally, set-up ApplicationSubmissionContext for the application
+        appContext.applicationName = "amaterasu-" + opts.name
+        appContext.amContainerSpec = amContainer
+        appContext.resource = capability
+        appContext.queue = config.YARN().queue()
+        appContext.priority = Priority.newInstance(1)
+
+        // Submit application
+        val appId = appContext.applicationId
+        LOGGER.info("Submitting application {}", appId)
+        try {
+            yarnClient.submitApplication(appContext)
+
+        } catch (e: YarnException) {
+            LOGGER.error("Error submitting application.", e)
+            exit(6)
+        } catch (e: IOException) {
+            LOGGER.error("Error submitting application.", e)
+            exit(7)
+        }
+
+        val zkClient = CuratorFrameworkFactory.newClient(config.zk(),
+                ExponentialBackoffRetry(1000, 3))
+        zkClient.start()
+
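+        // Block on the report barrier until the AM registers its broker address in
+        // ZooKeeper, then connect this client's report consumer to that address.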
+        val reportBarrier = DistributedBarrier(zkClient, "/$newIdVal-report-barrier")
+        reportBarrier.setBarrier()
+        reportBarrier.waitOnBarrier()
+
+        val address = String(zkClient.data.forPath("/$newIdVal/broker"))
+        println("===> $address")
+        consumer = MessagingClientUtil.setupMessaging(address)
+
+        var appReport: ApplicationReport? = null
+        var appState: YarnApplicationState
+
+        do {
+            try {
+                appReport = yarnClient.getApplicationReport(appId)
+            } catch (e: YarnException) {
+                LOGGER.error("Error getting application report.", e)
+                exit(8)
+            } catch (e: IOException) {
+                LOGGER.error("Error getting application report.", e)
+                exit(9)
+            }
+
+            appState = appReport!!.yarnApplicationState
+            if (isAppFinished(appState)) {
+                exit(0)
+                break
+            }
+
+            //LOGGER.info("Application not finished ({})", appReport.getProgress());
+            try {
+                Thread.sleep(100)
+            } catch (e: InterruptedException) {
+                LOGGER.error("Interrupted while waiting for job completion.", e)
+                exit(137)
+            }
+
+        } while (!isAppFinished(appState))
+
+        LOGGER.info("Application {} finished with state {}-{} at {}", appId, appState, appReport!!.finalApplicationStatus, appReport.finishTime)
+    }
+
+    private fun isAppFinished(appState: YarnApplicationState): Boolean {
+        return appState == YarnApplicationState.FINISHED ||
+                appState == YarnApplicationState.KILLED ||
+                appState == YarnApplicationState.FAILED
+    }
+
+    private fun setupAppMasterEnv(appMasterEnv: MutableMap<String, String>) {
+        Apps.addToEnvironment(appMasterEnv,
+                ApplicationConstants.Environment.CLASSPATH.name,
+                ApplicationConstants.Environment.PWD.`$`() + File.separator + "*", File.pathSeparator)
+
+        for (c in conf.getStrings(
+                YarnConfiguration.YARN_APPLICATION_CLASSPATH,
+                *YarnConfiguration.DEFAULT_YARN_APPLICATION_CLASSPATH)) {
+            Apps.addToEnvironment(appMasterEnv, ApplicationConstants.Environment.CLASSPATH.name,
+                    c.trim { it <= ' ' }, File.pathSeparator)
+        }
+    }
+
+    companion object {
+
+        private val LOGGER = LoggerFactory.getLogger(Client::class.java)
+
+        @Throws(Exception::class)
+        @JvmStatic
+        fun main(args: Array<String>) = ClientArgsParser(args).main(args)
+
+        private fun joinStrings(str: Array<String>): String =
+                str.joinToString(separator = " ", postfix = " ")
+    }
+}
\ No newline at end of file
diff --git a/leader/src/main/scala/org/apache/amaterasu/leader/package.scala b/leader-yarn/src/main/kotlin/org/apache/amaterasu/leader/yarn/ClientArgsParser.kt
old mode 100755
new mode 100644
similarity index 66%
copy from leader/src/main/scala/org/apache/amaterasu/leader/package.scala
copy to leader-yarn/src/main/kotlin/org/apache/amaterasu/leader/yarn/ClientArgsParser.kt
index b7b0407..b9f1e67
--- a/leader/src/main/scala/org/apache/amaterasu/leader/package.scala
+++ b/leader-yarn/src/main/kotlin/org/apache/amaterasu/leader/yarn/ClientArgsParser.kt
@@ -14,8 +14,16 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.amaterasu
+package org.apache.amaterasu.leader.yarn
 
-package object leader {
+import org.apache.amaterasu.leader.common.launcher.AmaOpts
+import org.apache.amaterasu.leader.common.launcher.ArgsParser
 
-}
+class ClientArgsParser(val args: Array<String>) : ArgsParser() {
+
+    override fun run() {
+        val opts = AmaOpts(repo, branch, env, name, jobId, newJobId, report, home)
+        val client = Client()
+        client.run(opts, args)
+    }
+}
\ No newline at end of file
diff --git a/leader-yarn/src/main/kotlin/org/apache/amaterasu/leader/yarn/YarnNMCallbackHandler.kt b/leader-yarn/src/main/kotlin/org/apache/amaterasu/leader/yarn/YarnNMCallbackHandler.kt
new file mode 100644
index 0000000..0702f01
--- /dev/null
+++ b/leader-yarn/src/main/kotlin/org/apache/amaterasu/leader/yarn/YarnNMCallbackHandler.kt
@@ -0,0 +1,55 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.amaterasu.leader.yarn
+
+import org.apache.amaterasu.common.logging.KLogging
+
+import java.nio.ByteBuffer
+
+import org.apache.hadoop.yarn.api.records.ContainerId
+import org.apache.hadoop.yarn.api.records.ContainerStatus
+import org.apache.hadoop.yarn.client.api.async.NMClientAsync
+
+
+
+class YarnNMCallbackHandler : KLogging() , NMClientAsync.CallbackHandler {
+
+    override fun onStartContainerError(containerId: ContainerId, t: Throwable) {
+        log.error("Container ${containerId.containerId} couldn't start.", t)
+    }
+
+    override fun onGetContainerStatusError(containerId: ContainerId, t: Throwable) {
+        log.error("Couldn't get status from container ${containerId.containerId}.", t)
+    }
+
+    override fun onContainerStatusReceived(containerId: ContainerId, containerStatus: ContainerStatus) {
+        log.info("Container ${containerId.containerId} has status of ${containerStatus.state}")
+    }
+
+    override fun onContainerStarted(containerId: ContainerId, allServiceResponse: Map<String, ByteBuffer>) {
+        log.info("Container ${containerId.containerId} Started")
+    }
+
+    override fun onStopContainerError(containerId: ContainerId, t: Throwable) {
+        log.error("Container ${containerId.containerId} has thrown an error", t)
+    }
+
+    override fun onContainerStopped(containerId: ContainerId) {
+        log.info("Container ${containerId.containerId} stopped")
+    }
+
+}
\ No newline at end of file
diff --git a/leader/build.gradle b/leader/build.gradle
index dc244fc..ce698ce 100644
--- a/leader/build.gradle
+++ b/leader/build.gradle
@@ -20,12 +20,15 @@
     id 'scala'
     id 'org.jetbrains.kotlin.jvm'
     id 'java'
-
 }
 
 sourceCompatibility = 1.8
 targetCompatibility = 1.8
 
+shadowJar {
+    zip64 true
+}
+
 repositories {
     maven {
         url "https://plugins.gradle.org/m2/"
@@ -45,27 +48,24 @@
     compile group: 'com.github.nscala-time', name: 'nscala-time_2.11', version: '2.2.0'
     compile group: 'org.apache.curator', name:'curator-test', version:'2.9.1'
     compile group: 'com.fasterxml.jackson.module', name: 'jackson-module-scala_2.11', version: '2.9.4'
-    compile group: 'com.fasterxml.jackson.core', name: 'jackson-core', version: '2.9.4'
-    compile group: 'com.fasterxml.jackson.core', name: 'jackson-annotations', version: '2.9.4'
-    compile group: 'com.fasterxml.jackson.core', name: 'jackson-databind', version: '2.9.4'
-    compile group: 'com.fasterxml.jackson.dataformat', name: 'jackson-dataformat-yaml', version: '2.9.4'
-    compile group: 'org.eclipse.jetty', name: 'jetty-plus', version: '9.2.19.v20160908'
-    compile group: 'org.eclipse.jetty', name: 'jetty-server', version: '9.2.19.v20160908'
-    compile group: 'org.eclipse.jetty', name: 'jetty-http', version: '9.2.19.v20160908'
-    compile group: 'org.eclipse.jetty', name: 'jetty-io', version: '9.2.19.v20160908'
-    compile group: 'org.eclipse.jetty', name: 'jetty-servlet', version: '9.2.19.v20160908'
+    compile group: 'com.fasterxml.jackson.core', name: 'jackson-core', version: '2.9.8'
+    compile group: 'com.fasterxml.jackson.core', name: 'jackson-annotations', version: '2.9.8'
+    compile group: 'com.fasterxml.jackson.core', name: 'jackson-databind', version: '2.9.8'
+    compile group: 'com.fasterxml.jackson.dataformat', name: 'jackson-dataformat-yaml', version: '2.9.8'
+    compile group: 'org.eclipse.jetty', name: 'jetty-plus', version: '9.4.14.v20181114'
+    compile group: 'org.eclipse.jetty', name: 'jetty-server', version: '9.4.14.v20181114'
+    compile group: 'org.eclipse.jetty', name: 'jetty-http', version: '9.4.14.v20181114'
+    compile group: 'org.eclipse.jetty', name: 'jetty-io', version: '9.4.14.v20181114'
+    compile group: 'org.eclipse.jetty', name: 'jetty-servlet', version: '9.4.14.v20181114'
     compile group: 'org.eclipse.jetty.toolchain', name: 'jetty-test-helper', version: '4.0'
     compile group: 'org.yaml', name: 'snakeyaml', version: '1.23'
     compile group: 'commons-cli', name: 'commons-cli', version: '1.2'
     compile group: 'org.jsoup', name: 'jsoup', version: '1.10.2'
     compile group: 'org.scala-lang.modules', name: 'scala-async_2.11', version: '0.9.6'
     compile group: 'org.jsoup', name: 'jsoup', version: '1.10.2'
-    compile group: 'org.reflections', name: 'reflections', version: '0.9.11'
-    compile group: 'org.apache.activemq', name: 'activemq-broker', version: '5.15.3'
     compile group: 'net.liftweb', name: 'lift-json_2.11', version: '3.2.0'
     compile "org.jetbrains.kotlin:kotlin-stdlib-jdk8"
     compile "org.jetbrains.kotlin:kotlin-reflect"
-    runtime group: 'org.apache.activemq', name: 'activemq-kahadb-store', version: '5.15.3'
 
     testCompile project(':common')
     testCompile "gradle.plugin.com.github.maiflai:gradle-scalatest:0.14"
@@ -92,7 +92,7 @@
             srcDirs = ['src/main/kotlin','src/main/java', 'src/main/scala']
         }
         java {
-            srcDirs = []
+            srcDirs = ['src/main/java']
         }
     }
 }
@@ -120,6 +120,22 @@
 compileKotlin{
     kotlinOptions.jvmTarget = "1.8"
 }
+
 compileTestKotlin {
     kotlinOptions.jvmTarget = "1.8"
 }
+
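+// mixed Kotlin/Java/Scala sources: Kotlin compiles first, then Java,
+// then Scala, which sees both earlier outputs on its compile classpath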
+compileTestScala {
+    dependsOn compileScala
+}
+
+compileScala {
+    dependsOn compileJava
+    classpath += files(compileJava.destinationDir) + files(compileKotlin.destinationDir)
+}
+
+compileJava {
+    dependsOn compileKotlin
+}
\ No newline at end of file
diff --git a/leader/src/main/java/org/apache/amaterasu/leader/yarn/ArgsParser.java b/leader/src/main/java/org/apache/amaterasu/leader/yarn/ArgsParser.java
deleted file mode 100644
index 38a9c38..0000000
--- a/leader/src/main/java/org/apache/amaterasu/leader/yarn/ArgsParser.java
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.amaterasu.leader.yarn;
-
-import org.apache.commons.cli.BasicParser;
-import org.apache.commons.cli.CommandLine;
-import org.apache.commons.cli.CommandLineParser;
-import org.apache.commons.cli.Options;
-import org.apache.commons.cli.ParseException;
-
-public class ArgsParser {
-    private static Options getOptions() {
-
-        Options options = new Options();
-        options.addOption("r", "repo", true, "The git repo containing the job");
-        options.addOption("b", "branch", true, "The branch to be executed (default is master)");
-        options.addOption("e", "env", true, "The environment to be executed (test, prod, etc. values from the default env are taken if np env specified)");
-        options.addOption("n", "name", true, "The name of the job");
-        options.addOption("i", "job-id", true, "The jobId - should be passed only when resuming a job");
-        options.addOption("j", "new-job-id", true, "The jobId - should never be passed by a user");
-        options.addOption("r", "report", true, "The level of reporting");
-        options.addOption("h", "home", true, "The level of reporting");
-
-        return options;
-    }
-
-    public static JobOpts getJobOpts(String[] args) throws ParseException {
-
-        CommandLineParser parser = new BasicParser();
-        Options options = getOptions();
-        CommandLine cli = parser.parse(options, args);
-
-        JobOpts opts = new JobOpts();
-        if (cli.hasOption("repo")) {
-            opts.repo = cli.getOptionValue("repo");
-        }
-
-        if (cli.hasOption("branch")) {
-            opts.branch = cli.getOptionValue("branch");
-        }
-
-        if (cli.hasOption("env")) {
-            opts.env = cli.getOptionValue("env");
-        }
-
-        if (cli.hasOption("job-id")) {
-            opts.jobId = cli.getOptionValue("job-id");
-        }
-        if (cli.hasOption("new-job-id")) {
-            opts.newJobId = cli.getOptionValue("new-job-id");
-        }
-
-        if (cli.hasOption("report")) {
-            opts.report = cli.getOptionValue("report");
-        }
-
-        if (cli.hasOption("home")) {
-            opts.home = cli.getOptionValue("home");
-        }
-
-        if (cli.hasOption("name")) {
-            opts.name = cli.getOptionValue("name");
-        }
-
-        return opts;
-    }
-}
diff --git a/leader/src/main/java/org/apache/amaterasu/leader/yarn/Client.java b/leader/src/main/java/org/apache/amaterasu/leader/yarn/Client.java
deleted file mode 100644
index 5e86188..0000000
--- a/leader/src/main/java/org/apache/amaterasu/leader/yarn/Client.java
+++ /dev/null
@@ -1,336 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.amaterasu.leader.yarn;
-
-import org.apache.activemq.ActiveMQConnectionFactory;
-import org.apache.amaterasu.common.configuration.ClusterConfig;
-import org.apache.amaterasu.leader.execution.frameworks.FrameworkProvidersFactory;
-import org.apache.amaterasu.leader.utilities.ActiveReportListener;
-import org.apache.amaterasu.sdk.frameworks.FrameworkSetupProvider;
-import org.apache.curator.framework.CuratorFramework;
-import org.apache.curator.framework.CuratorFrameworkFactory;
-import org.apache.curator.framework.recipes.barriers.DistributedBarrier;
-import org.apache.curator.retry.ExponentialBackoffRetry;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.*;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.yarn.api.ApplicationConstants;
-import org.apache.hadoop.yarn.api.records.*;
-import org.apache.hadoop.yarn.client.api.YarnClient;
-import org.apache.hadoop.yarn.client.api.YarnClientApplication;
-import org.apache.hadoop.yarn.conf.YarnConfiguration;
-import org.apache.hadoop.yarn.exceptions.YarnException;
-import org.apache.hadoop.yarn.util.Apps;
-import org.apache.hadoop.yarn.util.ConverterUtils;
-import org.apache.hadoop.yarn.util.Records;
-import org.apache.log4j.LogManager;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import javax.jms.*;
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.IOException;
-import java.util.*;
-
-import static java.lang.System.exit;
-
-public class Client {
-
-    private final static Logger LOGGER = LoggerFactory.getLogger(Client.class);
-    private final Configuration conf = new YarnConfiguration();
-    private FileSystem fs;
-
-    private LocalResource setLocalResourceFromPath(Path path) throws IOException {
-
-        FileStatus stat = fs.getFileStatus(path);
-        LocalResource fileResource = Records.newRecord(LocalResource.class);
-        fileResource.setResource(ConverterUtils.getYarnUrlFromPath(path));
-        fileResource.setSize(stat.getLen());
-        fileResource.setTimestamp(stat.getModificationTime());
-        fileResource.setType(LocalResourceType.FILE);
-        fileResource.setVisibility(LocalResourceVisibility.PUBLIC);
-        return fileResource;
-    }
-
-    private void run(JobOpts opts, String[] args) throws Exception {
-
-        LogManager.resetConfiguration();
-        ClusterConfig config = new ClusterConfig();
-        config.load(new FileInputStream(opts.home + "/amaterasu.properties"));
-
-        // Create yarnClient
-        YarnClient yarnClient = YarnClient.createYarnClient();
-        yarnClient.init(conf);
-        yarnClient.start();
-
-        // Create application via yarnClient
-        YarnClientApplication app = null;
-        try {
-            app = yarnClient.createApplication();
-        } catch (YarnException e) {
-            LOGGER.error("Error initializing yarn application with yarn client.", e);
-            exit(1);
-        } catch (IOException e) {
-            LOGGER.error("Error initializing yarn application with yarn client.", e);
-            exit(2);
-        }
-
-        // Setup jars on hdfs
-        try {
-            fs = FileSystem.get(conf);
-        } catch (IOException e) {
-            LOGGER.error("Eror creating HDFS client isntance.", e);
-            exit(3);
-        }
-        Path jarPath = new Path(config.YARN().hdfsJarsPath());
-        Path jarPathQualified = fs.makeQualified(jarPath);
-
-        ApplicationSubmissionContext appContext = app.getApplicationSubmissionContext();
-
-        String newId = "";
-        if (opts.jobId == null) {
-            newId = "--new-job-id " + appContext.getApplicationId().toString() + "-" + UUID.randomUUID().toString();
-        }
-
-
-        List<String> commands = Collections.singletonList(
-                "env AMA_NODE=" + System.getenv("AMA_NODE") +
-                        " env HADOOP_USER_NAME=" + UserGroupInformation.getCurrentUser().getUserName() +
-                        " $JAVA_HOME/bin/java" +
-                        " -Dscala.usejavacp=false" +
-                        " -Xmx2G" +
-                        " org.apache.amaterasu.leader.yarn.ApplicationMaster " +
-                        joinStrings(args) +
-                        newId +
-                        " 1>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stdout" +
-                        " 2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stderr"
-        );
-
-
-        // Set up the container launch context for the application master
-        ContainerLaunchContext amContainer = Records.newRecord(ContainerLaunchContext.class);
-        amContainer.setCommands(commands);
-
-        // Setup local ama folder on hdfs.
-        try {
-
-            System.out.println("===> " + jarPathQualified);
-            if (!fs.exists(jarPathQualified)) {
-                File home = new File(opts.home);
-                fs.mkdirs(jarPathQualified);
-
-                for (File f : home.listFiles()) {
-                    fs.copyFromLocalFile(false, true, new Path(f.getAbsolutePath()), jarPathQualified);
-                }
-
-                // setup frameworks
-                System.out.println("===> setting up frameworks");
-                FrameworkProvidersFactory frameworkFactory = FrameworkProvidersFactory.apply(opts.env, config);
-                for (String group : frameworkFactory.groups()) {
-                    System.out.println("===> setting up " + group);
-                    FrameworkSetupProvider framework = frameworkFactory.getFramework(group);
-
-                    //creating a group folder
-                    Path frameworkPath = Path.mergePaths(jarPathQualified, new Path("/" + framework.getGroupIdentifier()));
-                    System.out.println("===> " + frameworkPath.toString());
-
-                    fs.mkdirs(frameworkPath);
-                    for (File file : framework.getGroupResources()) {
-                        if (file.exists())
-                            fs.copyFromLocalFile(false, true, new Path(file.getAbsolutePath()), frameworkPath);
-                    }
-                }
-            }
-        } catch (IOException e) {
-            System.out.println("===>" + e.getMessage());
-            LOGGER.error("Error uploading ama folder to HDFS.", e);
-            exit(3);
-        } catch (NullPointerException ne) {
-            System.out.println("===>" + ne.getMessage());
-            LOGGER.error("No files in home dir.", ne);
-            exit(4);
-        }
-
-        // get version of build
-        String version = config.version();
-
-        // get local resources pointers that will be set on the master container env
-        String leaderJarPath = String.format("/bin/leader-%s-all.jar", version);
-        LOGGER.info("Leader Jar path is: {}", leaderJarPath);
-        Path mergedPath = Path.mergePaths(jarPath, new Path(leaderJarPath));
-
-        // System.out.println("===> path: " + jarPathQualified);
-        LOGGER.info("Leader merged jar path is: {}", mergedPath);
-        LocalResource leaderJar = null;
-        LocalResource propFile = null;
-        LocalResource log4jPropFile = null;
-
-        try {
-            leaderJar = setLocalResourceFromPath(mergedPath);
-            propFile = setLocalResourceFromPath(Path.mergePaths(jarPath, new Path("/amaterasu.properties")));
-            log4jPropFile = setLocalResourceFromPath(Path.mergePaths(jarPath, new Path("/log4j.properties")));
-        } catch (IOException e) {
-            LOGGER.error("Error initializing yarn local resources.", e);
-            exit(4);
-        }
-
-        // set local resource on master container
-        Map<String, LocalResource> localResources = new HashMap<>();
-        //localResources.put("leader.jar", leaderJar);
-        // making the bin folder's content available to the appMaster
-        RemoteIterator<LocatedFileStatus> bin = fs.listFiles(Path.mergePaths(jarPath, new Path("/bin")), true);
-
-        while (bin.hasNext()){
-            LocatedFileStatus binFile = bin.next();
-            localResources.put(binFile.getPath().getName(), setLocalResourceFromPath(binFile.getPath()));
-        }
-
-        localResources.put("amaterasu.properties", propFile);
-        localResources.put("log4j.properties", log4jPropFile);
-        amContainer.setLocalResources(localResources);
-
-        // Setup CLASSPATH for ApplicationMaster
-        Map<String, String> appMasterEnv = new HashMap<>();
-        setupAppMasterEnv(appMasterEnv);
-        appMasterEnv.put("AMA_CONF_PATH", String.format("%s/amaterasu.properties", config.YARN().hdfsJarsPath()));
-        amContainer.setEnvironment(appMasterEnv);
-
-        // Set up resource type requirements for ApplicationMaster
-        Resource capability = Records.newRecord(Resource.class);
-        capability.setMemory(config.YARN().master().memoryMB());
-        capability.setVirtualCores(config.YARN().master().cores());
-
-        // Finally, set-up ApplicationSubmissionContext for the application
-        appContext.setApplicationName("amaterasu-" + opts.name);
-        appContext.setAMContainerSpec(amContainer);
-        appContext.setResource(capability);
-        appContext.setQueue(config.YARN().queue());
-        appContext.setPriority(Priority.newInstance(1));
-
-        // Submit application
-        ApplicationId appId = appContext.getApplicationId();
-        LOGGER.info("Submitting application {}", appId);
-        try {
-            yarnClient.submitApplication(appContext);
-
-        } catch (YarnException e) {
-            LOGGER.error("Error submitting application.", e);
-            exit(6);
-        } catch (IOException e) {
-            LOGGER.error("Error submitting application.", e);
-            exit(7);
-        }
-
-        CuratorFramework client = CuratorFrameworkFactory.newClient(config.zk(),
-                new ExponentialBackoffRetry(1000, 3));
-        client.start();
-
-        String newJobId = newId.replace("--new-job-id ", "");
-        System.out.println("===> /" + newJobId + "-report-barrier");
-        DistributedBarrier reportBarrier = new DistributedBarrier(client, "/" + newJobId + "-report-barrier");
-        reportBarrier.setBarrier();
-        reportBarrier.waitOnBarrier();
-
-        String address = new String(client.getData().forPath("/" + newJobId + "/broker"));
-        System.out.println("===> " + address);
-        setupReportListener(address);
-
-        ApplicationReport appReport = null;
-        YarnApplicationState appState;
-
-        do {
-            try {
-                appReport = yarnClient.getApplicationReport(appId);
-            } catch (YarnException e) {
-                LOGGER.error("Error getting application report.", e);
-                exit(8);
-            } catch (IOException e) {
-                LOGGER.error("Error getting application report.", e);
-                exit(9);
-            }
-            appState = appReport.getYarnApplicationState();
-            if (isAppFinished(appState)) {
-                exit(0);
-                break;
-            }
-            //LOGGER.info("Application not finished ({})", appReport.getProgress());
-            try {
-                Thread.sleep(100);
-            } catch (InterruptedException e) {
-                LOGGER.error("Interrupted while waiting for job completion.", e);
-                exit(137);
-            }
-        } while (!isAppFinished(appState));
-
-        LOGGER.info("Application {} finished with state {}-{} at {}", appId, appState, appReport.getFinalApplicationStatus(), appReport.getFinishTime());
-    }
-
-    private boolean isAppFinished(YarnApplicationState appState) {
-        return appState == YarnApplicationState.FINISHED ||
-                appState == YarnApplicationState.KILLED ||
-                appState == YarnApplicationState.FAILED;
-    }
-
-    public static void main(String[] args) throws Exception {
-        Client c = new Client();
-
-        JobOpts opts = ArgsParser.getJobOpts(args);
-
-        c.run(opts, args);
-    }
-
-    private static String joinStrings(String[] str) {
-
-        StringBuilder builder = new StringBuilder();
-        for (String s : str) {
-            builder.append(s);
-            builder.append(" ");
-        }
-        return builder.toString();
-
-    }
-
-    private void setupReportListener(String address) throws JMSException {
-
-        ActiveMQConnectionFactory cf = new ActiveMQConnectionFactory(address);
-        Connection conn = cf.createConnection();
-        conn.start();
-
-        Session session = conn.createSession(false, Session.AUTO_ACKNOWLEDGE);
-
-        //TODO: move to a const in common
-        Topic destination = session.createTopic("JOB.REPORT");
-
-        MessageConsumer consumer = session.createConsumer(destination);
-        consumer.setMessageListener(new ActiveReportListener());
-
-    }
-
-    private void setupAppMasterEnv(Map<String, String> appMasterEnv) {
-        Apps.addToEnvironment(appMasterEnv,
-                ApplicationConstants.Environment.CLASSPATH.name(),
-                ApplicationConstants.Environment.PWD.$() + File.separator + "*", File.pathSeparator);
-
-        for (String c : conf.getStrings(
-                YarnConfiguration.YARN_APPLICATION_CLASSPATH,
-                YarnConfiguration.DEFAULT_YARN_APPLICATION_CLASSPATH)) {
-            Apps.addToEnvironment(appMasterEnv, ApplicationConstants.Environment.CLASSPATH.name(),
-                    c.trim(), File.pathSeparator);
-        }
-    }
-}
\ No newline at end of file
diff --git a/leader/src/main/java/org/apache/amaterasu/leader/yarn/JobOpts.java b/leader/src/main/java/org/apache/amaterasu/leader/yarn/JobOpts.java
deleted file mode 100644
index b8c29b7..0000000
--- a/leader/src/main/java/org/apache/amaterasu/leader/yarn/JobOpts.java
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.amaterasu.leader.yarn;
-
-public class JobOpts {
-    public String repo = "";
-    public String branch = "master";
-    public String env = "default";
-    public String name = "amaterasu-job";
-    public String jobId = null;
-    public String newJobId = null;
-    public String report ="code";
-    public String home ="";
-}
\ No newline at end of file
diff --git a/leader/src/main/scala/org/apache/amaterasu/leader/Kami.scala b/leader/src/main/scala/org/apache/amaterasu/leader/Kami.scala
deleted file mode 100755
index 9dab647..0000000
--- a/leader/src/main/scala/org/apache/amaterasu/leader/Kami.scala
+++ /dev/null
@@ -1,111 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.amaterasu.leader
-
-import java.util.concurrent.{BlockingQueue, ConcurrentHashMap, LinkedBlockingQueue}
-
-import com.github.nscala_time.time.Imports._
-import org.apache.amaterasu.common.dataobjects.JobData
-
-import scala.collection.JavaConversions
-
-/**
-  * This class is the cluster manager for Amaterasu (a goddess object), managing the state of the cluster:
-  * - The jobsQueue containing all waiting jobs
-  * - completedJobs
-  * - failedJobs
-  */
-class Kami {
-
-  private[amaterasu] var frameworkId: String = null
-  private var jobsQueue: BlockingQueue[JobData] = null
-  private var completedJobs: ConcurrentHashMap[String, JobData] = new ConcurrentHashMap[String, JobData]()
-  private var failedJobs: ConcurrentHashMap[String, JobData] = new ConcurrentHashMap[String, JobData]()
-
-  def getQueuedJobs(): Array[JobData] = {
-
-    jobsQueue.toArray(new Array[JobData](0))
-
-  }
-
-  /**
-    * Queues a job for execution
-    * @param jobUrl the url of the git repo containing the job definition
-    * @return the id of the newly created job
-    */
-  def addJob(jobUrl: String): String = {
-
-    val id = s"job_${System.currentTimeMillis()}"
-    jobsQueue.put(JobData(
-      src = jobUrl,
-      id = id,
-      timeCreated = DateTime.now,
-      startTime = null,
-      endTime = null
-    ))
-
-    id
-
-  }
-
-  def addJob(job: JobData): Unit = {
-
-    jobsQueue.put(job)
-
-  }
-
-  def getNextJob(): JobData = {
-
-    jobsQueue.take()
-
-  }
-
-}
-
-object Kami {
-
-  /**
-    *
-    * @param jobs
-    * @return
-    */
-  def apply(jobs: Seq[String]): Kami = {
-
-    val goddess = new Kami()
-    goddess.jobsQueue = new LinkedBlockingQueue[JobData]()
-
-    if (jobs != null) {
-
-      goddess.jobsQueue.addAll(JavaConversions.asJavaCollection(jobs.map(j => JobData(
-        src = j,
-        id = s"job_${System.currentTimeMillis()}",
-        timeCreated = DateTime.now,
-        startTime = null,
-        endTime = null
-      ))))
-
-    }
-
-    goddess
-  }
-
-  def apply(): Kami = {
-
-    apply(null)
-
-  }
-}
\ No newline at end of file
diff --git a/leader/src/main/scala/org/apache/amaterasu/leader/common/actions/SequentialAction.scala b/leader/src/main/scala/org/apache/amaterasu/leader/common/actions/SequentialAction.scala
deleted file mode 100755
index d9be4dd..0000000
--- a/leader/src/main/scala/org/apache/amaterasu/leader/common/actions/SequentialAction.scala
+++ /dev/null
@@ -1,135 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.amaterasu.leader.common.actions
-
-import java.util
-import java.util.concurrent.BlockingQueue
-
-import org.apache.amaterasu.common.configuration.enums.ActionStatus
-import org.apache.amaterasu.common.dataobjects.ActionData
-import org.apache.amaterasu.leader.common.execution.actions.Action
-import org.apache.curator.framework.CuratorFramework
-import org.apache.zookeeper.CreateMode
-
-import scala.collection.JavaConverters._
-import scala.collection.mutable.ListBuffer
-
-class SequentialAction extends Action {
-
-  var jobId: String = _
-  var jobsQueue: BlockingQueue[ActionData] = _
-  var attempts: Int = 2
-  var attempt: Int = 1
-
-  def execute(): Unit = {
-
-    try {
-
-      announceQueued
-      jobsQueue.add(data)
-
-    }
-    catch {
-
-      //TODO: this will not invoke the error action
-      case e: Exception => handleFailure(e.getMessage)
-
-    }
-
-  }
-
-  override def handleFailure(message: String): String = {
-
-    println(s"Part ${data.getName} of group ${data.getGroupId} and of type ${data.getTypeId} failed on attempt $attempt with message: $message")
-    attempt += 1
-
-    if (attempt <= attempts) {
-      data.getId
-    }
-    else {
-      announceFailure()
-      println(s"===> moving to err action ${data.errorActionId}")
-      data.setStatus ( ActionStatus.failed )
-      data.errorActionId
-    }
-
-  }
-
-}
-
-object SequentialAction {
-
-  def apply(name: String,
-            src: String,
-            groupId: String,
-            typeId: String,
-            exports: Map[String, String],
-            jobId: String,
-            queue: BlockingQueue[ActionData],
-            zkClient: CuratorFramework,
-            attempts: Int): SequentialAction = {
-
-    val action = new SequentialAction()
-
-    action.jobsQueue = queue
-
-    // creating a znode for the action
-    action.client = zkClient
-    action.actionPath = action.client.create().withMode(CreateMode.PERSISTENT_SEQUENTIAL).forPath(s"/$jobId/task-", ActionStatus.pending.toString.getBytes())
-    action.actionId = action.actionPath.substring(action.actionPath.indexOf("task-") + 5)
-
-    action.attempts = attempts
-    action.jobId = jobId
-    val javaExports = exports.asJava
-    action.data = new ActionData(ActionStatus.pending, name, src, groupId, typeId, action.actionId, javaExports, new util.ArrayList[String]())
-    action.jobsQueue = queue
-    action.client = zkClient
-
-    action
-  }
-
-}
-
-object ErrorAction {
-
-  def apply(name: String,
-            src: String,
-            parent: String,
-            groupId: String,
-            typeId: String,
-            jobId: String,
-            queue: BlockingQueue[ActionData],
-            zkClient: CuratorFramework): SequentialAction = {
-
-    val action = new SequentialAction()
-
-    action.jobsQueue = queue
-
-    // creating a znode for the action
-    action.client = zkClient
-    action.actionPath = action.client.create().withMode(CreateMode.PERSISTENT).forPath(s"/$jobId/task-$parent-error", ActionStatus.pending.toString.getBytes())
-    action.actionId = action.actionPath.substring(action.actionPath.indexOf('-') + 1).replace("/", "-")
-
-    action.jobId = jobId
-    action.data = new ActionData(ActionStatus.pending, name, src, groupId, typeId, action.actionId, new util.HashMap[String, String](), new util.ArrayList[String]())
-    action.jobsQueue = queue
-    action.client = zkClient
-
-    action
-
-  }
-}
diff --git a/leader/src/main/scala/org/apache/amaterasu/leader/dsl/GitUtil.scala b/leader/src/main/scala/org/apache/amaterasu/leader/dsl/GitUtil.scala
deleted file mode 100755
index b2492bb..0000000
--- a/leader/src/main/scala/org/apache/amaterasu/leader/dsl/GitUtil.scala
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-//package org.apache.amaterasu.leader.dsl
-//
-//import java.io.File
-//
-//import org.eclipse.jgit.api.Git
-//
-//import scala.reflect.io.Path
-
-/**
-  * The GitUtil class handles getting the job git repository
-  */
-//object GitUtil {
-//
-//  def cloneRepo(repoAddress: String, branch: String) = {
-//
-//    val path = Path("repo")
-//    path.deleteRecursively()
-//
-//    //TODO: add authentication
-//    val git = Git.cloneRepository
-//      .setURI(repoAddress)
-//      .setDirectory(new File("repo"))
-//      .setBranch(branch)
-//      .call
-//
-//    git.close()
-//  }
-//
-//}
\ No newline at end of file
diff --git a/leader/src/main/scala/org/apache/amaterasu/leader/dsl/JobParser.scala b/leader/src/main/scala/org/apache/amaterasu/leader/dsl/JobParser.scala
deleted file mode 100755
index e08489c..0000000
--- a/leader/src/main/scala/org/apache/amaterasu/leader/dsl/JobParser.scala
+++ /dev/null
@@ -1,181 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.amaterasu.leader.dsl
-
-import java.util.concurrent.BlockingQueue
-
-import com.fasterxml.jackson.databind.node.ArrayNode
-import com.fasterxml.jackson.databind.{JsonNode, ObjectMapper}
-import com.fasterxml.jackson.dataformat.yaml.YAMLFactory
-import org.apache.amaterasu.common.dataobjects.ActionData
-import org.apache.amaterasu.leader.execution.JobManager
-import org.apache.amaterasu.leader.common.actions.{ErrorAction, SequentialAction}
-import org.apache.amaterasu.leader.common.execution.actions.Action
-import org.apache.curator.framework.CuratorFramework
-
-import scala.collection.JavaConverters._
-import scala.collection.mutable
-import scala.io.Source
-
-/**
-  * The JobParser class is in charge of parsing the maki.yaml file which
-  * describes the workflow of an amaterasu job
-  */
-object JobParser {
-
-  def loadMakiFile(): String = {
-
-    Source.fromFile("repo/maki.yml").mkString
-
-  }
-
-  /**
-    * Parses the maki.yml string and creates a job manager
-    *
-    * @param jobId
-    * @param maki a string containing the YAML definition of the job
-    * @param actionsQueue
-    * @param client
-    * @return
-    */
-  def parse(jobId: String,
-            maki: String,
-            actionsQueue: BlockingQueue[ActionData],
-            client: CuratorFramework,
-            attempts: Int): JobManager = {
-
-    val mapper = new ObjectMapper(new YAMLFactory())
-
-    val job = mapper.readTree(maki)
-
-    // loading the job details
-    val manager = JobManager(jobId, job.path("job-name").asText, actionsQueue, client)
-
-    // iterating the flow list and constructing the job's flow
-    val actions = job.path("flow").asInstanceOf[ArrayNode].asScala.toSeq
-
-    parseActions(actions, manager, actionsQueue, attempts, null)
-
-    manager
-  }
-
-  /**
-    * parseActions is a recursive function, for building the workflow of
-    * the job
-    * God, I miss Clojure
-    *
-    * @param actions  a seq containing the definitions of all the actions
-    * @param manager  the job manager for the job
-    * @param actionsQueue
-    * @param previous the previous action, this is used in order to add the current action
-    *                 to the nextActionIds
-    */
-  def parseActions(actions: Seq[JsonNode],
-                   manager: JobManager,
-                   actionsQueue: BlockingQueue[ActionData],
-                   attempts: Int,
-                   previous: Action): Unit = {
-
-    if (actions.isEmpty)
-      return
-
-    val actionData = actions.head
-
-    val action = parseSequentialAction(
-      actionData,
-      manager.jobId,
-      actionsQueue,
-      manager.client,
-      attempts
-    )
-
-    //updating the list of frameworks setup
-    manager.frameworks.getOrElseUpdate(action.data.getGroupId,
-                                       new mutable.HashSet[String]())
-                                       .add(action.data.getTypeId)
-
-
-    if (manager.head == null) {
-      manager.head = action
-    }
-
-    if (previous != null) {
-      previous.data.getNextActionIds.add(action.actionId)
-    }
-    manager.registerAction(action)
-
-    val errorNode = actionData.path("error")
-
-    if (!errorNode.isMissingNode) {
-
-      val errorAction = parseErrorAction(
-        errorNode,
-        manager.jobId,
-        action.data.getId,
-        actionsQueue,
-        manager.client
-      )
-
-      action.data.errorActionId = errorAction.data.getId
-      manager.registerAction(errorAction)
-
-      //updating the list of frameworks setup
-      manager.frameworks.getOrElseUpdate(errorAction.data.getGroupId,
-        new mutable.HashSet[String]())
-        .add(errorAction.data.getTypeId)
-    }
-
-    parseActions(actions.tail, manager, actionsQueue, attempts, action)
-
-  }
-
-  def parseSequentialAction(action: JsonNode,
-                            jobId: String,
-                            actionsQueue: BlockingQueue[ActionData],
-                            client: CuratorFramework,
-                            attempts: Int): SequentialAction = {
-
-    SequentialAction(action.path("name").asText,
-      action.path("file").asText,
-      action.path("runner").path("group").asText,
-      action.path("runner").path("type").asText,
-      action.path("exports").fields().asScala.toSeq.map(e => (e.getKey, e.getValue.asText())).toMap,
-      jobId,
-      actionsQueue,
-      client,
-      attempts)
-  }
-
-  def parseErrorAction(action: JsonNode,
-                       jobId: String,
-                       parent: String,
-                       actionsQueue: BlockingQueue[ActionData],
-                       client: CuratorFramework): SequentialAction = {
-
-    ErrorAction(
-      action.path("name").asText,
-      action.path("file").asText,
-      parent,
-      action.path("group").asText,
-      action.path("type").asText,
-      jobId,
-      actionsQueue,
-      client
-    )
-
-  }
-}
diff --git a/leader/src/main/scala/org/apache/amaterasu/leader/execution/JobLoader.scala b/leader/src/main/scala/org/apache/amaterasu/leader/execution/JobLoader.scala
index 234070d..f24a5a2 100755
--- a/leader/src/main/scala/org/apache/amaterasu/leader/execution/JobLoader.scala
+++ b/leader/src/main/scala/org/apache/amaterasu/leader/execution/JobLoader.scala
@@ -21,8 +21,8 @@
 import org.apache.amaterasu.common.configuration.enums.ActionStatus
 import org.apache.amaterasu.common.dataobjects.ActionData
 import org.apache.amaterasu.common.logging.Logging
-import org.apache.amaterasu.leader.common.dsl.GitUtil
-import org.apache.amaterasu.leader.dsl.JobParser
+import org.apache.amaterasu.leader.common.dsl.{GitUtil, JobParser}
+import org.apache.amaterasu.leader.common.execution.JobManager
 import org.apache.curator.framework.CuratorFramework
 import org.apache.zookeeper.CreateMode
 
@@ -91,8 +91,8 @@
     val tasks = client.getChildren.forPath(s"/$jobId").asScala.toSeq.filter(n => n.startsWith("task"))
     for (task <- tasks) {
 
-      if (client.getData.forPath(s"/$jobId/$task").sameElements(ActionStatus.queued.toString.getBytes) ||
-        client.getData.forPath(s"/$jobId/$task").sameElements(ActionStatus.started.toString.getBytes)) {
+      if (client.getData.forPath(s"/$jobId/$task").sameElements(ActionStatus.Queued.toString.getBytes) ||
+        client.getData.forPath(s"/$jobId/$task").sameElements(ActionStatus.Started.toString.getBytes)) {
 
         jobManager.reQueueAction(task.substring(task.indexOf("task-") + 5))
 
@@ -102,4 +102,4 @@
 
   }
 
-}
\ No newline at end of file
+}
diff --git a/leader/src/main/scala/org/apache/amaterasu/leader/mesos/Launcher.scala b/leader/src/main/scala/org/apache/amaterasu/leader/mesos/Launcher.scala
deleted file mode 100755
index 0f23438..0000000
--- a/leader/src/main/scala/org/apache/amaterasu/leader/mesos/Launcher.scala
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.amaterasu.leader.mesos
-
-import java.io.FileInputStream
-
-import org.apache.amaterasu.common.configuration.ClusterConfig
-import org.apache.amaterasu.common.logging.Logging
-import org.apache.amaterasu.leader.Kami
-import org.apache.amaterasu.leader.mesos.schedulers.ClusterScheduler
-import org.apache.mesos.{MesosSchedulerDriver, Protos}
-
-object Launcher extends Logging with App {
-
-  println(
-    """
-       Apache
-           (                      )
-           )\        )      )   ( /(   (   (       )        (
-          ((_)(     (     ( /(  )\()  ))\  )(   ( /(  (    ))\
-         )\ _ )\    )\  ' )(_))(_))/ /((_)(()\  )(_)) )\  /((_)
-         (_)_\(_) _((_)) ((_) _ | |_ (_))   ((_)((_)_ ((_)(_))(
-          / _ \  | '   \()/ _` ||  _|/ -_) | '_|/ _` |(_-<| || |
-         /_/ \_\ |_|_|_|  \__,_| \__|\___| |_|  \__,_|/__/ \_,_|
-
-         Durable Dataflow Cluster
-         Version 0.1.0
-    """
-  )
-
-  val config = ClusterConfig(new FileInputStream("scripts/amaterasu.properties"))
-  val kami = Kami(Seq("https://github.com/roadan/amaterasu-job-sample.git"))
-
-  // for multi-tenancy reasons the name of the framework is composed out of the username ( which defaults
-  // to empty string concatenated with - Amaterasu
-  val framework = Protos.FrameworkInfo.newBuilder()
-    .setName(s"${config.user} - Amaterasu")
-    .setFailoverTimeout(config.timeout)
-    .setUser(config.user).build()
-
-  log.debug(s"The framework user is ${config.user}")
-  val masterAddress = s"${config.master}:${config.masterPort}"
-  val scheduler = ClusterScheduler(kami, config)
-  val driver = new MesosSchedulerDriver(scheduler, framework, masterAddress)
-
-  log.debug(s"Connecting to master on: $masterAddress")
-  driver.run()
-
-}
\ No newline at end of file
diff --git a/leader/src/main/scala/org/apache/amaterasu/leader/mesos/schedulers/AmaterasuScheduler.scala b/leader/src/main/scala/org/apache/amaterasu/leader/mesos/schedulers/AmaterasuScheduler.scala
index 68c8f85..13b2413 100755
--- a/leader/src/main/scala/org/apache/amaterasu/leader/mesos/schedulers/AmaterasuScheduler.scala
+++ b/leader/src/main/scala/org/apache/amaterasu/leader/mesos/schedulers/AmaterasuScheduler.scala
@@ -29,4 +29,4 @@
       .setScalar(Value.Scalar.newBuilder().setValue(value)).build()
   }
 
-}
+}
\ No newline at end of file
diff --git a/leader/src/main/scala/org/apache/amaterasu/leader/mesos/schedulers/ClusterScheduler.scala b/leader/src/main/scala/org/apache/amaterasu/leader/mesos/schedulers/ClusterScheduler.scala
deleted file mode 100755
index 06a2596..0000000
--- a/leader/src/main/scala/org/apache/amaterasu/leader/mesos/schedulers/ClusterScheduler.scala
+++ /dev/null
@@ -1,135 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.amaterasu.leader.mesos.schedulers
-
-import java.util
-
-import org.apache.amaterasu.common.configuration.ClusterConfig
-import org.apache.amaterasu.leader.Kami
-import org.apache.mesos.Protos._
-import org.apache.mesos.{Protos, SchedulerDriver}
-
-import scala.collection.JavaConverters._
-
-class ClusterScheduler extends AmaterasuScheduler {
-
-  private var kami: Kami = _
-  private var config: ClusterConfig = _
-
-  private var driver: SchedulerDriver = _
-
-  def error(driver: SchedulerDriver, message: String) {}
-
-  def executorLost(driver: SchedulerDriver, executorId: ExecutorID, slaveId: SlaveID, status: Int) {}
-
-  def slaveLost(driver: SchedulerDriver, slaveId: SlaveID) {}
-
-  def disconnected(driver: SchedulerDriver) {}
-
-  def frameworkMessage(driver: SchedulerDriver, executorId: ExecutorID, slaveId: SlaveID, data: Array[Byte]) {}
-
-  def statusUpdate(driver: SchedulerDriver, status: TaskStatus) {
-
-    println(s"received status update $status")
-
-  }
-
-  def offerRescinded(driver: SchedulerDriver, offerId: OfferID) = {}
-
-  def validateOffer(offer: Offer): Boolean = {
-
-    val resources = offer.getResourcesList.asScala
-
-    resources.count(r => r.getName == "cpus" && r.getScalar.getValue >= config.Jobs.cpus) > 0 &&
-      resources.count(r => r.getName == "mem" && r.getScalar.getValue >= config.Jobs.mem) > 0 &&
-      resources.count(r => r.getName == "disk" && r.getScalar.getValue >= config.Jobs.repoSize) > 0
-  }
-
-  def buildCommandInfo(jobSrc: String): CommandInfo = {
-
-    println(s"Starting amaterasu job: java -cp ${config.JarName} org.apache.amaterasu.leader.mesos.executors.JobExecutor $jobSrc")
-
-    CommandInfo.newBuilder
-      //.addUris(URI.newBuilder.setValue(fsUtil.getJarUrl()).setExecutable(false))
-      //.addUris(URI.newBuilder.setValue(jobSrc)
-      .setValue(s"java -cp ${config.Jar} org.apache.amaterasu.leader.mesos.executors.JobExecutor $jobSrc")
-      .build()
-  }
-
-  def resourceOffers(driver: SchedulerDriver, offers: util.List[Offer]): Unit = {
-
-    for (offer <- offers.asScala) {
-      log.debug(s"offer received by Amaterasu Cluster Scheduler: $offer")
-
-      if (validateOffer(offer)) {
-
-        log.info(s"Accepting offer, id=${offer.getId}")
-        // getting the next job to be executed
-        val job = kami.getNextJob()
-
-        val taskId = Protos.TaskID.newBuilder().setValue(job.id).build()
-        log.debug(s"Preparing to launch job $taskId on slave ${offer.getSlaveId}")
-
-        val task = TaskInfo.newBuilder
-          .setName(s"Amaterasu-job-${taskId.getValue}")
-          .setTaskId(taskId)
-          .addResources(createScalarResource("cpus", config.Jobs.cpus))
-          .addResources(createScalarResource("mem", config.Jobs.mem))
-          .addResources(createScalarResource("disk", config.Jobs.repoSize))
-          .setSlaveId(offer.getSlaveId)
-          .setTaskId(taskId).setCommand(buildCommandInfo(job.src)).build()
-
-        log.debug(s"Starting task Amaterasu-job-${taskId.getValue}")
-        log.debug(s"With resources cpus=${config.Jobs.cpus}, mem=${config.Jobs.mem}, disk=${config.Jobs.repoSize}")
-
-        driver.launchTasks(List(offer.getId).asJavaCollection, List(task).asJavaCollection)
-
-      }
-      else {
-
-        log.info("Declining offer")
-        driver.declineOffer(offer.getId)
-
-      }
-    }
-
-  }
-
-  def registered(driver: SchedulerDriver, frameworkId: FrameworkID, masterInfo: MasterInfo): Unit = {
-
-    log.info("[registered] framework:" + frameworkId.getValue + " master:" + masterInfo)
-
-    kami.frameworkId = frameworkId.getValue
-    this.driver = driver
-
-  }
-
-  def reregistered(driver: SchedulerDriver, masterInfo: Protos.MasterInfo) {}
-}
-
-object ClusterScheduler {
-
-  def apply(kami: Kami, config: ClusterConfig): ClusterScheduler = {
-
-    val scheduler = new ClusterScheduler()
-    scheduler.kami = kami
-    scheduler.config = config
-    scheduler
-
-  }
-
-}
\ No newline at end of file
diff --git a/leader/src/main/scala/org/apache/amaterasu/leader/mesos/schedulers/JobScheduler.scala b/leader/src/main/scala/org/apache/amaterasu/leader/mesos/schedulers/JobScheduler.scala
index 961dc80..464e3bf 100755
--- a/leader/src/main/scala/org/apache/amaterasu/leader/mesos/schedulers/JobScheduler.scala
+++ b/leader/src/main/scala/org/apache/amaterasu/leader/mesos/schedulers/JobScheduler.scala
@@ -17,13 +17,11 @@
 package org.apache.amaterasu.leader.mesos.schedulers
 
 import java.io.{File, PrintWriter, StringWriter}
-import java.nio.file.Files.copy
-import java.nio.file.Paths.get
-import java.nio.file.StandardCopyOption.REPLACE_EXISTING
+import java.nio.file.{Files, Path, Paths, StandardCopyOption}
 import java.util
+import java.util.UUID
 import java.util.concurrent.locks.ReentrantLock
 import java.util.concurrent.{ConcurrentHashMap, LinkedBlockingQueue}
-import java.util.{Collections, UUID}
 
 import com.fasterxml.jackson.databind.ObjectMapper
 import com.fasterxml.jackson.dataformat.yaml.YAMLFactory
@@ -31,18 +29,19 @@
 import org.apache.amaterasu.common.configuration.ClusterConfig
 import org.apache.amaterasu.common.configuration.enums.ActionStatus
 import org.apache.amaterasu.common.dataobjects.ActionData
-import org.apache.amaterasu.common.execution.actions.NotificationLevel.NotificationLevel
-import org.apache.amaterasu.common.execution.actions.{Notification, NotificationLevel, NotificationType}
+import org.apache.amaterasu.common.execution.actions.Notification
+import org.apache.amaterasu.common.execution.actions.enums.{NotificationLevel, NotificationType}
 import org.apache.amaterasu.leader.common.configuration.ConfigManager
+import org.apache.amaterasu.leader.common.execution.JobManager
+import org.apache.amaterasu.leader.common.execution.frameworks.FrameworkProvidersFactory
 import org.apache.amaterasu.leader.common.utilities.DataLoader
-import org.apache.amaterasu.leader.execution.frameworks.FrameworkProvidersFactory
-import org.apache.amaterasu.leader.execution.{JobLoader, JobManager}
+import org.apache.amaterasu.leader.execution.JobLoader
 import org.apache.amaterasu.leader.utilities.HttpServer
+import org.apache.commons.io.FileUtils
 import org.apache.curator.framework.{CuratorFramework, CuratorFrameworkFactory}
 import org.apache.curator.retry.ExponentialBackoffRetry
 import org.apache.log4j.LogManager
 import org.apache.mesos.Protos.CommandInfo.URI
-import org.apache.mesos.Protos.ContainerInfo.DockerInfo
 import org.apache.mesos.Protos.Environment.Variable
 import org.apache.mesos.Protos._
 import org.apache.mesos.protobuf.ByteString
@@ -73,6 +72,9 @@
   private var resume: Boolean = false
   private var reportLevel: NotificationLevel = _
 
+  private val jarFile = new File(this.getClass.getProtectionDomain.getCodeSource.getLocation.getPath)
+  private val amaDist = new File(s"${new File(jarFile.getParent).getParent}/dist")
+
   val slavesExecutors = new TrieMap[String, ExecutorInfo]
   private var awsEnv: String = ""
 
@@ -103,9 +105,9 @@
     val notification = mapper.readValue(data, classOf[Notification])
 
     reportLevel match {
-      case NotificationLevel.code => printNotification(notification)
-      case NotificationLevel.execution =>
-        if (notification.notLevel != NotificationLevel.code)
+      case NotificationLevel.Code => printNotification(notification)
+      case NotificationLevel.Execution =>
+        if (notification.getNotLevel != NotificationLevel.Code)
           printNotification(notification)
       case _ =>
     }
@@ -114,11 +116,9 @@
 
   def statusUpdate(driver: SchedulerDriver, status: TaskStatus): Unit = {
 
-    log.info("statusUpdate() task "+ status.getTaskId +" is in state " + status.getState.toString)
-
     status.getState match {
-      case TaskState.TASK_STARTING => jobManager.actionStarted(status.getTaskId.getValue)
-      case TaskState.TASK_RUNNING => jobManager.actionRunning(status.getTaskId.getValue)
+      case TaskState.TASK_STARTING => log.info("Task starting ...")
+      case TaskState.TASK_RUNNING => jobManager.actionStarted(status.getTaskId.getValue)
       case TaskState.TASK_FINISHED => jobManager.actionComplete(status.getTaskId.getValue)
       case TaskState.TASK_FAILED |
            TaskState.TASK_KILLED |
@@ -133,8 +133,8 @@
 
     val resources = offer.getResourcesList.asScala
 
-    resources.count(r => r.getName == "cpus" && r.getScalar.getValue >= config.Jobs.Tasks.cpus) > 0 &&
-      resources.count(r => r.getName == "mem" && r.getScalar.getValue >= config.Jobs.Tasks.mem) > 0
+    resources.count(r => r.getName == "cpus" && r.getScalar.getValue >= config.jobs.tasks.cpus) > 0 &&
+      resources.count(r => r.getName == "mem" && r.getScalar.getValue >= config.jobs.tasks.mem) > 0
   }
 
   def offerRescinded(driver: SchedulerDriver, offerId: OfferID): Unit = {
@@ -146,11 +146,13 @@
 
   def resourceOffers(driver: SchedulerDriver, offers: util.List[Offer]): Unit = {
 
+    println(jobManager.toString)
+
     for (offer <- offers.asScala) {
 
       if (validateOffer(offer)) {
 
-        log.info(s"Evaluating offer, id=${offer.getId}")
+        log.info(s"Accepting offer, id=${offer.getId}")
 
         // this is done to avoid the processing the same action
         // multiple times
@@ -161,18 +163,17 @@
           if (actionData != null) {
             val taskId = Protos.TaskID.newBuilder().setValue(actionData.getId).build()
 
-            log.info(s"Accepting offer, id=${offer.getId}, taskId=${taskId}")
             // setting up the configuration files for the container
-            val envYaml = configManager.getActionConfigContent(actionData.getName, "") //TODO: replace with the value in actionData.config
-            writeConfigFile(envYaml, jobManager.jobId, actionData.getName, "env.yaml")
+            val envYaml = configManager.getActionConfigContent(actionData.getName, actionData.getConfig)
+            writeConfigFile(envYaml, jobManager.getJobId, actionData.getName, "env.yaml")
 
-            val dataStores = DataLoader.getTaskData(actionData, env).exports
+            val dataStores = DataLoader.getTaskData(actionData, env).getExports
             val writer = new StringWriter()
             yamlMapper.writeValue(writer, dataStores)
             val dataStoresYaml = writer.toString
-            writeConfigFile(dataStoresYaml, jobManager.jobId, actionData.getName, "datastores.yaml")
+            writeConfigFile(dataStoresYaml, jobManager.getJobId, actionData.getName, "datastores.yaml")
 
-            writeConfigFile(s"jobId: ${jobManager.jobId}\nactionName: ${actionData.getName}", jobManager.jobId, actionData.getName, "runtime.yaml")
+            writeConfigFile(s"jobId: ${jobManager.getJobId}\nactionName: ${actionData.getName}", jobManager.getJobId, actionData.getName, "runtime.yaml")
 
             offersToTaskIds.put(offer.getId.getValue, taskId.getValue)
 
@@ -181,7 +182,7 @@
             executionMap.putIfAbsent(offer.getSlaveId.toString, new ConcurrentHashMap[String, ActionStatus].asScala)
 
             val slaveActions = executionMap(offer.getSlaveId.toString)
-            slaveActions.put(taskId.getValue, ActionStatus.started)
+            slaveActions.put(taskId.getValue, ActionStatus.Started)
 
 
             val frameworkProvider = frameworkFactory.providers(actionData.getGroupId)
@@ -190,130 +191,151 @@
             // searching for an executor that already exist on the slave, if non exist
             // we create a new one
             var executor: ExecutorInfo = null
-            val slaveId = offer.getSlaveId.getValue
-            val container = ContainerInfo.newBuilder
 
-            slavesExecutors.synchronized {
-              //              if (slavesExecutors.contains(slaveId) &&
-              //                offer.getExecutorIdsList.contains(slavesExecutors(slaveId).getExecutorId)) {
-              //                executor = slavesExecutors(slaveId)
-              //              }
-              //              else {
-              val execData = DataLoader.getExecutorDataBytes(env, config)
-              val executorId = taskId.getValue + "-" + UUID.randomUUID()
+            //            val slaveId = offer.getSlaveId.getValue
+            //            slavesExecutors.synchronized {
 
-              // TODO: move this into the runner provider somehow
-              copy(get(s"repo/src/${actionData.getSrc}"), get(s"dist/${jobManager.jobId}/${actionData.getName}/${actionData.getSrc}"), REPLACE_EXISTING)
+            val execData = DataLoader.getExecutorDataBytes(env, config)
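+            // each task gets a dedicated executor, identified by the task id plus a random UUID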
+            val executorId = taskId.getValue + "-" + UUID.randomUUID()
+            // creating the command
 
-              println(s"===> ${runnerProvider.getCommand(jobManager.jobId, actionData, env, executorId, "")}")
-              val command = CommandInfo.newBuilder
-                .setValue(runnerProvider.getCommand(jobManager.jobId, actionData, env, executorId, ""))
-                .addUris(URI.newBuilder
-                  .setValue(s"http://${sys.env("AMA_NODE")}:${config.Webserver.Port}/executor-${config.version}-all.jar")
-                  .setExecutable(false)
-                  .setExtract(false)
-                  .build())
-
-
-              // Getting env.yaml
-              command.addUris(URI.newBuilder
-                .setValue(s"http://${sys.env("AMA_NODE")}:${config.Webserver.Port}/${jobManager.jobId}/${actionData.getName}/env.yaml")
+            // TODO: move this into the runner provider somehow
+            // if (!actionData.getSrc.isEmpty) {
+            //   copy(get(s"repo/src/${actionData.getSrc}"), get(s"dist/${jobManager.getJobId}/${actionData.getName}/${actionData.getSrc}"), REPLACE_EXISTING)
+            // }
+            val commandStr = runnerProvider.getCommand(jobManager.getJobId, actionData, env, executorId, "")
+            val command = CommandInfo
+              .newBuilder
+              .setValue(commandStr)
+              .addUris(URI.newBuilder
+                .setValue(s"http://${sys.env("AMA_NODE")}:${config.Webserver.Port}/executor-${config.version}-all.jar")
                 .setExecutable(false)
-                .setExtract(true)
+                .setExtract(false)
                 .build())
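+            // all of these URIs are served by the leader's web server on AMA_NODE and fetched into the task sandbox by the Mesos fetcher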
 
-              // Getting runtime.yaml
+            // Getting framework (group) resources
+            frameworkProvider.getGroupResources.foreach(f => command.addUris(URI.newBuilder
+              .setValue(s"http://${sys.env("AMA_NODE")}:${config.Webserver.Port}/${f.getName}")
+              .setExecutable(false)
+              .setExtract(true)
+              .build()
+            ))
+
+            // Getting runner resources
+            runnerProvider.getRunnerResources.foreach(r => {
               command.addUris(URI.newBuilder
-                .setValue(s"http://${sys.env("AMA_NODE")}:${config.Webserver.Port}/${jobManager.jobId}/${actionData.getName}/runtime.yaml")
-                .setExecutable(false)
-                .setExtract(true)
-                .build())
-
-              // Getting framework resources
-              frameworkProvider.getGroupResources.foreach(f => command.addUris(URI.newBuilder
-                .setValue(s"http://${sys.env("AMA_NODE")}:${config.Webserver.Port}/${f.getName}")
-                .setExecutable(false)
-                .setExtract(true)
-                .build()))
-
-              // Getting runner resources
-              runnerProvider.getRunnerResources.foreach(r => command.addUris(URI.newBuilder
                 .setValue(s"http://${sys.env("AMA_NODE")}:${config.Webserver.Port}/$r")
                 .setExecutable(false)
                 .setExtract(false)
-                .build()))
-
-              // Getting action specific resources
-              runnerProvider.getActionResources(jobManager.jobId, actionData).foreach(r => command.addUris(URI.newBuilder
-                .setValue(s"http://${sys.env("AMA_NODE")}:${config.Webserver.Port}/$r")
-                .setExecutable(false)
-                .setExtract(false)
-                .build()))
-
-              command
-                .addUris(URI.newBuilder()
-                  .setValue(s"http://${sys.env("AMA_NODE")}:${config.Webserver.Port}/miniconda.sh") //TODO: Nadav needs to clean this on the executor side
-                  .setExecutable(true)
-                  .setExtract(false)
-                  .build())
-                .addUris(URI.newBuilder()
-                  .setValue(s"http://${sys.env("AMA_NODE")}:${config.Webserver.Port}/amaterasu.properties")
-                  .setExecutable(false)
-                  .setExtract(false)
-                  .build())
-
-
-              val dockerBuilder = DockerInfo.newBuilder()
-                //.setImage("bento/centos-7.1") //TODO: change docker image
-                //.setImage("fedora/apache")
-                .setForcePullImage(true)
-                .setImage("mesosphere/spark:latest")
-                .setNetwork(DockerInfo.Network.BRIDGE)
-
-
-              container
-                .setType(ContainerInfo.Type.DOCKER)
-                .setDocker(dockerBuilder.build())
-
-
-              // setting the processes environment variables
-              val envVarsList = frameworkProvider.getEnvironmentVariables.asScala.toList.map(x => Variable.newBuilder().setName(x._1).setValue(x._2).build()).asJava
-              command.setEnvironment(Environment.newBuilder().addAllVariables(envVarsList))
-
-              executor = ExecutorInfo
-                .newBuilder
-                .setData(ByteString.copyFrom(execData))
-                .setName(taskId.getValue)
-                .setExecutorId(ExecutorID.newBuilder().setValue(executorId))
-                .setContainer(container)
-                .setCommand(command)
-
-                .build()
-
-              slavesExecutors.put(offer.getSlaveId.getValue, executor)
+                .build())
             }
-            //}
+            )
+
+            // Getting action dependencies
+            runnerProvider.getActionDependencies(jobManager.getJobId, actionData).foreach(r => {
+              command.addUris(URI.newBuilder
+                .setValue(s"http://${sys.env("AMA_NODE")}:${config.Webserver.Port}/$r")
+                .setExecutable(false)
+                .setExtract(false)
+                .build())
+            }
+            )
+
+            // Getting action specific resources
+            runnerProvider.getActionResources(jobManager.getJobId, actionData).foreach(r => command.addUris(URI.newBuilder
+              .setValue(s"http://${sys.env("AMA_NODE")}:${config.Webserver.Port}/$r")
+              .setExecutable(false)
+              .setExtract(false)
+              .build()))
+
+            // setting up action executable
+            val sourcePath = new File(runnerProvider.getActionExecutable(jobManager.getJobId, actionData))
+            var executable: Path = null
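+            // artifacts already sit under the dist folder, so only their dist-relative path is needed; plain sources are moved into dist/<jobId> so the web server can serve them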
+            if (actionData.getHasArtifact) {
+              val relativePath = amaDist.toPath.getRoot.relativize(sourcePath.toPath)
+              executable = relativePath.subpath(amaDist.toPath.getNameCount, relativePath.getNameCount)
+            } else {
+              val dest = new File(s"dist/${jobManager.getJobId}/${sourcePath.toString}")
+              FileUtils.moveFile(sourcePath, dest)
+              executable = Paths.get(jobManager.getJobId, sourcePath.toPath.toString)
+            }
+
+            println(s"===> executable $executable")
+            command.addUris(URI.newBuilder
+              .setValue(s"http://${sys.env("AMA_NODE")}:${config.Webserver.Port}/$executable")
+              .setExecutable(false)
+              .setExtract(false)
+              .build())
+
+            command
+              .addUris(URI.newBuilder()
+                .setValue(s"http://${sys.env("AMA_NODE")}:${config.Webserver.Port}/miniconda.sh") //TODO: Nadav needs to clean this on the executor side
+                .setExecutable(true)
+                .setExtract(false)
+                .build())
+              .addUris(URI.newBuilder()
+                .setValue(s"http://${sys.env("AMA_NODE")}:${config.Webserver.Port}/amaterasu.properties")
+                .setExecutable(false)
+                .setExtract(false)
+                .build())
+
+            // setting the processes environment variables
+            val envVarsList = frameworkProvider.getEnvironmentVariables.asScala.toList.map(x => Variable.newBuilder().setName(x._1).setValue(x._2).build()).asJava
+            command.setEnvironment(Environment.newBuilder().addAllVariables(envVarsList))
+
+            executor = ExecutorInfo
+              .newBuilder
+              .setData(ByteString.copyFrom(execData))
+              .setName(taskId.getValue)
+              .setExecutorId(ExecutorID.newBuilder().setValue(executorId))
+              .setCommand(command)
+
+              .build()
+
+            slavesExecutors.put(offer.getSlaveId.getValue, executor)
+
 
             val driverConfiguration = frameworkProvider.getDriverConfiguration
 
-            val actionTask = TaskInfo
-              .newBuilder
-              .setName(taskId.getValue)
-              .setTaskId(taskId)
-              .setSlaveId(offer.getSlaveId)
-              .setExecutor(executor)
+            var actionTask: TaskInfo = null
 
-              .setData(ByteString.copyFrom(DataLoader.getTaskDataBytes(actionData, env)))
-              .addResources(createScalarResource("cpus", driverConfiguration.getCPUs))
-              .addResources(createScalarResource("mem", driverConfiguration.getMemory))
-              .addResources(createScalarResource("disk", config.Jobs.repoSize))
-              .build()
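+            // runners that provide an executor are launched via ExecutorInfo; command-only runners launch the CommandInfo directly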
+            if (runnerProvider.getHasExecutor) {
+              actionTask = TaskInfo
+                .newBuilder
+                .setName(taskId.getValue)
+                .setTaskId(taskId)
+                .setExecutor(executor)
 
-            log.info(s"launching task: ${offer.getId} with image: ${actionTask.getExecutor.getContainer.getDocker.getImage}")
-            driver.launchTasks(Collections.singleton(offer.getId), Collections.singleton(actionTask))
+                .setData(ByteString.copyFrom(DataLoader.getTaskDataBytes(actionData, env)))
+                .addResources(createScalarResource("cpus", driverConfiguration.getCpus))
+                .addResources(createScalarResource("mem", driverConfiguration.getMemory))
+                .addResources(createScalarResource("disk", config.jobs.repoSize))
+                .setSlaveId(offer.getSlaveId)
+                .build()
+
+              //driver.launchTasks(Collections.singleton(offer.getId), List(actionTask).asJava)
+            }
+            else {
+              actionTask = TaskInfo
+                .newBuilder
+                .setName(taskId.getValue)
+                .setTaskId(taskId)
+                .setCommand(command)
+
+                //.setData(ByteString.copyFrom(DataLoader.getTaskDataBytes(actionData, env)))
+                .addResources(createScalarResource("cpus", driverConfiguration.getCpus))
+                .addResources(createScalarResource("mem", driverConfiguration.getMemory))
+                .addResources(createScalarResource("disk", config.jobs.repoSize))
+                .setSlaveId(offer.getSlaveId)
+                .build()
+
+              //driver.launchTasks(Collections.singleton(offer.getId), List(actionTask).asJava)
+            }
+            driver.launchTasks(offer.getId, List(actionTask).asJava)
+
           }
-          else if (jobManager.outOfActions) {
-            log.info(s"framework ${jobManager.jobId} execution finished")
+          else if (jobManager.getOutOfActions) {
+            log.info(s"framework ${jobManager.getJobId} execution finished")
 
             val repo = new File("repo/")
             repo.delete()
@@ -321,9 +343,10 @@
             HttpServer.stop()
             driver.declineOffer(offer.getId)
             driver.stop()
+            sys.exit()
           }
           else {
-            log.info(s"Declining offer, no action ready for execution, action count=${jobManager.actionsCount()}")
+            log.info("Declining offer, no action ready for execution")
             driver.declineOffer(offer.getId)
           }
         }
@@ -349,7 +372,7 @@
         branch,
         frameworkId.getValue,
         client,
-        config.Jobs.Tasks.attempts,
+        config.jobs.tasks.attempts,
         new LinkedBlockingQueue[ActionData]()
       )
     }
@@ -358,7 +381,7 @@
       JobLoader.reloadJob(
         frameworkId.getValue,
         client,
-        config.Jobs.Tasks.attempts,
+        config.jobs.tasks.attempts,
         new LinkedBlockingQueue[ActionData]()
       )
 
@@ -370,7 +393,7 @@
 
     jobManager.start()
 
-    createJobDir(jobManager.jobId)
+    createJobDir(jobManager.getJobId)
 
   }
 
@@ -380,18 +403,18 @@
 
     var color = Console.WHITE
 
-    notification.notType match {
+    notification.getNotType match {
 
-      case NotificationType.info =>
+      case NotificationType.Info =>
         color = Console.WHITE
-        println(s"$color${Console.BOLD}===> ${notification.msg} ${Console.RESET}")
-      case NotificationType.success =>
+        println(s"$color${Console.BOLD}===> ${notification.getMsg} ${Console.RESET}")
+      case NotificationType.Success =>
         color = Console.GREEN
-        println(s"$color${Console.BOLD}===> ${notification.line} ${Console.RESET}")
-      case NotificationType.error =>
+        println(s"$color${Console.BOLD}===> ${notification.getLine} ${Console.RESET}")
+      case NotificationType.Error =>
         color = Console.RED
-        println(s"$color${Console.BOLD}===> ${notification.line} ${Console.RESET}")
-        println(s"$color${Console.BOLD}===> ${notification.msg} ${Console.RESET}")
+        println(s"$color${Console.BOLD}===> ${notification.getLine} ${Console.RESET}")
+        println(s"$color${Console.BOLD}===> ${notification.getMsg} ${Console.RESET}")
 
     }
 
@@ -426,7 +449,6 @@
       dir.mkdir()
     }
 
-
     new PrintWriter(s"$envLocation/$fileName") {
       write(configuration)
       close
@@ -459,7 +481,7 @@
     scheduler.src = src
     scheduler.branch = branch
     scheduler.env = env
-    scheduler.reportLevel = NotificationLevel.withName(report)
+    scheduler.reportLevel = NotificationLevel.valueOf(report.capitalize)
 
     val retryPolicy = new ExponentialBackoffRetry(1000, 3)
     scheduler.client = CuratorFrameworkFactory.newClient(config.zk, retryPolicy)
@@ -470,4 +492,4 @@
 
   }
 
-}
+}
\ No newline at end of file
diff --git a/leader/src/main/scala/org/apache/amaterasu/leader/utilities/ActiveReportListener.scala b/leader/src/main/scala/org/apache/amaterasu/leader/utilities/ActiveReportListener.scala
deleted file mode 100644
index b3ffaad..0000000
--- a/leader/src/main/scala/org/apache/amaterasu/leader/utilities/ActiveReportListener.scala
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.amaterasu.leader.utilities
-
-import javax.jms.{Message, MessageListener, TextMessage}
-import net.liftweb.json._
-import org.apache.amaterasu.common.execution.actions.{Notification, NotificationLevel, NotificationType}
-
-class ActiveReportListener extends MessageListener {
-
-  implicit val formats = DefaultFormats
-
-  override def onMessage(message: Message): Unit = {
-    message match {
-      case tm: TextMessage =>
-        try {
-          val notification = parseNot(parse(tm.getText))
-          printNotification(notification)
-
-        } catch {
-          case e: Exception => println(e.getMessage)
-        }
-      case _ => println("===> Unknown message")
-    }
-  }
-
-  private def parseNot(json: JValue): Notification = Notification(
-    (json \ "line").asInstanceOf[JString].values,
-    (json \ "msg").asInstanceOf[JString].values,
-    NotificationType.withName((json \ "notType" \ "name").asInstanceOf[JString].values),
-    NotificationLevel.withName((json \ "notLevel" \ "name").asInstanceOf[JString].values)
-  )
-
-
-  private def printNotification(notification: Notification): Unit = {
-
-    var color = Console.WHITE
-
-    notification.notType match {
-
-      case NotificationType.info =>
-        color = Console.WHITE
-        println(s"$color${Console.BOLD}===> ${notification.msg} ${Console.RESET}")
-      case NotificationType.success =>
-        color = Console.GREEN
-        println(s"$color${Console.BOLD}===> ${notification.line} ${Console.RESET}")
-      case NotificationType.error =>
-        color = Console.RED
-        println(s"$color${Console.BOLD}===> ${notification.line} ${Console.RESET}")
-        println(s"$color${Console.BOLD}===> ${notification.msg} ${Console.RESET}")
-
-    }
-
-  }
-}
-
diff --git a/leader/src/main/scala/org/apache/amaterasu/leader/utilities/HttpServer.scala b/leader/src/main/scala/org/apache/amaterasu/leader/utilities/HttpServer.scala
index 9dea8de..4aacd38 100644
--- a/leader/src/main/scala/org/apache/amaterasu/leader/utilities/HttpServer.scala
+++ b/leader/src/main/scala/org/apache/amaterasu/leader/utilities/HttpServer.scala
@@ -57,7 +57,7 @@
   }
 
   def stop() {
-    if (server == null) throw new IllegalStateException("Server not started")
+    if (server == null) throw new IllegalStateException("Server not Started")
 
     server.stop()
     server = null
@@ -65,7 +65,7 @@
 
   def initLogging(): Unit = {
     System.setProperty("org.eclipse.jetty.util.log.class", classOf[StdErrLog].getName)
-    Logger.getLogger("org.eclipse.jetty").setLevel(Level.OFF)
+    Logger.getLogger("org.eclipse.jetty").setLevel(Level.ALL)
     Logger.getLogger("org.eclipse.jetty.websocket").setLevel(Level.OFF)
   }
 
diff --git a/leader/src/main/scala/org/apache/amaterasu/leader/yarn/ApplicationMaster.scala b/leader/src/main/scala/org/apache/amaterasu/leader/yarn/ApplicationMaster.scala
deleted file mode 100644
index 23700f8..0000000
--- a/leader/src/main/scala/org/apache/amaterasu/leader/yarn/ApplicationMaster.scala
+++ /dev/null
@@ -1,485 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.amaterasu.leader.yarn
-
-import java.io.{File, FileInputStream, InputStream}
-import java.net.{InetAddress, ServerSocket}
-import java.nio.ByteBuffer
-import java.util
-import java.util.concurrent.{ConcurrentHashMap, LinkedBlockingQueue}
-import javax.jms.Session
-
-import org.apache.activemq.ActiveMQConnectionFactory
-import org.apache.activemq.broker.BrokerService
-import org.apache.amaterasu.common.configuration.ClusterConfig
-import org.apache.amaterasu.common.dataobjects.ActionData
-import org.apache.amaterasu.common.logging.Logging
-import org.apache.amaterasu.leader.execution.frameworks.FrameworkProvidersFactory
-import org.apache.amaterasu.leader.execution.{JobLoader, JobManager}
-import org.apache.amaterasu.leader.utilities.{ActiveReportListener, Args}
-import org.apache.curator.framework.recipes.barriers.DistributedBarrier
-import org.apache.curator.framework.{CuratorFramework, CuratorFrameworkFactory}
-import org.apache.curator.retry.ExponentialBackoffRetry
-import org.apache.hadoop.fs.{FileSystem, Path}
-import org.apache.hadoop.io.DataOutputBuffer
-import org.apache.hadoop.security.UserGroupInformation
-import org.apache.hadoop.yarn.api.records._
-import org.apache.hadoop.yarn.client.api.AMRMClient.ContainerRequest
-import org.apache.hadoop.yarn.client.api.async.impl.NMClientAsyncImpl
-import org.apache.hadoop.yarn.client.api.async.{AMRMClientAsync, NMClientAsync}
-import org.apache.hadoop.yarn.conf.YarnConfiguration
-import org.apache.hadoop.yarn.security.AMRMTokenIdentifier
-import org.apache.hadoop.yarn.util.{ConverterUtils, Records}
-import org.apache.zookeeper.CreateMode
-
-import scala.collection.JavaConversions._
-import scala.collection.JavaConverters._
-import scala.collection.{concurrent, mutable}
-import scala.concurrent.ExecutionContext.Implicits.global
-import scala.concurrent.Future
-import scala.util.{Failure, Success}
-
-class ApplicationMaster extends Logging with AMRMClientAsync.CallbackHandler {
-
-  var capability: Resource = _
-
-  log.info("ApplicationMaster start")
-
-  private var jobManager: JobManager = _
-  private var client: CuratorFramework = _
-  private var config: ClusterConfig = _
-  private var env: String = _
-  private var branch: String = _
-  private var fs: FileSystem = _
-  private var conf: YarnConfiguration = _
-  private var propPath: String = ""
-  private var props: InputStream = _
-  private var jarPath: Path = _
-  private var executorPath: Path = _
-  private var executorJar: LocalResource = _
-  private var propFile: LocalResource = _
-  private var log4jPropFile: LocalResource = _
-  private var nmClient: NMClientAsync = _
-  private var allocListener: YarnRMCallbackHandler = _
-  private var rmClient: AMRMClientAsync[ContainerRequest] = _
-  private var address: String = _
-
-  private val containersIdsToTask: concurrent.Map[Long, ActionData] = new ConcurrentHashMap[Long, ActionData].asScala
-  private val completedContainersAndTaskIds: concurrent.Map[Long, String] = new ConcurrentHashMap[Long, String].asScala
-  private val actionsBuffer: java.util.concurrent.ConcurrentLinkedQueue[ActionData] = new java.util.concurrent.ConcurrentLinkedQueue[ActionData]()
-  private val host: String = InetAddress.getLocalHost.getHostName
-  private val broker: BrokerService = new BrokerService()
-
-  def setLocalResourceFromPath(path: Path): LocalResource = {
-
-    val stat = fs.getFileStatus(path)
-    val fileResource = Records.newRecord(classOf[LocalResource])
-
-    fileResource.setShouldBeUploadedToSharedCache(true)
-    fileResource.setVisibility(LocalResourceVisibility.PUBLIC)
-    fileResource.setResource(ConverterUtils.getYarnUrlFromPath(path))
-    fileResource.setSize(stat.getLen)
-    fileResource.setTimestamp(stat.getModificationTime)
-    fileResource.setType(LocalResourceType.FILE)
-    fileResource.setVisibility(LocalResourceVisibility.PUBLIC)
-    fileResource
-
-  }
-
-  def execute(arguments: Args): Unit = {
-
-    log.info(s"started AM with args $arguments")
-
-    propPath = System.getenv("PWD") + "/amaterasu.properties"
-    props = new FileInputStream(new File(propPath))
-
-    // no need for hdfs double check (nod to Aaron Rodgers)
-    // jars on HDFS should have been verified by the YARN client
-    conf = new YarnConfiguration()
-    fs = FileSystem.get(conf)
-
-    config = ClusterConfig(props)
-
-    try {
-      initJob(arguments)
-    } catch {
-      case e: Exception => log.error("error initializing ", e.getMessage)
-    }
-
-    // now that the job was initiated, the curator client is started and we can
-    // register the broker's address
-    client.create().withMode(CreateMode.PERSISTENT).forPath(s"/${jobManager.jobId}/broker")
-    client.setData().forPath(s"/${jobManager.jobId}/broker", address.getBytes)
-
-    // once the broker is registered, we can remove the barrier so clients can connect
-    log.info(s"/${jobManager.jobId}-report-barrier")
-    val barrier = new DistributedBarrier(client, s"/${jobManager.jobId}-report-barrier")
-    barrier.removeBarrier()
-
-    setupMessaging(jobManager.jobId)
-
-    log.info(s"Job ${jobManager.jobId} initiated with ${jobManager.registeredActions.size} actions")
-
-    jarPath = new Path(config.YARN.hdfsJarsPath)
-
-    // TODO: change this to read all dist folder and add to exec path
-    executorPath = Path.mergePaths(jarPath, new Path(s"/dist/executor-${config.version}-all.jar"))
-    log.info("Executor jar path is {}", executorPath)
-    executorJar = setLocalResourceFromPath(executorPath)
-    propFile = setLocalResourceFromPath(Path.mergePaths(jarPath, new Path("/amaterasu.properties")))
-    log4jPropFile = setLocalResourceFromPath(Path.mergePaths(jarPath, new Path("/log4j.properties")))
-
-    log.info("Started execute")
-
-    nmClient = new NMClientAsyncImpl(new YarnNMCallbackHandler())
-
-    // Initialize clients to ResourceManager and NodeManagers
-    nmClient.init(conf)
-    nmClient.start()
-
-    // TODO: awsEnv currently set to empty string. should be changed to read values from (where?).
-    allocListener = new YarnRMCallbackHandler(nmClient, jobManager, env, awsEnv = "", config, executorJar)
-
-    rmClient = startRMClient()
-    val registrationResponse = registerAppMaster("", 0, "")
-    val maxMem = registrationResponse.getMaximumResourceCapability.getMemory
-    log.info("Max mem capability of resources in this cluster " + maxMem)
-    val maxVCores = registrationResponse.getMaximumResourceCapability.getVirtualCores
-    log.info("Max vcores capability of resources in this cluster " + maxVCores)
-    log.info(s"Created jobManager. jobManager.registeredActions.size: ${jobManager.registeredActions.size}")
-
-    // Resource requirements for worker containers
-    this.capability = Records.newRecord(classOf[Resource])
-    val frameworkFactory = FrameworkProvidersFactory.apply(env, config)
-
-    while (!jobManager.outOfActions) {
-      val actionData = jobManager.getNextActionData
-      if (actionData != null) {
-
-        val frameworkProvider = frameworkFactory.providers(actionData.getGroupId)
-        val driverConfiguration = frameworkProvider.getDriverConfiguration
-
-        var mem: Int = driverConfiguration.getMemory
-        mem = Math.min(mem, maxMem)
-        this.capability.setMemory(mem)
-
-        var cpu = driverConfiguration.getCPUs
-        cpu = Math.min(cpu, maxVCores)
-        this.capability.setVirtualCores(cpu)
-
-        askContainer(actionData)
-      }
-    }
-
-    log.info("Finished asking for containers")
-  }
-
-  private def startRMClient(): AMRMClientAsync[ContainerRequest] = {
-    val client = AMRMClientAsync.createAMRMClientAsync[ContainerRequest](1000, this)
-    client.init(conf)
-    client.start()
-    client
-  }
-
-  private def registerAppMaster(host: String, port: Int, url: String) = {
-    // Register with ResourceManager
-    log.info("Registering application")
-    val registrationResponse = rmClient.registerApplicationMaster(host, port, url)
-    log.info("Registered application")
-    registrationResponse
-  }
-
-  private def setupMessaging(jobId: String): Unit = {
-
-    val cf = new ActiveMQConnectionFactory(address)
-    val conn = cf.createConnection()
-    conn.start()
-
-    val session = conn.createSession(false, Session.AUTO_ACKNOWLEDGE)
-    //TODO: move to a const in common
-    val destination = session.createTopic("JOB.REPORT")
-
-    val consumer = session.createConsumer(destination)
-    consumer.setMessageListener(new ActiveReportListener)
-
-  }
-
-  private def askContainer(actionData: ActionData): Unit = {
-
-    actionsBuffer.add(actionData)
-    log.info(s"About to ask container for action ${actionData.getId}. Action buffer size is: ${actionsBuffer.size()}")
-
-    // we have an action to schedule, let's request a container
-    val priority: Priority = Records.newRecord(classOf[Priority])
-    priority.setPriority(1)
-    val containerReq = new ContainerRequest(capability, null, null, priority)
-    rmClient.addContainerRequest(containerReq)
-    log.info(s"Asked container for action ${actionData.getId}")
-
-  }
-
-  override def onContainersAllocated(containers: util.List[Container]): Unit = {
-
-    log.info(s"${containers.size()} Containers allocated")
-    for (container <- containers.asScala) { // Launch container by create ContainerLaunchContext
-      if (actionsBuffer.isEmpty) {
-        log.warn(s"Why actionBuffer empty and i was called?. Container ids: ${containers.map(c => c.getId.getContainerId)}")
-        return
-      }
-
-      val actionData = actionsBuffer.poll()
-      val containerTask = Future[ActionData] {
-
-        val frameworkFactory = FrameworkProvidersFactory(env, config)
-        val framework = frameworkFactory.getFramework(actionData.getGroupId)
-        val runnerProvider = framework.getRunnerProvider(actionData.getTypeId)
-        val ctx = Records.newRecord(classOf[ContainerLaunchContext])
-        val commands: List[String] = List(runnerProvider.getCommand(jobManager.jobId, actionData, env, s"${actionData.getId}-${container.getId.getContainerId}", address))
-
-        log.info("Running container id {}.", container.getId.getContainerId)
-        log.info("Running container id {} with command '{}'", container.getId.getContainerId, commands.last)
-
-        ctx.setCommands(commands)
-        ctx.setTokens(allTokens)
-
-        val yarnJarPath = new Path(config.YARN.hdfsJarsPath)
-
-        //TODO Arun - Remove the hardcoding of the dist path
-        /*  val resources = mutable.Map[String, LocalResource]()
-          val binaryFileIter = fs.listFiles(new Path(s"${config.YARN.hdfsJarsPath}/dist"), false)
-          while (binaryFileIter.hasNext) {
-            val eachFile = binaryFileIter.next().getPath
-            resources (eachFile.getName) = setLocalResourceFromPath(fs.makeQualified(eachFile))
-          }
-          resources("log4j.properties") = setLocalResourceFromPath(fs.makeQualified(new Path(s"${config.YARN.hdfsJarsPath}/log4j.properties")))
-          resources ("amaterasu.properties") = setLocalResourceFromPath(fs.makeQualified(new Path(s"${config.YARN.hdfsJarsPath}/amaterasu.properties")))*/
-
-        val resources = mutable.Map[String, LocalResource](
-          "executor.jar" -> setLocalResourceFromPath(Path.mergePaths(yarnJarPath, new Path(s"/dist/executor-${config.version}-all.jar"))),
-          "spark-runner.jar" -> setLocalResourceFromPath(Path.mergePaths(yarnJarPath, new Path(s"/dist/spark-runner-${config.version}-all.jar"))),
-          "spark-runtime.jar" -> setLocalResourceFromPath(Path.mergePaths(yarnJarPath, new Path(s"/dist/spark-runtime-${config.version}.jar"))),
-          "amaterasu.properties" -> setLocalResourceFromPath(Path.mergePaths(yarnJarPath, new Path("/amaterasu.properties"))),
-          "log4j.properties" -> setLocalResourceFromPath(Path.mergePaths(yarnJarPath, new Path("/log4j.properties"))),
-          // TODO: Nadav/Eyal all of these should move to the executor resource setup
-          "miniconda.sh" -> setLocalResourceFromPath(Path.mergePaths(yarnJarPath, new Path("/dist/miniconda.sh"))),
-          "codegen.py" -> setLocalResourceFromPath(Path.mergePaths(yarnJarPath, new Path("/dist/codegen.py"))),
-          "runtime.py" -> setLocalResourceFromPath(Path.mergePaths(yarnJarPath, new Path("/dist/runtime.py"))),
-          "spark-version-info.properties" -> setLocalResourceFromPath(Path.mergePaths(yarnJarPath, new Path("/dist/spark-version-info.properties"))),
-          "spark_intp.py" -> setLocalResourceFromPath(Path.mergePaths(yarnJarPath, new Path("/dist/spark_intp.py"))))
-
-        //adding the framework and executor resources
-        setupResources(yarnJarPath, framework.getGroupIdentifier, resources, framework.getGroupIdentifier)
-        setupResources(yarnJarPath, s"${framework.getGroupIdentifier}/${actionData.getTypeId}", resources, s"${framework.getGroupIdentifier}-${actionData.getTypeId}")
-
-        ctx.setLocalResources(resources)
-
-        ctx.setEnvironment(Map[String, String](
-          "HADOOP_CONF_DIR" -> s"${config.YARN.hadoopHomeDir}/conf/",
-          "YARN_CONF_DIR" -> s"${config.YARN.hadoopHomeDir}/conf/",
-          "AMA_NODE" -> sys.env("AMA_NODE"),
-          "HADOOP_USER_NAME" -> UserGroupInformation.getCurrentUser.getUserName
-        ))
-
-        log.info(s"hadoop conf dir is ${config.YARN.hadoopHomeDir}/conf/")
-        nmClient.startContainerAsync(container, ctx)
-        actionData
-      }
-
-      containerTask onComplete {
-        case Failure(t) =>
-          log.error(s"launching container failed", t)
-          askContainer(actionData)
-
-        case Success(requestedActionData) =>
-          jobManager.actionStarted(requestedActionData.getId)
-          containersIdsToTask.put(container.getId.getContainerId, requestedActionData)
-          log.info(s"launching container succeeded: ${container.getId.getContainerId}; task: ${requestedActionData.getId}")
-
-      }
-    }
-  }
-
-  private def allTokens: ByteBuffer = {
-    // creating the credentials for container execution
-    val credentials = UserGroupInformation.getCurrentUser.getCredentials
-    val dob = new DataOutputBuffer
-    credentials.writeTokenStorageToStream(dob)
-
-    // removing the AM->RM token so that containers cannot access it.
-    val iter = credentials.getAllTokens.iterator
-    log.info("Executing with tokens:")
-    for (token <- iter) {
-      log.info(token.toString)
-      if (token.getKind == AMRMTokenIdentifier.KIND_NAME) iter.remove()
-    }
-    ByteBuffer.wrap(dob.getData, 0, dob.getLength)
-  }
-
-  private def setupResources(yarnJarPath: Path, frameworkPath: String, countainerResources: mutable.Map[String, LocalResource], resourcesPath: String): Unit = {
-
-    val sourcePath = Path.mergePaths(yarnJarPath, new Path(s"/$resourcesPath"))
-
-    if (fs.exists(sourcePath)) {
-
-      val files = fs.listFiles(sourcePath, true)
-
-      while (files.hasNext) {
-        val res = files.next()
-        val containerPath = res.getPath.toUri.getPath.replace("/apps/amaterasu/", "")
-        countainerResources.put(containerPath, setLocalResourceFromPath(res.getPath))
-      }
-    }
-  }
-
-  def stopApplication(finalApplicationStatus: FinalApplicationStatus, appMessage: String): Unit = {
-    import java.io.IOException
-
-    import org.apache.hadoop.yarn.exceptions.YarnException
-    try
-      rmClient.unregisterApplicationMaster(finalApplicationStatus, appMessage, null)
-    catch {
-      case ex: YarnException =>
-        log.error("Failed to unregister application", ex)
-      case e: IOException =>
-        log.error("Failed to unregister application", e)
-    }
-    rmClient.stop()
-    nmClient.stop()
-  }
-
-  override def onContainersCompleted(statuses: util.List[ContainerStatus]): Unit = {
-
-    for (status <- statuses.asScala) {
-
-      if (status.getState == ContainerState.COMPLETE) {
-
-        val containerId = status.getContainerId.getContainerId
-        val task = containersIdsToTask(containerId)
-        rmClient.releaseAssignedContainer(status.getContainerId)
-
-        val taskId = task.getId
-        if (status.getExitStatus == 0) {
-
-          //completedContainersAndTaskIds.put(containerId, task.id)
-          jobManager.actionComplete(taskId)
-          log.info(s"Container $containerId complete with task ${taskId} with success.")
-        } else {
-          // TODO: Check the getDiagnostics value and see if appropriate
-          jobManager.actionFailed(taskId, status.getDiagnostics)
-          log.warn(s"Container $containerId complete with task ${taskId} with failed status code (${status.getExitStatus})")
-        }
-      }
-    }
-
-    if (jobManager.outOfActions) {
-      log.info("Finished all tasks successfully! Wow!")
-      jobManager.actionsCount()
-      stopApplication(FinalApplicationStatus.SUCCEEDED, "SUCCESS")
-    } else {
-      log.info(s"jobManager.registeredActions.size: ${jobManager.registeredActions.size}; completedContainersAndTaskIds.size: ${completedContainersAndTaskIds.size}")
-    }
-  }
-
-  override def getProgress: Float = {
-    jobManager.registeredActions.size.toFloat / completedContainersAndTaskIds.size
-  }
-
-  override def onNodesUpdated(updatedNodes: util.List[NodeReport]): Unit = {
-    log.info("Nodes change. Nothing to report.")
-  }
-
-  override def onShutdownRequest(): Unit = {
-    log.error("Shutdown requested.")
-    stopApplication(FinalApplicationStatus.KILLED, "Shutdown requested")
-  }
-
-  override def onError(e: Throwable): Unit = {
-    log.error("Error on AM", e)
-    stopApplication(FinalApplicationStatus.FAILED, "Error on AM")
-  }
-
-  def initJob(args: Args): Unit = {
-
-    this.env = args.env
-    this.branch = args.branch
-    try {
-      val retryPolicy = new ExponentialBackoffRetry(1000, 3)
-      client = CuratorFrameworkFactory.newClient(config.zk, retryPolicy)
-      client.start()
-    } catch {
-      case e: Exception =>
-        log.error("Error connecting to zookeeper", e)
-        throw e
-    }
-    if (args.jobId != null && !args.jobId.isEmpty) {
-      log.info("resuming job" + args.jobId)
-      jobManager = JobLoader.reloadJob(
-        args.jobId,
-        client,
-        config.Jobs.Tasks.attempts,
-        new LinkedBlockingQueue[ActionData])
-
-    } else {
-      log.info("new job is being created")
-      try {
-
-        jobManager = JobLoader.loadJob(
-          args.repo,
-          args.branch,
-          args.newJobId,
-          client,
-          config.Jobs.Tasks.attempts,
-          new LinkedBlockingQueue[ActionData])
-      } catch {
-        case e: Exception =>
-          log.error("Error creating JobManager.", e)
-          throw e
-      }
-
-    }
-
-    jobManager.start()
-    log.info("started jobManager")
-  }
-}
-
-object ApplicationMaster extends Logging with App {
-
-
-  val parser = Args.getParser
-  parser.parse(args, Args()) match {
-
-    case Some(arguments: Args) =>
-      val appMaster = new ApplicationMaster()
-
-      appMaster.address = s"tcp://${appMaster.host}:$generatePort"
-      appMaster.broker.addConnector(appMaster.address)
-      appMaster.broker.start()
-
-      log.info(s"broker started with address ${appMaster.address}")
-      appMaster.execute(arguments)
-
-    case None =>
-  }
-
-  private def generatePort: Int = {
-    val socket = new ServerSocket(0)
-    val port = socket.getLocalPort
-    socket.close()
-    port
-  }
-}
diff --git a/leader/src/main/scala/org/apache/amaterasu/leader/yarn/YarnNMCallbackHandler.scala b/leader/src/main/scala/org/apache/amaterasu/leader/yarn/YarnNMCallbackHandler.scala
deleted file mode 100644
index 23f4af6..0000000
--- a/leader/src/main/scala/org/apache/amaterasu/leader/yarn/YarnNMCallbackHandler.scala
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.amaterasu.leader.yarn
-
-import java.nio.ByteBuffer
-import java.util
-
-import org.apache.amaterasu.common.logging.Logging
-import org.apache.hadoop.yarn.api.records.{ContainerId, ContainerStatus}
-import org.apache.hadoop.yarn.client.api.async.NMClientAsync
-
-
-class YarnNMCallbackHandler extends Logging with NMClientAsync.CallbackHandler {
-
-  override def onStartContainerError(containerId: ContainerId, t: Throwable): Unit = {
-    log.error(s"Container ${containerId.getContainerId} couldn't start.", t)
-  }
-
-  override def onGetContainerStatusError(containerId: ContainerId, t: Throwable): Unit = {
-    log.error(s"Couldn't get status from container ${containerId.getContainerId}.", t)
-  }
-
-  override def onContainerStatusReceived(containerId: ContainerId, containerStatus: ContainerStatus): Unit = {
-    log.info(s"Container ${containerId.getContainerId} has status of ${containerStatus.getState}")
-  }
-
-  override def onContainerStarted(containerId: ContainerId, allServiceResponse: util.Map[String, ByteBuffer]): Unit = {
-    log.info(s"Container ${containerId.getContainerId} started")
-  }
-
-  override def onStopContainerError(containerId: ContainerId, t: Throwable): Unit = {
-    log.error(s"Container ${containerId.getContainerId} has thrown an error", t)
-  }
-
-  override def onContainerStopped(containerId: ContainerId): Unit = {
-    log.info(s"Container ${containerId.getContainerId} stopped")
-  }
-
-}
diff --git a/leader/src/main/scala/org/apache/amaterasu/leader/yarn/YarnRMCallbackHandler.scala b/leader/src/main/scala/org/apache/amaterasu/leader/yarn/YarnRMCallbackHandler.scala
deleted file mode 100644
index 379dd1b..0000000
--- a/leader/src/main/scala/org/apache/amaterasu/leader/yarn/YarnRMCallbackHandler.scala
+++ /dev/null
@@ -1,134 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.amaterasu.leader.yarn
-
-import java.util
-import java.util.Collections
-import java.util.concurrent.ConcurrentHashMap
-
-import com.google.gson.Gson
-import org.apache.amaterasu.common.configuration.ClusterConfig
-import org.apache.amaterasu.common.logging.Logging
-import org.apache.amaterasu.leader.common.utilities.DataLoader
-import org.apache.amaterasu.leader.execution.JobManager
-import org.apache.hadoop.yarn.api.records._
-import org.apache.hadoop.yarn.client.api.async.{AMRMClientAsync, NMClientAsync}
-import org.apache.hadoop.yarn.util.Records
-
-import scala.collection.JavaConversions._
-import scala.collection.JavaConverters._
-import scala.collection.concurrent
-import scala.concurrent.ExecutionContext.Implicits.global
-import scala.concurrent.{Future, _}
-import scala.util.{Failure, Success}
-
-class YarnRMCallbackHandler(nmClient: NMClientAsync,
-                            jobManager: JobManager,
-                            env: String,
-                            awsEnv: String,
-                            config: ClusterConfig,
-                            executorJar: LocalResource) extends Logging with AMRMClientAsync.CallbackHandler {
-
-
-  val gson:Gson = new Gson()
-  private val containersIdsToTaskIds: concurrent.Map[Long, String] = new ConcurrentHashMap[Long, String].asScala
-  private val completedContainersAndTaskIds: concurrent.Map[Long, String] = new ConcurrentHashMap[Long, String].asScala
-  private val failedTasksCounter: concurrent.Map[String, Int] = new ConcurrentHashMap[String, Int].asScala
-
-
-  override def onError(e: Throwable): Unit = {
-    println(s"ERROR: ${e.getMessage}")
-  }
-
-  override def onShutdownRequest(): Unit = {
-    println("Shutdown requested")
-  }
-
-  val MAX_ATTEMPTS_PER_TASK = 3
-
-  override def onContainersCompleted(statuses: util.List[ContainerStatus]): Unit = {
-    for (status <- statuses.asScala) {
-      if (status.getState == ContainerState.COMPLETE) {
-        val containerId = status.getContainerId.getContainerId
-        val taskId = containersIdsToTaskIds(containerId)
-        if (status.getExitStatus == 0) {
-          completedContainersAndTaskIds.put(containerId, taskId)
-          log.info(s"Container $containerId complete with task $taskId with success.")
-        } else {
-          log.warn(s"Container $containerId complete with task $taskId with failed status code (${status.getExitStatus}.")
-          val failedTries = failedTasksCounter.getOrElse(taskId, 0)
-          if (failedTries < MAX_ATTEMPTS_PER_TASK) {
-            // TODO: notify and ask for a new container
-            log.info("Retrying task")
-          } else {
-            log.error(s"Already tried task $taskId $MAX_ATTEMPTS_PER_TASK times. Time to say Bye-Bye.")
-            // TODO: die already
-          }
-        }
-      }
-    }
-    if (getProgress == 1F) {
-      log.info("Finished all tasks successfully! Wow!")
-    }
-  }
-
-  override def getProgress: Float = {
-    jobManager.registeredActions.size.toFloat / completedContainersAndTaskIds.size
-  }
-
-  override def onNodesUpdated(updatedNodes: util.List[NodeReport]): Unit = {
-  }
-
-  override def onContainersAllocated(containers: util.List[Container]): Unit = {
-    log.info("containers allocated")
-    for (container <- containers.asScala) { // Launch container by create ContainerLaunchContext
-      val containerTask = Future[String] {
-
-        val actionData = jobManager.getNextActionData
-        val taskData = DataLoader.getTaskData(actionData, env)
-        val execData = DataLoader.getExecutorData(env, config)
-
-        val ctx = Records.newRecord(classOf[ContainerLaunchContext])
-        val command = s"""$awsEnv env AMA_NODE=${sys.env("AMA_NODE")}
-                         | env SPARK_EXECUTOR_URI=http://${sys.env("AMA_NODE")}:${config.Webserver.Port}/dist/spark-${config.Webserver.sparkVersion}.tgz
-                         | java -cp executor.jar:spark-${config.Webserver.sparkVersion}/lib/*
-                         | -Dscala.usejavacp=true
-                         | -Djava.library.path=/usr/lib org.apache.amaterasu.executor.yarn.executors.ActionsExecutorLauncher
-                         | ${jobManager.jobId} ${config.master} ${actionData.getName} ${gson.toJson(taskData)} ${gson.toJson(execData)}""".stripMargin
-        ctx.setCommands(Collections.singletonList(command))
-
-        ctx.setLocalResources(Map[String, LocalResource] (
-          "executor.jar" -> executorJar
-        ))
-
-        nmClient.startContainerAsync(container, ctx)
-        actionData.getId
-      }
-
-      containerTask onComplete {
-        case Failure(t) => {
-          println(s"launching container failed: ${t.getMessage}")
-        }
-
-        case Success(actionDataId) => {
-          containersIdsToTaskIds.put(container.getId.getContainerId, actionDataId)
-          println(s"launching container succeeded: ${container.getId}")
-        }
-      }
-    }
-  }
-}
diff --git a/leader/src/main/scripts/ama-start-yarn.sh b/leader/src/main/scripts/ama-start-yarn.sh
index fe14f6e..0a6f63a 100755
--- a/leader/src/main/scripts/ama-start-yarn.sh
+++ b/leader/src/main/scripts/ama-start-yarn.sh
@@ -104,36 +104,36 @@
 export HADOOP_USER_CLASSPATH_FIRST=true
 export YARN_USER_CLASSPATH=${YARN_USER_CLASSPATH}:bin/*
 
-CMD="yarn jar ${BASEDIR}/bin/leader-0.2.0-incubating-rc4-all.jar org.apache.amaterasu.leader.yarn.Client --home ${BASEDIR}"
+CMD="yarn jar ${BASEDIR}/bin/leader-yarn-0.2.0-incubating-rc4-all.jar org.apache.amaterasu.leader.yarn.Client --home=${BASEDIR}"
 
 if [ -n "$REPO" ]; then
     echo "repo is ${REPO}"
-    CMD+=" --repo ${REPO}"
+    CMD+=" --repo=${REPO}"
 fi
 
 if [ -n "$BRANCH" ]; then
     echo "branch is ${BRANCH}"
-    CMD+=" --branch ${BRANCH}"
+    CMD+=" --branch=${BRANCH}"
 fi
 
 if [ -n "$ENV" ]; then
-    CMD+=" --env ${ENV}"
+    CMD+=" --env=${ENV}"
 fi
 
 if [ -n "$NAME" ]; then
-    CMD+=" --name ${NAME}"
+    CMD+=" --name=${NAME}"
 fi
 
 if [ -n "$JOBID" ]; then
-    CMD+=" --job-id ${JOBID}"
+    CMD+=" --job-id=${JOBID}"
 fi
 
 if [ -n "$REPORT" ]; then
-    CMD+=" --report ${REPORT}"
+    CMD+=" --report=${REPORT}"
 fi
 
 if [ -n "$JARPATH" ]; then
-    CMD+=" --jar-path ${JARPATH}"
+    CMD+=" --jar-path=${JARPATH}"
 fi
 
 echo $CMD
@@ -152,6 +152,7 @@
     #eval "hdfs dfs -copyFromLocal ${BASEDIR}/* /apps/amaterasu/"
 fi
 
+
 eval $CMD | grep "===>"
 kill $SERVER_PID
 
diff --git a/leader/src/test/resources/simple-maki.yml b/leader/src/test/resources/simple-maki.yml
index 151c89b..224fdd6 100755
--- a/leader/src/test/resources/simple-maki.yml
+++ b/leader/src/test/resources/simple-maki.yml
@@ -15,20 +15,24 @@
 # KIND, either express or implied.  See the License for the
 # specific language governing permissions and limitations
 # under the License.
-#---
+#
+---
 job-name:    amaterasu-test
 flow:
     - name: start
       group: spark
       type: scala
       src: simple-spark.scala
+      config: start-cfg.yaml
     - name: step2
       group: spark
       type: scala
       src: file2.scala
+      config: step2-cfg.yaml
       error:
         name: error-action
         group: spark
         type: scala
         src: error.scala
-...
\ No newline at end of file
+        config: error-cfg.yaml
+...
diff --git a/leader/src/test/scala/org/apache/amaterasu/common/execution/ActionStatusTests.scala b/leader/src/test/scala/org/apache/amaterasu/common/execution/ActionStatusTests.scala
index 197c703..ca9186c 100755
--- a/leader/src/test/scala/org/apache/amaterasu/common/execution/ActionStatusTests.scala
+++ b/leader/src/test/scala/org/apache/amaterasu/common/execution/ActionStatusTests.scala
@@ -21,12 +21,12 @@
 
 import org.apache.amaterasu.common.configuration.enums.ActionStatus
 import org.apache.amaterasu.common.dataobjects.ActionData
-import org.apache.amaterasu.leader.common.actions.SequentialAction
+import org.apache.amaterasu.leader.common.execution.actions.SequentialAction
 import org.apache.curator.framework.CuratorFrameworkFactory
 import org.apache.curator.retry.ExponentialBackoffRetry
 import org.apache.curator.test.TestingServer
 import org.apache.zookeeper.CreateMode
-import org.scalatest.{FlatSpec, Matchers}
+import org.scalatest.{DoNotDiscover, FlatSpec, Matchers}
 
 import scala.collection.JavaConverters._
 
@@ -36,7 +36,7 @@
   val retryPolicy = new ExponentialBackoffRetry(1000, 3)
   val server = new TestingServer(2181, true)
   val jobId = s"job_${System.currentTimeMillis}"
-  val data = new ActionData(ActionStatus.pending, "test_action", "start.scala", "spark","scala", "0000001", new util.HashMap() , List[String]().asJava)
+  val data = new ActionData(ActionStatus.Pending, "test_action", "start.scala", "", "spark", "scala", "0000001", new util.HashMap(), List[String]().asJava)
 
   "an Action" should "queue it's ActionData int the job queue when executed" in {
 
@@ -47,7 +47,7 @@
     client.start()
 
     client.create().withMode(CreateMode.PERSISTENT).forPath(s"/$jobId")
-    val action = SequentialAction(data.getName, data.getSrc, data.getGroupId, data.getTypeId, Map.empty, jobId, queue, client, 1)
+    val action = new SequentialAction(data.getName, data.getSrc, "", data.getGroupId, data.getTypeId, Map.empty[String, String].asJava, jobId, queue, client, 1)
 
     action.execute()
     queue.peek().getName should be(data.getName)
@@ -55,7 +55,7 @@
 
   }
 
-  it should "also create a sequential znode for the task with the value of queued" in {
+  it should "also create a sequential znode for the task with the value of Queued" in {
 
     val client = CuratorFrameworkFactory.newClient(server.getConnectString, retryPolicy)
     client.start()
@@ -63,7 +63,7 @@
     val taskStatus = client.getData.forPath(s"/$jobId/task-0000000000")
 
     taskStatus should not be null
-    new String(taskStatus) should be("queued")
+    new String(taskStatus) should be("Queued")
 
   }
 
diff --git a/leader/src/test/scala/org/apache/amaterasu/common/execution/JobExecutionTests.scala b/leader/src/test/scala/org/apache/amaterasu/common/execution/JobExecutionTests.scala
index ef47cc1..eb9c1dc 100755
--- a/leader/src/test/scala/org/apache/amaterasu/common/execution/JobExecutionTests.scala
+++ b/leader/src/test/scala/org/apache/amaterasu/common/execution/JobExecutionTests.scala
@@ -19,7 +19,8 @@
 import java.util.concurrent.LinkedBlockingQueue
 
 import org.apache.amaterasu.common.dataobjects.ActionData
-import org.apache.amaterasu.leader.dsl.JobParser
+import org.apache.amaterasu.leader.common.dsl.JobParser
+import org.apache.amaterasu.leader.common.execution.actions.Action
 import org.apache.curator.framework.CuratorFrameworkFactory
 import org.apache.curator.retry.ExponentialBackoffRetry
 import org.apache.curator.test.TestingServer
@@ -40,76 +41,101 @@
   val queue = new LinkedBlockingQueue[ActionData]()
 
   // this will be performed by the job bootstrapper
+
   client.create().withMode(CreateMode.PERSISTENT).forPath(s"/$jobId")
   //  client.setData().forPath(s"/$jobId/src",src.getBytes)
   //  client.setData().forPath(s"/$jobId/branch", branch.getBytes)
 
+
   val job = JobParser.parse(jobId, yaml, queue, client, 1)
 
   "a job" should "queue the first action when the JobManager.start method is called " in {
 
     job.start
-    queue.peek.getName should be ("start")
+
+    queue.peek.getName should be("start")
 
     // making sure that the status is reflected in zk
     val actionStatus = client.getData.forPath(s"/$jobId/task-0000000000")
-    new String(actionStatus) should be("queued")
+    new String(actionStatus) should be("Queued")
 
   }
 
   it should "return the start action when calling getNextAction and dequeue it" in {
 
-    job.getNextActionData.getName should be ("start")
-    queue.size should be (0)
+    job.getNextActionData.getName should be("start")
+    queue.size should be(0)
 
     // making sure that the status is reflected in zk
     val actionStatus = client.getData.forPath(s"/$jobId/task-0000000000")
-    new String(actionStatus) should be("started")
-
+    new String(actionStatus) should be("Started")
   }
 
-  it should "be marked as complete when the actionComplete method is called" in {
+  it should "not be out of actions when an action is still Pending" in {
+    job.getOutOfActions should be(false)
+  }
+
+  it should "be marked as Complete when the actionComplete method is called" in {
 
     job.actionComplete("0000000000")
 
     // making sure that the status is reflected in zk
     val actionStatus = client.getData.forPath(s"/$jobId/task-0000000000")
-    new String(actionStatus) should be("complete")
+
+    new String(actionStatus) should be("Complete")
 
   }
 
-  "the next step2 job" should "be queued as a result of the completion" in {
+  "the next step2 job" should "be Queued as a result of the completion" in {
 
-    queue.peek.getName should be ("step2")
+    queue.peek.getName should be("step2")
 
     // making sure that the status is reflected in zk
     val actionStatus = client.getData.forPath(s"/$jobId/task-0000000001")
-    new String(actionStatus) should be("queued")
+    new String(actionStatus) should be("Queued")
 
   }
 
-  it should "be marked as started when JobManager.getNextActionData is called" in {
+  it should "be marked as Started when JobManager.getNextActionData is called" in {
 
     val data = job.getNextActionData
 
-    data.getName should be ("step2")
+    data.getName should be("step2")
 
     // making sure that the status is reflected in zk
     val actionStatus = client.getData.forPath(s"/$jobId/task-0000000001")
-    new String(actionStatus) should be("started")
+    new String(actionStatus) should be("Started")
   }
 
-  it should "be marked as failed when JobManager. is called" in {
+  it should "be marked as Failed when JobManager.actionFailed is called" in {
 
     job.actionFailed("0000000001", "test failure")
-    queue.peek.getName should be ("error-action")
+    queue.peek.getName should be("error-action")
+  }
 
+  "an ErrorAction" should "be queued if one exist" in {
     // making sure that the status is reflected in zk
     val actionStatus = client.getData.forPath(s"/$jobId/task-0000000001-error")
-    new String(actionStatus) should be("queued")
+    new String(actionStatus) should be("Queued")
 
     // and returned by getNextActionData
     val data = job.getNextActionData
+    data.getName should be("error-action")
 
   }
+
+  it should "be marked as Complete when the actionComplete method is called" in {
+
+    job.actionComplete("0000000001-error")
+
+    // making sure that the status is reflected in zk
+    val actionStatus = client.getData.forPath(s"/$jobId/task-0000000001-error")
+
+    new String(actionStatus) should be("Complete")
+
+  }
+
+  it should " be out of actions when all actions have been executed" in {
+
+    job.getOutOfActions should be(true)
+  }
 }
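The new getOutOfActions assertions (false while an action is still Pending, true once everything has run) imply JobManager can now report queue exhaustion. A plausible Kotlin sketch of such a check, stated as an assumption rather than the actual implementation:

    // Hypothetical: a job is out of actions when nothing is queued and
    // every registered action has reached a terminal status (enum as
    // sketched above).
    fun isOutOfActions(statuses: Collection<ActionStatus>, queueSize: Int): Boolean =
        queueSize == 0 && statuses.all {
            it == ActionStatus.Complete || it == ActionStatus.Failed
        }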
diff --git a/leader/src/test/scala/org/apache/amaterasu/common/execution/JobParserTests.scala b/leader/src/test/scala/org/apache/amaterasu/common/execution/JobParserTests.scala
index 13685f9..5987b35 100755
--- a/leader/src/test/scala/org/apache/amaterasu/common/execution/JobParserTests.scala
+++ b/leader/src/test/scala/org/apache/amaterasu/common/execution/JobParserTests.scala
@@ -19,7 +19,7 @@
 import java.util.concurrent.LinkedBlockingQueue
 
 import org.apache.amaterasu.common.dataobjects.ActionData
-import org.apache.amaterasu.leader.dsl.JobParser
+import org.apache.amaterasu.leader.common.dsl.JobParser
 import org.apache.curator.framework.CuratorFrameworkFactory
 import org.apache.curator.retry.ExponentialBackoffRetry
 import org.apache.curator.test.TestingServer
@@ -31,34 +31,44 @@
 class JobParserTests extends FlatSpec with Matchers {
 
   val retryPolicy = new ExponentialBackoffRetry(1000, 3)
-  val server = new TestingServer(2182, true)
+  val server = new TestingServer(2187, true)
   val client = CuratorFrameworkFactory.newClient(server.getConnectString, retryPolicy)
   client.start()
 
-  val jobId = s"job_${System.currentTimeMillis}"
-  val yaml = Source.fromURL(getClass.getResource("/simple-maki.yml")).mkString
-  val queue = new LinkedBlockingQueue[ActionData]()
+  private val jobId = s"job_${System.currentTimeMillis}"
+  private val yaml = Source.fromURL(getClass.getResource("/simple-maki.yml")).mkString
+  private val queue = new LinkedBlockingQueue[ActionData]()
 
   // this will be performed by the job bootstrapper
   client.create().withMode(CreateMode.PERSISTENT).forPath(s"/$jobId")
 
-  val job = JobParser.parse(jobId, yaml, queue, client, 1)
+  private val job = JobParser.parse(jobId, yaml, queue, client, 1)
 
   "JobParser" should "parse the simple-maki.yml" in {
 
-    job.name should be("amaterasu-test")
+    job.getName should be("amaterasu-test")
 
   }
 
  //TODO: I suspect this test is not indicative and that order is assured; need to verify this
   it should "also have two actions in the right order" in {
 
-    job.registeredActions.size should be(3)
+    job.getRegisteredActions.size should be(3)
 
-    job.registeredActions.get("0000000000").get.data.getName should be("start")
-    job.registeredActions.get("0000000001").get.data.getName should be("step2")
-    job.registeredActions.get("0000000001-error").get.data.getName should be("error-action")
+    job.getRegisteredActions.get("0000000000").data.getName should be("start")
+    job.getRegisteredActions.get("0000000001").data.getName should be("step2")
+    job.getRegisteredActions.get("0000000001-error").data.getName should be("error-action")
 
   }
 
-}
\ No newline at end of file
+  it should "Action 'config' is parsed successfully" in {
+
+    job.getRegisteredActions.size should be(3)
+
+    job.getRegisteredActions.get("0000000000").data.getConfig should be("start-cfg.yaml")
+    job.getRegisteredActions.get("0000000001").data.getConfig should be("step2-cfg.yaml")
+    job.getRegisteredActions.get("0000000001-error").data.getConfig should be("error-cfg.yaml")
+
+  }
+
+}
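The new assertions mean JobParser now reads a per-action config attribute alongside src. A hypothetical fragment of simple-maki.yml consistent with what the test expects (the real resource is not part of this diff, and every key name other than config is an assumption):

    # hypothetical simple-maki.yml fragment
    job-name: amaterasu-test
    flow:
      - name: start
        src: start.scala
        config: start-cfg.yaml
      - name: step2
        src: step2.scala
        config: step2-cfg.yaml
        error:
          name: error-action
          src: error.scala
          config: error-cfg.yaml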
diff --git a/leader/src/test/scala/org/apache/amaterasu/common/execution/JobRestoreTests.scala b/leader/src/test/scala/org/apache/amaterasu/common/execution/JobRestoreTests.scala
index 64887ab..7e78350 100755
--- a/leader/src/test/scala/org/apache/amaterasu/common/execution/JobRestoreTests.scala
+++ b/leader/src/test/scala/org/apache/amaterasu/common/execution/JobRestoreTests.scala
@@ -20,12 +20,13 @@
 
 import org.apache.amaterasu.common.configuration.enums.ActionStatus
 import org.apache.amaterasu.common.dataobjects.ActionData
-import org.apache.amaterasu.leader.execution.{JobLoader, JobManager}
+import org.apache.amaterasu.leader.common.execution.JobManager
+import org.apache.amaterasu.leader.execution.JobLoader
 import org.apache.curator.framework.{CuratorFramework, CuratorFrameworkFactory}
 import org.apache.curator.retry.ExponentialBackoffRetry
 import org.apache.curator.test.TestingServer
 import org.apache.zookeeper.CreateMode
-import org.scalatest.{BeforeAndAfterEach, FlatSpec, Matchers}
+import org.scalatest.{BeforeAndAfterEach, DoNotDiscover, FlatSpec, Matchers}
 
 import scala.io.Source
 
@@ -61,23 +62,23 @@
 
   }
 
-  "a restored job" should "have all queued actions in the executionQueue" in {
+  "a restored job" should "have all Queued actions in the executionQueue" in {
 
-    // setting task-0000000002 as queued
-    client.setData().forPath(s"/${jobId}/task-0000000002", ActionStatus.queued.toString.getBytes)
+    // setting task-0000000002 as Queued
+    client.setData().forPath(s"/${jobId}/task-0000000002", ActionStatus.Queued.toString.getBytes)
 
     JobLoader.restoreJobState(manager, jobId, client)
 
     queue.peek.getName should be("start")
   }
 
-  "a restored job" should "have all started actions in the executionQueue" in {
+  "a restored job" should "have all Started actions in the executionQueue" in {
 
-    // setting task-0000000002 as queued
-    client.setData().forPath(s"/${jobId}/task-0000000002", ActionStatus.started.toString.getBytes)
+    // setting task-0000000002 as Queued
+    client.setData().forPath(s"/${jobId}/task-0000000002", ActionStatus.Started.toString.getBytes)
 
     JobLoader.restoreJobState(manager, jobId, client)
 
     queue.peek.getName should be("start")
   }
-}
\ No newline at end of file
+}
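Both restore cases funnel Queued and Started tasks back into the execution queue. The requeue rule these tests pin down can be sketched as follows (an assumption, not the actual JobLoader code):

    // Hypothetical restore predicate: non-terminal actions re-enter
    // the queue so a restarted leader can resume them.
    fun shouldRequeueOnRestore(status: ActionStatus): Boolean =
        status == ActionStatus.Queued || status == ActionStatus.Started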
diff --git a/leader/src/test/scala/org/apache/amaterasu/leader/mesos/ClusterSchedulerTests.scala b/leader/src/test/scala/org/apache/amaterasu/leader/mesos/ClusterSchedulerTests.scala
deleted file mode 100755
index af42677..0000000
--- a/leader/src/test/scala/org/apache/amaterasu/leader/mesos/ClusterSchedulerTests.scala
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.amaterasu.leader.mesos
-
-import org.apache.amaterasu.common.configuration.ClusterConfig
-import org.apache.amaterasu.leader.Kami
-import org.apache.amaterasu.leader.mesos.schedulers.ClusterScheduler
-import org.scalatest._
-
-class ClusterSchedulerTests extends FlatSpec with Matchers {
-
-  "an offer" should "be accepted if has enough resources" in {
-
-    val kami = Kami()
-    val config = ClusterConfig(getClass.getResourceAsStream("/amaterasu.properties"))
-    config.Jobs.cpus = 1
-    config.Jobs.mem = 1024
-    config.Jobs.repoSize = 1024
-
-    val scheduler = ClusterScheduler(kami, config)
-    val offer = MesosTestUtil.createOffer(2000, 2000, 2)
-    val res = scheduler.validateOffer(offer)
-
-    res should be(true)
-
-  }
-
-  it should "not be accepted if has missing resources" in {
-
-    val kami = Kami()
-    val config = ClusterConfig(getClass.getResourceAsStream("/amaterasu.properties"))
-    config.Jobs.cpus = 1
-    config.Jobs.mem = 1024
-    config.Jobs.repoSize = 1024
-
-    val scheduler = ClusterScheduler(kami, config)
-    val offer = MesosTestUtil.createOffer(2000, 128, 2)
-    val res = scheduler.validateOffer(offer)
-
-    res should be(false)
-
-  }
-
-}
\ No newline at end of file
diff --git a/project/build.properties b/project/build.properties
new file mode 100644
index 0000000..1fc4b80
--- /dev/null
+++ b/project/build.properties
@@ -0,0 +1 @@
+sbt.version=1.2.8
\ No newline at end of file
diff --git a/sdk/build.gradle b/sdk/build.gradle
index c5378b8..3cc9227 100644
--- a/sdk/build.gradle
+++ b/sdk/build.gradle
@@ -14,17 +14,81 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+buildscript {
+    ext.kotlin_version = '1.3.21'
+
+    repositories {
+        mavenCentral()
+        maven {
+            url 'http://repository.jetbrains.com/all'
+        }
+        maven {
+            url "https://jetbrains.jfrog.io/jetbrains/spek-snapshots"
+        }
+    }
+
+    dependencies {
+        classpath "org.jetbrains.kotlin:kotlin-gradle-plugin:$kotlin_version"
+        classpath 'org.junit.platform:junit-platform-gradle-plugin:1.0.0'
+    }
+}
+
 apply plugin: 'java'
+apply plugin: 'org.junit.platform.gradle.plugin'
+
 apply plugin: "kotlin"
-repositories {
-    mavenCentral()
+
+junitPlatform {
+    filters {
+        engines {
+            include 'spek'
+        }
+    }
 }
 
 sourceCompatibility = 1.8
 targetCompatibility = 1.8
 
+repositories {
+    maven { url "https://plugins.gradle.org/m2/" }
+    maven { url 'http://repository.jetbrains.com/all' }
+    maven { url "https://jetbrains.jfrog.io/jetbrains/spek-snapshots" }
+    maven { url "http://dl.bintray.com/jetbrains/spek" }
+    maven { url "http://oss.jfrog.org/artifactory/oss-snapshot-local" }
+
+    mavenCentral()
+    jcenter()
+}
+
 dependencies {
     compile project(':common')
 
     testCompile group: 'junit', name: 'junit', version: '4.11'
+    compile "org.jetbrains.kotlin:kotlin-stdlib-jdk8:$kotlin_version"
+    compile "org.jetbrains.kotlin:kotlin-reflect"
+
+    testCompile 'org.jetbrains.spek:spek-api:1.1.5'
+    testCompile "org.jetbrains.kotlin:kotlin-test-junit:$kotlin_version"
+    testRuntime 'org.jetbrains.spek:spek-junit-platform-engine:1.1.5'
+
+    // Spek requires kotlin-reflect; can be omitted if it is already on the classpath
+    testRuntimeOnly "org.jetbrains.kotlin:kotlin-reflect:$kotlin_version"
+}
+
+sourceSets {
+    test {
+        resources.srcDirs += [file('src/test/resources')]
+    }
+}
+
+compileKotlin {
+    kotlinOptions {
+        jvmTarget = "1.8"
+    }
+}
+compileTestKotlin {
+    kotlinOptions {
+        jvmTarget = "1.8"
+    }
 }
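With the (since-deprecated) junit-platform-gradle-plugin applied and the engine filter restricted to 'spek', the Spek tests added to this module under src/test/kotlin are discovered through the JUnit Platform; with that plugin the invocation would typically be something like ./gradlew :sdk:junitPlatformTest, though the exact task name should be treated as an assumption of this note rather than something this diff shows.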
diff --git a/sdk/src/main/java/org/apache/amaterasu/sdk/frameworks/RunnerSetupProvider.java b/sdk/src/main/java/org/apache/amaterasu/sdk/frameworks/RunnerSetupProvider.java
deleted file mode 100644
index fc2eb9a..0000000
--- a/sdk/src/main/java/org/apache/amaterasu/sdk/frameworks/RunnerSetupProvider.java
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.amaterasu.sdk.frameworks;
-
-import org.apache.amaterasu.common.dataobjects.ActionData;
-
-public interface RunnerSetupProvider {
-
-    String getCommand(String jobId, ActionData actionData, String env, String executorId, String callbackAddress);
-
-    String[] getRunnerResources();
-
-    String[] getActionResources(String jobId, ActionData actionData);
-
-    String[] getActionDependencies(String jobId, ActionData actionData);
-
-}
\ No newline at end of file
diff --git a/sdk/src/main/java/org/apache/amaterasu/sdk/frameworks/configuration/DriverConfiguration.java b/sdk/src/main/java/org/apache/amaterasu/sdk/frameworks/configuration/DriverConfiguration.java
deleted file mode 100644
index 8fe641c..0000000
--- a/sdk/src/main/java/org/apache/amaterasu/sdk/frameworks/configuration/DriverConfiguration.java
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.amaterasu.sdk.frameworks.configuration;
-
-public class DriverConfiguration {
-
-
-    private int memory = 0;
-    private int cpus = 0;
-
-    public DriverConfiguration(int memory, int cpus) {
-        this.memory = memory;
-        this.cpus = cpus;
-    }
-
-    public int getMemory() {
-        return memory;
-    }
-
-    public int getCPUs() {
-        return cpus;
-    }
-}
diff --git a/sdk/src/main/java/org/apache/amaterasu/sdk/frameworks/FrameworkSetupProvider.java b/sdk/src/main/kotlin/org/apache/amaterasu/sdk/frameworks/FrameworkSetupProvider.kt
similarity index 62%
rename from sdk/src/main/java/org/apache/amaterasu/sdk/frameworks/FrameworkSetupProvider.java
rename to sdk/src/main/kotlin/org/apache/amaterasu/sdk/frameworks/FrameworkSetupProvider.kt
index df150a0..d54c422 100644
--- a/sdk/src/main/java/org/apache/amaterasu/sdk/frameworks/FrameworkSetupProvider.java
+++ b/sdk/src/main/kotlin/org/apache/amaterasu/sdk/frameworks/FrameworkSetupProvider.kt
@@ -14,28 +14,27 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.amaterasu.sdk.frameworks;
+package org.apache.amaterasu.sdk.frameworks
 
-import org.apache.amaterasu.common.configuration.ClusterConfig;
-import org.apache.amaterasu.sdk.frameworks.configuration.DriverConfiguration;
+import org.apache.amaterasu.common.configuration.ClusterConfig
+import org.apache.amaterasu.sdk.frameworks.configuration.DriverConfiguration
 
-import java.io.File;
-import java.util.Map;
+import java.io.File
 
-public interface FrameworkSetupProvider {
+interface FrameworkSetupProvider {
 
-    void init(String env, ClusterConfig conf);
+    val groupIdentifier: String
 
-    String getGroupIdentifier();
+    val groupResources: Array<File>
 
-    File[] getGroupResources();
+    val driverConfiguration: DriverConfiguration
 
-    DriverConfiguration getDriverConfiguration();
+    val environmentVariables: Map<String, String>
 
-    RunnerSetupProvider getRunnerProvider(String runnerId);
+    val configurationItems: Array<String>
 
-    Map<String, String> getEnvironmentVariables();
+    fun init(env: String, conf: ClusterConfig)
 
-    String[] getConfigurationItems();
+    fun getRunnerProvider(runnerId: String): RunnerSetupProvider
 
 }
\ No newline at end of file
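The Java interface becomes a Kotlin interface: the getX() methods turn into read-only properties (which Java callers still see as getters through Kotlin's interop), while init and getRunnerProvider stay as functions. A minimal sketch of an implementor against the converted interface; the class name and all returned values here are illustrative only:

    import java.io.File
    import org.apache.amaterasu.common.configuration.ClusterConfig
    import org.apache.amaterasu.sdk.frameworks.configuration.DriverConfiguration

    // Illustrative implementor; every concrete value is made up.
    class SketchSetupProvider : FrameworkSetupProvider {
        override val groupIdentifier = "sketch"
        override val groupResources: Array<File> = arrayOf()
        override val driverConfiguration = DriverConfiguration(memory = 1024, cpus = 1)
        override val environmentVariables: Map<String, String> = mapOf()
        override val configurationItems: Array<String> = arrayOf()

        override fun init(env: String, conf: ClusterConfig) {
            // capture whatever setup the framework needs; omitted in the sketch
        }

        override fun getRunnerProvider(runnerId: String): RunnerSetupProvider =
            throw NotImplementedError("no runners in this sketch: $runnerId")
    }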
diff --git a/sdk/src/main/kotlin/org/apache/amaterasu/sdk/frameworks/RunnerSetupProvider.kt b/sdk/src/main/kotlin/org/apache/amaterasu/sdk/frameworks/RunnerSetupProvider.kt
new file mode 100644
index 0000000..9af488e
--- /dev/null
+++ b/sdk/src/main/kotlin/org/apache/amaterasu/sdk/frameworks/RunnerSetupProvider.kt
@@ -0,0 +1,63 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.amaterasu.sdk.frameworks
+
+import org.apache.amaterasu.common.dataobjects.ActionData
+import org.apache.amaterasu.common.utils.ArtifactUtil
+import org.apache.amaterasu.common.utils.FileUtil
+
+abstract class RunnerSetupProvider {
+
+    private val actionFiles = arrayOf("env.yaml", "runtime.yaml", "datastores.yaml")
+
+    abstract val runnerResources: Array<String>
+
+    abstract fun getCommand(jobId: String, actionData: ActionData, env: String, executorId: String, callbackAddress: String): String
+
+    abstract fun getActionUserResources(jobId: String, actionData: ActionData): Array<String>
+
+    fun getActionResources(jobId: String, actionData: ActionData): Array<String> =
+            actionFiles.map { f -> "$jobId/${actionData.name}/$f" }.toTypedArray() +
+                    getActionUserResources(jobId, actionData)
+
+    abstract fun getActionDependencies(jobId: String, actionData: ActionData): Array<String>
+
+    fun getActionExecutable(jobId: String, actionData: ActionData): String {
+
+        // if the action is artifact based
+        return if (actionData.hasArtifact) {
+
+            val util = ArtifactUtil(listOf(actionData.repo), jobId)
+            util.getLocalArtifacts(actionData.artifact).first().path
+
+        } else {
+
+            // the action src can be URL-based, so first check whether it needs to be downloaded
+            val fileUtil = FileUtil()
+
+            if (fileUtil.isSupportedUrl(actionData.src)) {
+                fileUtil.downloadFile(actionData.src)
+            } else {
+                 //"repo/src/${actionData.name}/${actionData.src}"
+                 "repo/src/${actionData.src}"
+            }
+        }
+    }
+
+    abstract val hasExecutor: Boolean
+}
\ No newline at end of file
diff --git a/leader/src/main/scala/org/apache/amaterasu/leader/package.scala b/sdk/src/main/kotlin/org/apache/amaterasu/sdk/frameworks/configuration/DriverConfiguration.kt
old mode 100755
new mode 100644
similarity index 85%
copy from leader/src/main/scala/org/apache/amaterasu/leader/package.scala
copy to sdk/src/main/kotlin/org/apache/amaterasu/sdk/frameworks/configuration/DriverConfiguration.kt
index b7b0407..e3b566b
--- a/leader/src/main/scala/org/apache/amaterasu/leader/package.scala
+++ b/sdk/src/main/kotlin/org/apache/amaterasu/sdk/frameworks/configuration/DriverConfiguration.kt
@@ -14,8 +14,6 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.amaterasu
+package org.apache.amaterasu.sdk.frameworks.configuration
 
-package object leader {
-
-}
+data class DriverConfiguration(val memory: Int = 0, val cpus: Int = 0)
\ No newline at end of file
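The Java bean collapses into a one-line Kotlin data class with default values, and the getters the Java version declared are now generated from the properties. One interop wrinkle worth verifying against call sites: the old method was getCPUs(), while the cpus property generates getCpus(). A quick usage sketch:

    // Named arguments, defaults and copy() come for free with a data class.
    val default = DriverConfiguration()                      // memory = 0, cpus = 0
    val tuned = DriverConfiguration(memory = 2048, cpus = 2)
    val scaled = tuned.copy(cpus = 4)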
diff --git a/sdk/src/test/kotlin/org/apache/amaterasu/sdk/frameworks/RunnerSetupProviderTests.kt b/sdk/src/test/kotlin/org/apache/amaterasu/sdk/frameworks/RunnerSetupProviderTests.kt
new file mode 100644
index 0000000..2bd2ed1
--- /dev/null
+++ b/sdk/src/test/kotlin/org/apache/amaterasu/sdk/frameworks/RunnerSetupProviderTests.kt
@@ -0,0 +1,22 @@
+package org.apache.amaterasu.sdk.frameworks
+
+import org.apache.amaterasu.common.configuration.enums.ActionStatus
+import org.apache.amaterasu.common.dataobjects.ActionData
+import org.jetbrains.spek.api.Spek
+import org.jetbrains.spek.api.dsl.given
+import org.jetbrains.spek.api.dsl.it
+import kotlin.test.assertEquals
+
+object RunnerSetupProviderTests : Spek({
+
+    given("A class implementing RunnerSetupProvider") {
+
+        val testProvider = TestRunnerProvider()
+        val data = ActionData(ActionStatus.Started, "test", "test.scala", "spark", "scala-jar", "123")
+
+        it("adds the default action resource files to the user action resource files") {
+            val resources = testProvider.getActionResources("job", data)
+            assertEquals(4, resources.size)
+        }
+    }
+})
\ No newline at end of file
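The expected count of 4 follows directly from the base class added above: getActionResources prepends the three default action files (env.yaml, runtime.yaml, datastores.yaml) to whatever getActionUserResources returns, here the single testresource.yaml supplied by TestRunnerProvider below, hence 3 + 1 = 4.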
diff --git a/sdk/src/test/kotlin/org/apache/amaterasu/sdk/frameworks/TestRunnerProvider.kt b/sdk/src/test/kotlin/org/apache/amaterasu/sdk/frameworks/TestRunnerProvider.kt
new file mode 100644
index 0000000..553f512
--- /dev/null
+++ b/sdk/src/test/kotlin/org/apache/amaterasu/sdk/frameworks/TestRunnerProvider.kt
@@ -0,0 +1,23 @@
+package org.apache.amaterasu.sdk.frameworks
+
+import org.apache.amaterasu.common.dataobjects.ActionData
+
+class TestRunnerProvider : RunnerSetupProvider() {
+    override val hasExecutor: Boolean
+        get() = false
+
+    override val runnerResources: Array<String>
+        get() = arrayOf()
+
+    override fun getCommand(jobId: String, actionData: ActionData, env: String, executorId: String, callbackAddress: String): String {
+        return ""
+    }
+
+    override fun getActionUserResources(jobId: String, actionData: ActionData): Array<String> {
+        return arrayOf("testresource.yaml")
+    }
+
+    override fun getActionDependencies(jobId: String, actionData: ActionData): Array<String> {
+        return arrayOf()
+    }
+}
\ No newline at end of file
diff --git a/settings.gradle b/settings.gradle
index 9343001..bedede4 100644
--- a/settings.gradle
+++ b/settings.gradle
@@ -22,6 +22,13 @@
 include 'leader-common'
 project(':leader-common')
 
+include 'leader-mesos'
+project(':leader-mesos')
+
+include 'leader-yarn'
+project(':leader-yarn')
+
 include 'common'
 project(':common')