Add support for using custom executors via the Aurora DSL
Bugs closed: AURORA-1981
Reviewed at https://reviews.apache.org/r/66154/
diff --git a/RELEASE-NOTES.md b/RELEASE-NOTES.md
index b5d06c4..51ab6c7 100644
--- a/RELEASE-NOTES.md
+++ b/RELEASE-NOTES.md
@@ -1,3 +1,12 @@
+0.21.0 (unreleased)
+===================
+
+### New/updated:
+- Added `executor_config` field to the Job object of the DSL which will populate
+ `JobConfiguration.TaskConfig.ExecutorConfig`. This allows for using custom executors defined
+ through the `--custom_executor_config` scheduler flag. See our
+ [custom-executors](docs/features/custom-executors.md) documentation for more information.
+
0.20.0
===================
diff --git a/docs/features/custom-executors.md b/docs/features/custom-executors.md
index 1357c1e..99d2ec9 100644
--- a/docs/features/custom-executors.md
+++ b/docs/features/custom-executors.md
@@ -145,9 +145,22 @@
### Using a custom executor
-At this time, it is not currently possible create a job that runs on a custom executor using the default
-Aurora client. To allow the scheduler to pick the correct executor, the `JobConfiguration.TaskConfig.ExecutorConfig.name`
-field must be set to match the name used in the custom executor configuration blob. (e.g. to run a job using myExecutor,
-`JobConfiguration.TaskConfig.ExecutorConfig.name` must be set to `myExecutor`). While support for modifying
-this field in Pystachio created, the easiest way to launch jobs with custom executors is to use
-an existing custom Client such as [gorealis](https://github.com/rdelval/gorealis).
+To launch tasks using a custom executor,
+an [ExecutorConfig](../reference/configuration.md#executorconfig-objects) object must be added to
+the Job or Service object. The `name` parameter of ExecutorConfig must match the name of an executor
+defined in the JSON object provided to the scheduler at startup time.
+
+For example, if we desire to launch tasks using `myExecutor` (defined above), we may do so in
+the following manner:
+
+```
+jobs = [Service(
+ task = task,
+ cluster = 'devcluster',
+ role = 'www-data',
+ environment = 'prod',
+ name = 'hello',
+ executor_config = ExecutorConfig(name='myExecutor'))]
+```
+
+This will create a Service Job which will launch tasks using `myExecutor` instead of Thermos.
diff --git a/docs/reference/configuration.md b/docs/reference/configuration.md
index 725e073..d4b869b 100644
--- a/docs/reference/configuration.md
+++ b/docs/reference/configuration.md
@@ -358,6 +358,7 @@
```enable_hooks``` | Boolean | Whether to enable [Client Hooks](client-hooks.md) for this job. (Default: False)
```partition_policy``` | ```PartitionPolicy``` object | An optional partition policy that allows job owners to define how to handle partitions for running tasks (in partition-aware Aurora clusters)
```metadata``` | list of ```Metadata``` objects | list of ```Metadata``` objects for user's customized metadata information.
+ ```executor_config``` | ```ExecutorConfig``` object | Allows choosing an alternative executor defined in `custom_executor_config` to be used instead of Thermos. Tasks will be launched with Thermos as the executor by default. See [Custom Executors](../features/custom-executors.md) for more info.
### UpdateConfig Objects
@@ -420,6 +421,15 @@
```key``` | String | Indicate which metadata the user provides
```value``` | String | Provide the metadata content for corresponding key
+### ExecutorConfig Objects
+
+Describes the executor name and data to pass to the Mesos task.
+
+| param | type | description
+| ------- | :-------: | --------
+| ```name``` | String | Name of the executor to use for this task. Must match the name of an executor in `custom_executor_config` or Thermos (`AuroraExecutor`). (Default: AuroraExecutor)
+| ```data``` | String | Data blob to pass on to the executor. (Default: "")
+
### Announcer Objects
If the `announce` field in the Job configuration is set, each task will be
diff --git a/src/main/python/apache/aurora/config/schema/base.py b/src/main/python/apache/aurora/config/schema/base.py
index 3d57d6a..a629bcd 100644
--- a/src/main/python/apache/aurora/config/schema/base.py
+++ b/src/main/python/apache/aurora/config/schema/base.py
@@ -19,6 +19,7 @@
from apache.thermos.config.schema import *
+from gen.apache.aurora.api.constants import AURORA_EXECUTOR_NAME
# TODO(wickman) Bind {{mesos.instance}} to %shard_id%
class MesosContext(Struct):
@@ -163,6 +164,9 @@
key = Required(String)
value = Required(String)
+class ExecutorConfig(Struct):
+ name = Default(String, AURORA_EXECUTOR_NAME)
+ data = Default(String, "")
class MesosJob(Struct):
name = Default(String, '{{task.name}}')
@@ -190,6 +194,7 @@
# TODO(wickman) Make Default(Any, LifecycleConfig()) once pystachio #17 is addressed.
lifecycle = Default(LifecycleConfig, DefaultLifecycleConfig)
task_links = Map(String, String) # Unsupported. See AURORA-739
+ executor_config = Default(ExecutorConfig, ExecutorConfig())
enable_hooks = Default(Boolean, False) # enable client API hooks; from env python-list 'hooks'
diff --git a/src/main/python/apache/aurora/config/thrift.py b/src/main/python/apache/aurora/config/thrift.py
index dcabb03..6d2dde6 100644
--- a/src/main/python/apache/aurora/config/thrift.py
+++ b/src/main/python/apache/aurora/config/thrift.py
@@ -337,9 +337,16 @@
if unbound:
raise InvalidConfig('Config contains unbound variables: %s' % ' '.join(map(str, unbound)))
- task.executorConfig = ExecutorConfig(
+ # Set the executor that will be used by the Mesos task; Thermos is the default.
+ executor = job.executor_config()
+ if fully_interpolated(executor.name()) == AURORA_EXECUTOR_NAME:
+ task.executorConfig = ExecutorConfig(
name=AURORA_EXECUTOR_NAME,
data=filter_aliased_fields(underlying).json_dumps())
+ else:
+ task.executorConfig = ExecutorConfig(
+ name=fully_interpolated(executor.name()),
+ data=fully_interpolated(executor.data()))
return JobConfiguration(
key=key,
diff --git a/src/test/python/apache/aurora/client/cli/test_inspect.py b/src/test/python/apache/aurora/client/cli/test_inspect.py
index 8c62480..e4f43d0 100644
--- a/src/test/python/apache/aurora/client/cli/test_inspect.py
+++ b/src/test/python/apache/aurora/client/cli/test_inspect.py
@@ -22,6 +22,8 @@
from apache.aurora.config.schema.base import Job
from apache.thermos.config.schema_base import MB, Process, Resources, Task
+from gen.apache.aurora.api.constants import AURORA_EXECUTOR_NAME
+
from .util import AuroraClientCommandTest
@@ -138,6 +140,10 @@
"production": False,
"role": "bozo",
"contact": "bozo@the.clown",
+ "executor_config": {
+ "name": AURORA_EXECUTOR_NAME,
+ "data": ""
+ },
"metadata": [],
"lifecycle": {
"http": {
diff --git a/src/test/python/apache/aurora/config/test_thrift.py b/src/test/python/apache/aurora/config/test_thrift.py
index 7bf0508..8e1d0e1 100644
--- a/src/test/python/apache/aurora/config/test_thrift.py
+++ b/src/test/python/apache/aurora/config/test_thrift.py
@@ -12,6 +12,7 @@
# limitations under the License.
#
+import json
import getpass
import re
@@ -24,6 +25,7 @@
Container,
Docker,
DockerImage,
+ ExecutorConfig,
HealthCheckConfig,
Job,
Mesos,
@@ -37,7 +39,7 @@
from apache.aurora.config.thrift import InvalidConfig, task_instance_from_job
from apache.thermos.config.schema import Process, Resources, Task
-from gen.apache.aurora.api.constants import GOOD_IDENTIFIER_PATTERN_PYTHON
+from gen.apache.aurora.api.constants import AURORA_EXECUTOR_NAME, GOOD_IDENTIFIER_PATTERN_PYTHON
from gen.apache.aurora.api.ttypes import Mode as ThriftMode
from gen.apache.aurora.api.ttypes import (
CronCollisionPolicy,
@@ -60,6 +62,75 @@
)
)
+HELLO_WORLD_EXECUTOR_DATA = {
+ "environment": "staging66",
+ "health_check_config": {
+ "health_checker": {
+ "http": {
+ "expected_response_code": 0,
+ "endpoint": "/health",
+ "expected_response": "ok"
+ }
+ },
+ "min_consecutive_successes": 1,
+ "initial_interval_secs": 15.0,
+ "max_consecutive_failures": 0,
+ "timeout_secs": 1.0,
+ "interval_secs": 10.0
+ },
+ "name": "hello_world",
+ "service": False,
+ "max_task_failures": 1,
+ "executor_config": {
+ "data": "",
+ "name": "AuroraExecutor"
+ },
+ "cron_collision_policy": "KILL_EXISTING",
+ "enable_hooks": False,
+ "cluster": "smf1-test",
+ "task": {
+ "processes": [
+ {
+ "daemon": False,
+ "name": "hello_world",
+ "ephemeral": False,
+ "max_failures": 1,
+ "min_duration": 5,
+ "cmdline": "echo {{mesos.instance}}",
+ "final": False
+ }
+ ],
+ "name": "main",
+ "finalization_wait": 30,
+ "max_failures": 1,
+ "max_concurrency": 0,
+ "resources": {
+ "gpu": 2,
+ "disk": 67108864,
+ "ram": 67108864,
+ "cpu": 0.1
+ },
+ "constraints": [
+
+ ]
+ },
+ "production": False,
+ "role": "john_doe",
+ "metadata": [
+
+ ],
+ "lifecycle": {
+ "http": {
+ "graceful_shutdown_endpoint": "/quitquitquit",
+ "graceful_shutdown_wait_secs": 5,
+ "port": "health",
+ "shutdown_wait_secs": 5,
+ "shutdown_endpoint": "/abortabortabort"
+ }
+ },
+ "priority": 0
+}
+
def test_simple_config():
job = convert_pystachio_to_thrift(HELLO_WORLD, ports=frozenset(['health']))
@@ -310,6 +381,30 @@
assert metadata_tuples == expected_metadata_tuples
+def test_config_with_implicit_thermos_executor_config():
+ job = convert_pystachio_to_thrift(HELLO_WORLD())
+
+ assert str(job.taskConfig.executorConfig.name) == AURORA_EXECUTOR_NAME
+ assert json.loads(job.taskConfig.executorConfig.data) == HELLO_WORLD_EXECUTOR_DATA
+
+
+def test_config_with_explicit_thermos_executor_config():
+ job = convert_pystachio_to_thrift(
+ HELLO_WORLD(executor_config=ExecutorConfig(name=AURORA_EXECUTOR_NAME)))
+
+ assert str(job.taskConfig.executorConfig.name) == AURORA_EXECUTOR_NAME
+ assert json.loads(job.taskConfig.executorConfig.data) == HELLO_WORLD_EXECUTOR_DATA
+
+
+def test_config_with_custom_executor_config():
+ job = convert_pystachio_to_thrift(
+ HELLO_WORLD(executor_config=ExecutorConfig(
+ name="CustomExecutor", data="{test:'payload'}")))
+
+ assert str(job.taskConfig.executorConfig.name) == "CustomExecutor"
+ assert str(job.taskConfig.executorConfig.data) == "{test:'payload'}"
+
+
def test_task_instance_from_job():
instance = task_instance_from_job(
Job(health_check_config=HealthCheckConfig(interval_secs=30)), 0, '')