ARIA-149 Enhance operation configuration

* Also fixes ARIA-121, ARIA-190
* Parse special "dependencies" configuration parameters as YAML and
  treat as Parameter models, allowing them full use of intrinsic
  functions, type coersions, and validations
* Rename various functions that process "properties" to more generically
  process "parameters" (properties, inputs, attributes, arguments, etc.)
* The "configuration" field in OperationTemplate and Operation models
  is now now a dict of Parameter models
* Add "function" and "arguments" fields to Operation model to preserve
  user data (in "implementation" and "inputs") and to clearly demarcate
  orchestration data from user data; update task API accordingly
* Some cleanup of parser code touched by this commit
* Rename "create_parameters" to "merge_parameter_values" and improve
diff --git a/aria/cli/commands/services.py b/aria/cli/commands/services.py
index 24de7c5..476387c 100644
--- a/aria/cli/commands/services.py
+++ b/aria/cli/commands/services.py
@@ -151,7 +151,7 @@
     except storage_exceptions.StorageError as e:
         utils.check_overriding_storage_exceptions(e, 'service', service_name)
         raise
-    except modeling_exceptions.InputsException:
+    except modeling_exceptions.ParameterException:
         service_templates.print_service_template_inputs(model_storage, service_template_name,
                                                         logger)
         raise
diff --git a/aria/cli/execution_logging.py b/aria/cli/execution_logging.py
index b23165f..248ff7c 100644
--- a/aria/cli/execution_logging.py
+++ b/aria/cli/execution_logging.py
@@ -105,12 +105,12 @@
     # implementation
     if item.task:
         # operation task
-        implementation = item.task.implementation
-        inputs = dict(i.unwrap() for i in item.task.inputs.values())
+        implementation = item.task.function
+        inputs = dict(arg.unwrapped for arg in item.task.arguments.values())
     else:
         # execution task
         implementation = item.execution.workflow_name
-        inputs = dict(i.unwrap() for i in item.execution.inputs.values())
+        inputs = dict(inp.unwrapped for inp in item.execution.inputs.values())
 
     stylized_str = color.StringStylizer(_get_format())
     _populate_level(stylized_str, item)
diff --git a/aria/core.py b/aria/core.py
index cc943ef..f660167 100644
--- a/aria/core.py
+++ b/aria/core.py
@@ -56,7 +56,8 @@
         service_template = self.model_storage.service_template.get(service_template_id)
         if service_template.services:
             raise exceptions.DependentServicesError(
-                "Can't delete service template {0} - Service template has existing services")
+                'Can\'t delete service template `{0}` - service template has existing services'
+                .format(service_template.name))
 
         self.model_storage.service_template.delete(service_template)
         self.resource_storage.service_template.delete(entry_id=str(service_template.id))
@@ -87,7 +88,8 @@
                     consumption.CoerceServiceInstanceValues
                 )).consume()
             if context.validation.dump_issues():
-                raise exceptions.InstantiationError('Failed to instantiate service template')
+                raise exceptions.InstantiationError('Failed to instantiate service template `{0}`'
+                                                    .format(service_template.name))
 
         storage_session.flush()  # flushing so service.id would auto-populate
         service.name = service_name or '{0}_{1}'.format(service_template.name, service.id)
@@ -100,15 +102,15 @@
         active_executions = [e for e in service.executions if e.is_active()]
         if active_executions:
             raise exceptions.DependentActiveExecutionsError(
-                "Can't delete service {0} - there is an active execution for this service. "
-                "Active execution id: {1}".format(service.name, active_executions[0].id))
+                'Can\'t delete service `{0}` - there is an active execution for this service. '
+                'Active execution ID: {1}'.format(service.name, active_executions[0].id))
 
         if not force:
             available_nodes = [str(n.id) for n in service.nodes.values() if n.is_available()]
             if available_nodes:
                 raise exceptions.DependentAvailableNodesError(
-                    "Can't delete service {0} - there are available nodes for this service. "
-                    "Available node ids: {1}".format(service.name, ', '.join(available_nodes)))
+                    'Can\'t delete service `{0}` - there are available nodes for this service. '
+                    'Available node IDs: {1}'.format(service.name, ', '.join(available_nodes)))
 
         self.model_storage.service.delete(service)
 
diff --git a/aria/modeling/contraints.py b/aria/modeling/constraints.py
similarity index 100%
rename from aria/modeling/contraints.py
rename to aria/modeling/constraints.py
diff --git a/aria/modeling/exceptions.py b/aria/modeling/exceptions.py
index 19fd942..d0e3e22 100644
--- a/aria/modeling/exceptions.py
+++ b/aria/modeling/exceptions.py
@@ -22,9 +22,9 @@
     """
 
 
-class InputsException(ModelingException):
+class ParameterException(ModelingException):
     """
-    ARIA inputs exception.
+    ARIA parameter exception.
     """
     pass
 
@@ -41,19 +41,19 @@
     """
 
 
-class MissingRequiredInputsException(InputsException):
+class MissingRequiredParametersException(ParameterException):
     """
-    ARIA modeling exception: Required inputs have been omitted.
+    ARIA modeling exception: Required parameters have been omitted.
     """
 
 
-class InputsOfWrongTypeException(InputsException):
+class ParametersOfWrongTypeException(ParameterException):
     """
-    ARIA modeling exception: Inputs of the wrong types have been provided.
+    ARIA modeling exception: Parameters of the wrong types have been provided.
     """
 
 
-class UndeclaredInputsException(InputsException):
+class UndeclaredParametersException(ParameterException):
     """
-    ARIA modeling exception: Undeclared inputs have been provided.
+    ARIA modeling exception: Undeclared parameters have been provided.
     """
diff --git a/aria/modeling/orchestration.py b/aria/modeling/orchestration.py
index ab9d34d..97de552 100644
--- a/aria/modeling/orchestration.py
+++ b/aria/modeling/orchestration.py
@@ -230,10 +230,10 @@
     :vartype relationship: :class:`Relationship`
     :ivar plugin: The implementing plugin (set to None for default execution plugin)
     :vartype plugin: :class:`Plugin`
-    :ivar inputs: Parameters that can be used by this task
-    :vartype inputs: {basestring: :class:`Parameter`}
-    :ivar implementation: Python path to an ``@operation`` function
-    :vartype implementation: basestring
+    :ivar function: Python path to an ``@operation`` function
+    :vartype function: basestring
+    :ivar arguments: Arguments that can be used by this task
+    :vartype arguments: {basestring: :class:`Parameter`}
     :ivar max_attempts: Maximum number of retries allowed in case of failure
     :vartype max_attempts: int
     :ivar retry_interval: Interval between retries (in seconds)
@@ -300,10 +300,10 @@
         return relationship.many_to_one(cls, 'execution')
 
     @declared_attr
-    def inputs(cls):
-        return relationship.many_to_many(cls, 'parameter', prefix='inputs', dict_key='name')
+    def arguments(cls):
+        return relationship.many_to_many(cls, 'parameter', prefix='arguments', dict_key='name')
 
-    implementation = Column(String)
+    function = Column(String)
     max_attempts = Column(Integer, default=1)
     retry_interval = Column(Float, default=0)
     ignore_failure = Column(Boolean, default=False)
diff --git a/aria/modeling/service_common.py b/aria/modeling/service_common.py
index ef19c8e..8f844a2 100644
--- a/aria/modeling/service_common.py
+++ b/aria/modeling/service_common.py
@@ -203,7 +203,8 @@
         if self.description:
             console.puts(context.style.meta(self.description))
 
-    def unwrap(self):
+    @property
+    def unwrapped(self):
         return self.name, self.value
 
     @classmethod
@@ -211,6 +212,10 @@
         """
         Wraps an arbitrary value as a parameter. The type will be guessed via introspection.
 
+        For primitive types, we will prefer their TOSCA aliases. See the `TOSCA Simple Profile v1.0
+        cos01 specification <http://docs.oasis-open.org/tosca/TOSCA-Simple-Profile-YAML/v1.0/cos01
+        /TOSCA-Simple-Profile-YAML-v1.0-cos01.html#_Toc373867862>`__
+
         :param name: Parameter name
         :type name: basestring
         :param value: Parameter value
diff --git a/aria/modeling/service_instance.py b/aria/modeling/service_instance.py
index 7058969..703873e 100644
--- a/aria/modeling/service_instance.py
+++ b/aria/modeling/service_instance.py
@@ -31,7 +31,11 @@
 from ..orchestrator import execution_plugin
 from ..parser import validation
 from ..parser.consumption import ConsumptionContext
-from ..utils import collections, formatting, console
+from ..utils import (
+    collections,
+    formatting,
+    console
+)
 from . import (
     relationship,
     utils,
@@ -1631,20 +1635,24 @@
     :vartype operation_template: :class:`OperationTemplate`
     :ivar description: Human-readable description
     :vartype description: string
-    :ivar plugin: Associated plugin
-    :vartype plugin: :class:`Plugin`
     :ivar relationship_edge: When true, specifies that the operation is on the relationship's
                              target edge instead of its source (only used by relationship
                              operations)
     :vartype relationship_edge: bool
     :ivar implementation: Implementation (interpreted by the plugin)
     :vartype implementation: basestring
-    :ivar configuration: Configuration (interpreted by the plugin)
-    :vartype configuration: {basestring, object}
     :ivar dependencies: Dependency strings (interpreted by the plugin)
     :vartype dependencies: [basestring]
     :ivar inputs: Parameters that can be used by this operation
     :vartype inputs: {basestring: :class:`Parameter`}
+    :ivar plugin: Associated plugin
+    :vartype plugin: :class:`Plugin`
+    :ivar configuration: Configuration (interpreted by the plugin)
+    :vartype configuration: {basestring, :class:`Parameter`}
+    :ivar function: Name of the operation function
+    :vartype function: basestring
+    :ivar arguments: Arguments to send to the operation function
+    :vartype arguments: {basestring: :class:`Parameter`}
     :ivar executor: Name of executor to run the operation with
     :vartype executor: basestring
     :ivar max_attempts: Maximum number of attempts allowed in case of failure
@@ -1726,34 +1734,55 @@
     def inputs(cls):
         return relationship.many_to_many(cls, 'parameter', prefix='inputs', dict_key='name')
 
+    @declared_attr
+    def configuration(cls):
+        return relationship.many_to_many(cls, 'parameter', prefix='configuration', dict_key='name')
+
+    @declared_attr
+    def arguments(cls):
+        return relationship.many_to_many(cls, 'parameter', prefix='arguments', dict_key='name')
+
     # endregion
 
     description = Column(Text)
     relationship_edge = Column(Boolean)
     implementation = Column(Text)
-    configuration = Column(modeling_types.StrictDict(key_cls=basestring))
     dependencies = Column(modeling_types.StrictList(item_cls=basestring))
+    function = Column(Text)
     executor = Column(Text)
     max_attempts = Column(Integer)
     retry_interval = Column(Integer)
 
     def configure(self):
-        from . import models
-        # Note: for workflows (operations attached directly to the service) "interface" will be None
-        if (self.implementation is None) or (self.interface is None):
+        if (self.implementation is None) and (self.function is None):
             return
 
-        if self.plugin is None:
-            arguments = execution_plugin.instantiation.configure_operation(self)
+        if (self.interface is not None) and (self.plugin is None) and (self.function is None):
+            # ("interface" is None for workflow operations, which do not currently use "plugin")
+            # The default (None) plugin is the execution plugin
+            execution_plugin.instantiation.configure_operation(self)
         else:
             # In the future plugins may be able to add their own "configure_operation" hook that
-            # can validate the configuration and otherwise return specially derived arguments
-            arguments = self.configuration
+            # can validate the configuration and otherwise create specially derived arguments. For
+            # now, we just send all configuration parameters as arguments without validation.
+            utils.instantiate_dict(self, self.arguments, self.configuration)
 
-        # Note: the arguments will *override* operation inputs of the same name
-        if arguments:
-            for k, v in arguments.iteritems():
-                self.inputs[k] = models.Parameter.wrap(k, v)
+        # Send all inputs as extra arguments
+        # Note that they will override existing arguments of the same names
+        utils.instantiate_dict(self, self.arguments, self.inputs)
+
+        # Check for reserved arguments
+        from ..orchestrator.decorators import OPERATION_DECORATOR_RESERVED_ARGUMENTS
+        used_reserved_names = \
+            OPERATION_DECORATOR_RESERVED_ARGUMENTS.intersection(self.arguments.keys())
+        if used_reserved_names:
+            context = ConsumptionContext.get_thread_local()
+            context.validation.report('using reserved arguments in node "{0}": {1}'
+                                      .format(
+                                          self.name,
+                                          formatting.string_list_as_string(used_reserved_names)),
+                                      level=validation.Issue.EXTERNAL)
+
 
     @property
     def as_raw(self):
@@ -1762,17 +1791,18 @@
             ('description', self.description),
             ('implementation', self.implementation),
             ('dependencies', self.dependencies),
-            ('executor', self.executor),
-            ('max_attempts', self.max_attempts),
-            ('retry_interval', self.retry_interval),
             ('inputs', formatting.as_raw_dict(self.inputs))))
 
     def validate(self):
-        # TODO must be associated with interface or service
+        # TODO must be associated with either interface or service
         utils.validate_dict_values(self.inputs)
+        utils.validate_dict_values(self.configuration)
+        utils.validate_dict_values(self.arguments)
 
     def coerce_values(self, report_issues):
         utils.coerce_dict_values(self.inputs, report_issues)
+        utils.coerce_dict_values(self.configuration, report_issues)
+        utils.coerce_dict_values(self.arguments, report_issues)
 
     def dump(self):
         context = ConsumptionContext.get_thread_local()
@@ -1780,21 +1810,14 @@
         if self.description:
             console.puts(context.style.meta(self.description))
         with context.style.indent:
-            if self.plugin is not None:
-                console.puts('Plugin: {0}'.format(
-                    context.style.literal(self.plugin.name)))
             if self.implementation is not None:
                 console.puts('Implementation: {0}'.format(
                     context.style.literal(self.implementation)))
-            if self.configuration:
-                with context.style.indent:
-                    for k, v in self.configuration.iteritems():
-                        console.puts('{0}: {1}'.format(context.style.property(k),
-                                                       context.style.literal(v)))
             if self.dependencies:
                 console.puts(
                     'Dependencies: {0}'.format(
                         ', '.join((str(context.style.literal(v)) for v in self.dependencies))))
+            utils.dump_dict_values(self.inputs, 'Inputs')
             if self.executor is not None:
                 console.puts('Executor: {0}'.format(context.style.literal(self.executor)))
             if self.max_attempts is not None:
@@ -1802,7 +1825,13 @@
             if self.retry_interval is not None:
                 console.puts('Retry interval: {0}'.format(
                     context.style.literal(self.retry_interval)))
-            utils.dump_dict_values(self.inputs, 'Inputs')
+            if self.plugin is not None:
+                console.puts('Plugin: {0}'.format(
+                    context.style.literal(self.plugin.name)))
+            utils.dump_dict_values(self.configuration, 'Configuration')
+            if self.function is not None:
+                console.puts('Function: {0}'.format(context.style.literal(self.function)))
+            utils.dump_dict_values(self.arguments, 'Arguments')
 
 
 class ArtifactBase(InstanceModelMixin):
diff --git a/aria/modeling/service_template.py b/aria/modeling/service_template.py
index 3110248..42e0d01 100644
--- a/aria/modeling/service_template.py
+++ b/aria/modeling/service_template.py
@@ -287,7 +287,7 @@
                                  service_template=self)
         context.modeling.instance = service
 
-        service.inputs = utils.create_inputs(inputs or {}, self.inputs)
+        service.inputs = utils.merge_parameter_values(inputs, self.inputs)
         # TODO: now that we have inputs, we should scan properties and inputs and evaluate functions
 
         for plugin_specification in self.plugin_specifications.itervalues():
@@ -1762,20 +1762,22 @@
     :vartype name: basestring
     :ivar description: Human-readable description
     :vartype description: basestring
-    :ivar plugin_specification: Associated plugin
-    :vartype plugin_specification: :class:`PluginSpecification`
     :ivar relationship_edge: When true, specifies that the operation is on the relationship's
                              target edge instead of its source (only used by relationship
                              operations)
     :vartype relationship_edge: bool
     :ivar implementation: Implementation (interpreted by the plugin)
     :vartype implementation: basestring
-    :ivar configuration: Configuration (interpreted by the plugin)
-    :vartype configuration: {basestring, object}
     :ivar dependencies: Dependency strings (interpreted by the plugin)
     :vartype dependencies: [basestring]
     :ivar inputs: Parameters that can be used by this operation
     :vartype inputs: {basestring: :class:`Parameter`}
+    :ivar plugin_specification: Associated plugin
+    :vartype plugin_specification: :class:`PluginSpecification`
+    :ivar configuration: Configuration (interpreted by the plugin)
+    :vartype configuration: {basestring, :class:`Parameter`}
+    :ivar function: Name of the operation function
+    :vartype function: basestring
     :ivar executor: Name of executor to run the operation with
     :vartype executor: basestring
     :ivar max_attempts: Maximum number of attempts allowed in case of failure
@@ -1855,13 +1857,17 @@
     def inputs(cls):
         return relationship.many_to_many(cls, 'parameter', prefix='inputs', dict_key='name')
 
+    @declared_attr
+    def configuration(cls):
+        return relationship.many_to_many(cls, 'parameter', prefix='configuration', dict_key='name')
+
     # endregion
 
     description = Column(Text)
     relationship_edge = Column(Boolean)
     implementation = Column(Text)
-    configuration = Column(modeling_types.StrictDict(key_cls=basestring))
     dependencies = Column(modeling_types.StrictList(item_cls=basestring))
+    function = Column(Text)
     executor = Column(Text)
     max_attempts = Column(Integer)
     retry_interval = Column(Integer)
@@ -1873,48 +1879,39 @@
             ('description', self.description),
             ('implementation', self.implementation),
             ('dependencies', self.dependencies),
-            ('executor', self.executor),
-            ('max_attempts', self.max_attempts),
-            ('retry_interval', self.retry_interval),
             ('inputs', formatting.as_raw_dict(self.inputs))))
 
     def instantiate(self, container):
         from . import models
-        if self.plugin_specification:
-            if self.plugin_specification.enabled:
-                plugin = self.plugin_specification.plugin
-                implementation = self.implementation if plugin is not None else None
-                # "plugin" would be none if a match was not found. In that case, a validation error
-                # should already have been reported in ServiceTemplateBase.instantiate, so we will
-                # continue silently here
-            else:
-                # If the plugin is disabled, the operation should be disabled, too
-                plugin = None
-                implementation = None
-        else:
-            # Using the execution plugin
-            plugin = None
-            implementation = self.implementation
+
+        plugin = self.plugin_specification.plugin \
+            if (self.plugin_specification is not None) and self.plugin_specification.enabled \
+            else None
 
         operation = models.Operation(name=self.name,
                                      description=deepcopy_with_locators(self.description),
                                      relationship_edge=self.relationship_edge,
-                                     plugin=plugin,
-                                     implementation=implementation,
-                                     configuration=self.configuration,
+                                     implementation=self.implementation,
                                      dependencies=self.dependencies,
                                      executor=self.executor,
+                                     plugin=plugin,
+                                     function=self.function,
                                      max_attempts=self.max_attempts,
                                      retry_interval=self.retry_interval,
                                      operation_template=self)
+
         utils.instantiate_dict(container, operation.inputs, self.inputs)
+        utils.instantiate_dict(container, operation.configuration, self.configuration)
+
         return operation
 
     def validate(self):
         utils.validate_dict_values(self.inputs)
+        utils.validate_dict_values(self.configuration)
 
     def coerce_values(self, report_issues):
         utils.coerce_dict_values(self.inputs, report_issues)
+        utils.coerce_dict_values(self.configuration, report_issues)
 
     def dump(self):
         context = ConsumptionContext.get_thread_local()
@@ -1922,20 +1919,13 @@
         if self.description:
             console.puts(context.style.meta(self.description))
         with context.style.indent:
-            if self.plugin_specification is not None:
-                console.puts('Plugin specification: {0}'.format(
-                    context.style.literal(self.plugin_specification.name)))
             if self.implementation is not None:
                 console.puts('Implementation: {0}'.format(
                     context.style.literal(self.implementation)))
-            if self.configuration:
-                with context.style.indent:
-                    for k, v in self.configuration.iteritems():
-                        console.puts('{0}: {1}'.format(context.style.property(k),
-                                                       context.style.literal(v)))
             if self.dependencies:
                 console.puts('Dependencies: {0}'.format(
                     ', '.join((str(context.style.literal(v)) for v in self.dependencies))))
+            utils.dump_dict_values(self.inputs, 'Inputs')
             if self.executor is not None:
                 console.puts('Executor: {0}'.format(context.style.literal(self.executor)))
             if self.max_attempts is not None:
@@ -1943,7 +1933,12 @@
             if self.retry_interval is not None:
                 console.puts('Retry interval: {0}'.format(
                     context.style.literal(self.retry_interval)))
-            utils.dump_dict_values(self.inputs, 'Inputs')
+            if self.plugin_specification is not None:
+                console.puts('Plugin specification: {0}'.format(
+                    context.style.literal(self.plugin_specification.name)))
+            utils.dump_dict_values(self.configuration, 'Configuration')
+            if self.function is not None:
+                console.puts('Function: {0}'.format(context.style.literal(self.function)))
 
 
 class ArtifactTemplateBase(TemplateModelMixin):
diff --git a/aria/modeling/utils.py b/aria/modeling/utils.py
index 0404fe4..66e9c99 100644
--- a/aria/modeling/utils.py
+++ b/aria/modeling/utils.py
@@ -21,6 +21,8 @@
 from ..parser.consumption import ConsumptionContext
 from ..utils.console import puts
 from ..utils.type import validate_value_type
+from ..utils.collections import OrderedDict
+from ..utils.formatting import string_list_as_string
 
 
 class ModelJSONEncoder(JSONEncoder):
@@ -39,7 +41,7 @@
 class NodeTemplateContainerHolder(object):
     """
     Wrapper that allows using a :class:`aria.modeling.models.NodeTemplate` model directly as the
-    ``container_holder`` argument for :func:`aria.modeling.functions.evaluate`.
+    ``container_holder`` input for :func:`aria.modeling.functions.evaluate`.
     """
 
     def __init__(self, node_template):
@@ -51,74 +53,84 @@
         return self.container.service_template
 
 
-def create_inputs(inputs, template_inputs):
+def merge_parameter_values(parameter_values, declared_parameters):
     """
-    :param inputs: key-value dict
-    :param template_inputs: parameter name to parameter object dict
-    :return: dict of parameter name to Parameter models
+    Merges parameter values according to those declared by a type.
+
+    Exceptions will be raised for validation errors.
+
+    :param parameter_values: provided parameter values or None
+    :type parameter_values: {basestring, object}
+    :param declared_parameters: declared parameters
+    :type declared_parameters: {basestring, :class:`aria.modeling.models.Parameter`}
+    :return: the merged parameters
+    :rtype: {basestring, :class:`aria.modeling.models.Parameter`}
+    :raises aria.modeling.exceptions.UndeclaredParametersException: if a key in ``parameter_values``
+            does not exist in ``declared_parameters``
+    :raises aria.modeling.exceptions.MissingRequiredParametersException: if a key in
+            ``declared_parameters`` does not exist in ``parameter_values`` and also has no default
+            value
+    :raises aria.modeling.exceptions.ParametersOfWrongTypeException: if a value in
+            ``parameter_values`` does not match its type in ``declared_parameters``
     """
-    merged_inputs = _merge_and_validate_inputs(inputs, template_inputs)
 
     from . import models
-    input_models = []
-    for input_name, input_val in merged_inputs.iteritems():
-        parameter = models.Parameter( # pylint: disable=unexpected-keyword-arg
-            name=input_name,
-            type_name=template_inputs[input_name].type_name,
-            description=template_inputs[input_name].description,
-            value=input_val)
-        input_models.append(parameter)
 
-    return dict((inp.name, inp) for inp in input_models)
+    parameter_values = parameter_values or {}
 
+    undeclared_names = list(set(parameter_values.keys()).difference(declared_parameters.keys()))
+    if undeclared_names:
+        raise exceptions.UndeclaredParametersException(
+            'Undeclared parameters have been provided: {0}; Declared: {1}'
+            .format(string_list_as_string(undeclared_names),
+                    string_list_as_string(declared_parameters.keys())))
 
-def _merge_and_validate_inputs(inputs, template_inputs):
-    """
-    :param inputs: key-value dict
-    :param template_inputs: parameter name to parameter object dict
-    :return:
-    """
-    merged_inputs = inputs.copy()
+    parameters = OrderedDict()
 
-    missing_inputs = []
-    wrong_type_inputs = {}
-    for input_name, input_template in template_inputs.iteritems():
-        if input_name not in inputs:
-            if input_template.value is not None:
-                merged_inputs[input_name] = input_template.value  # apply default value
-            else:
-                missing_inputs.append(input_name)
-        else:
-            # Validate input type
+    missing_names = []
+    wrong_type_values = OrderedDict()
+    for declared_parameter_name, declared_parameter in declared_parameters.iteritems():
+        if declared_parameter_name in parameter_values:
+            # Value has been provided
+            value = parameter_values[declared_parameter_name]
+
+            # Validate type
+            type_name = declared_parameter.type_name
             try:
-                validate_value_type(inputs[input_name], input_template.type_name)
+                validate_value_type(value, type_name)
             except ValueError:
-                wrong_type_inputs[input_name] = input_template.type_name
+                wrong_type_values[declared_parameter_name] = type_name
             except RuntimeError:
                 # TODO: This error shouldn't be raised (or caught), but right now we lack support
                 # for custom data_types, which will raise this error. Skipping their validation.
                 pass
 
-    if missing_inputs:
-        raise exceptions.MissingRequiredInputsException(
-            'Required inputs {0} have not been specified - expected inputs: {1}'
-            .format(missing_inputs, template_inputs.keys()))
+            # Wrap in Parameter model
+            parameters[declared_parameter_name] = models.Parameter( # pylint: disable=unexpected-keyword-arg
+                name=declared_parameter_name,
+                type_name=type_name,
+                description=declared_parameter.description,
+                value=value)
+        elif declared_parameter.value is not None:
+            # Copy default value from declaration
+            parameters[declared_parameter_name] = declared_parameter.instantiate(None)
+        else:
+            # Required value has not been provided
+            missing_names.append(declared_parameter_name)
 
-    if wrong_type_inputs:
+    if missing_names:
+        raise exceptions.MissingRequiredParametersException(
+            'Declared parameters {0} have not been provided values'
+            .format(string_list_as_string(missing_names)))
+
+    if wrong_type_values:
         error_message = StringIO()
-        for param_name, param_type in wrong_type_inputs.iteritems():
-            error_message.write('Input "{0}" must be of type {1}{2}'
+        for param_name, param_type in wrong_type_values.iteritems():
+            error_message.write('Parameter "{0}" is not of declared type "{1}"{2}'
                                 .format(param_name, param_type, os.linesep))
-        raise exceptions.InputsOfWrongTypeException(error_message.getvalue())
+        raise exceptions.ParametersOfWrongTypeException(error_message.getvalue())
 
-    undeclared_inputs = [input_name for input_name in inputs.keys()
-                         if input_name not in template_inputs]
-    if undeclared_inputs:
-        raise exceptions.UndeclaredInputsException(
-            'Undeclared inputs have been specified: {0}; Expected inputs: {1}'
-            .format(undeclared_inputs, template_inputs.keys()))
-
-    return merged_inputs
+    return parameters
 
 
 def coerce_dict_values(the_dict, report_issues=False):
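
A hedged usage sketch of the new API (the helper name resolve_inputs is
hypothetical; the imports, function signature, and exception classes match
the modules changed in this commit):

    # Hypothetical caller mirroring services.py: merge user inputs against
    # declared ones and surface the new ParameterException family.
    from aria.modeling import exceptions as modeling_exceptions
    from aria.modeling import utils as modeling_utils

    def resolve_inputs(user_inputs, declared_inputs, logger):
        try:
            return modeling_utils.merge_parameter_values(user_inputs, declared_inputs)
        except modeling_exceptions.UndeclaredParametersException:
            logger.error('undeclared inputs were provided')
            raise
        except modeling_exceptions.ParameterException:
            logger.error('inputs failed validation')
            raise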
diff --git a/aria/orchestrator/__init__.py b/aria/orchestrator/__init__.py
index 0b57e4b..b855aed 100644
--- a/aria/orchestrator/__init__.py
+++ b/aria/orchestrator/__init__.py
@@ -15,7 +15,12 @@
 """
 ARIA orchestrator
 """
-from .decorators import workflow, operation, WORKFLOW_DECORATOR_RESERVED_ARGUMENTS
+from .decorators import (
+    workflow,
+    operation,
+    WORKFLOW_DECORATOR_RESERVED_ARGUMENTS,
+    OPERATION_DECORATOR_RESERVED_ARGUMENTS
+)
 
 from . import (
     context,
diff --git a/aria/orchestrator/context/operation.py b/aria/orchestrator/context/operation.py
index 7c21351..f0ba337 100644
--- a/aria/orchestrator/context/operation.py
+++ b/aria/orchestrator/context/operation.py
@@ -42,8 +42,8 @@
         self._register_logger(task_id=self.task.id, level=logger_level)
 
     def __repr__(self):
-        details = 'implementation={task.implementation}; ' \
-                  'operation_inputs={task.inputs}'\
+        details = 'function={task.function}; ' \
+                  'operation_arguments={task.arguments}'\
             .format(task=self.task)
         return '{name}({0})'.format(details, name=self.name)
 
diff --git a/aria/orchestrator/decorators.py b/aria/orchestrator/decorators.py
index 62e4a14..4051a54 100644
--- a/aria/orchestrator/decorators.py
+++ b/aria/orchestrator/decorators.py
@@ -26,7 +26,8 @@
 from .workflows.api import task_graph
 
 
-WORKFLOW_DECORATOR_RESERVED_ARGUMENTS = ('ctx', 'graph')
+WORKFLOW_DECORATOR_RESERVED_ARGUMENTS = set(('ctx', 'graph'))
+OPERATION_DECORATOR_RESERVED_ARGUMENTS = set(('ctx', 'toolbelt'))
 
 
 def workflow(func=None, suffix_template=''):
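
These constants are now plain sets so callers can intersect them with user
argument names; a minimal illustration of the check that configure() in
service_instance.py above performs (values here are made up):

    # Mirrors the reserved-name check added to OperationBase.configure().
    OPERATION_DECORATOR_RESERVED_ARGUMENTS = set(('ctx', 'toolbelt'))

    arguments = {'ctx': None, 'script_path': '/tmp/run.sh'}
    used_reserved = OPERATION_DECORATOR_RESERVED_ARGUMENTS.intersection(arguments)
    if used_reserved:
        # the real code reports a validation issue instead of printing
        print('reserved arguments used: {0}'.format(', '.join(sorted(used_reserved))))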
diff --git a/aria/orchestrator/execution_plugin/instantiation.py b/aria/orchestrator/execution_plugin/instantiation.py
index c09434e..9b5152d 100644
--- a/aria/orchestrator/execution_plugin/instantiation.py
+++ b/aria/orchestrator/execution_plugin/instantiation.py
@@ -16,19 +16,14 @@
 # TODO: this module will eventually be moved to a new "aria.instantiation" package
 
 from ...utils.type import full_type_name
+from ...utils.formatting import safe_repr
 from ...utils.collections import OrderedDict
 from ...parser import validation
 from ...parser.consumption import ConsumptionContext
+from ...modeling.functions import Function
 
 
 def configure_operation(operation):
-    configuration = OrderedDict(operation.configuration) if operation.configuration else {}
-
-    arguments = OrderedDict()
-    arguments['script_path'] = operation.implementation
-    arguments['process'] = _get_process(configuration.pop('process')) \
-        if 'process' in configuration else dict()
-
     host = None
     interface = operation.interface
     if interface.node is not None:
@@ -36,107 +31,143 @@
     elif interface.relationship is not None:
         if operation.relationship_edge is True:
             host = interface.relationship.target_node.host
-        else: # either False or None
+        else: # either False or None (None meaning that edge was not specified)
             host = interface.relationship.source_node.host
 
+    _configure_common(operation)
     if host is None:
         _configure_local(operation)
     else:
-        _configure_remote(operation, configuration, arguments)
+        _configure_remote(operation)
 
-    # Any remaining unhandled configuration values will become extra arguments, available as kwargs
-    # in either "run_script_locally" or "run_script_with_ssh"
-    arguments.update(configuration)
+    # Any remaining unhandled configuration parameters will become extra arguments, available as
+    # kwargs in either "run_script_locally" or "run_script_with_ssh"
+    for key, value in operation.configuration.iteritems():
+        if key not in ('process', 'ssh'):
+            operation.arguments[key] = value.instantiate(None)
 
-    return arguments
+
+def _configure_common(operation):
+    """
+    Local and remote operations.
+    """
+
+    from ...modeling.models import Parameter
+    operation.arguments['script_path'] = Parameter.wrap('script_path', operation.implementation,
+                                                        'Relative path to the executable file.')
+    operation.arguments['process'] = Parameter.wrap('process', _get_process(operation),
+                                                    'Sub-process configuration.')
+
 
 def _configure_local(operation):
     """
     Local operation.
     """
+
     from . import operations
-    operation.implementation = '{0}.{1}'.format(operations.__name__,
-                                                operations.run_script_locally.__name__)
+    operation.function = '{0}.{1}'.format(operations.__name__,
+                                          operations.run_script_locally.__name__)
 
 
-def _configure_remote(operation, configuration, arguments):
+def _configure_remote(operation):
     """
     Remote SSH operation via Fabric.
     """
+
+    from ...modeling.models import Parameter
+    from . import operations
+
+    ssh = _get_ssh(operation)
+
+    # Defaults
     # TODO: find a way to configure these generally in the service template
     default_user = ''
     default_password = ''
-
-    ssh = _get_ssh(configuration.pop('ssh')) if 'ssh' in configuration else {}
     if 'user' not in ssh:
         ssh['user'] = default_user
     if ('password' not in ssh) and ('key' not in ssh) and ('key_filename' not in ssh):
         ssh['password'] = default_password
 
-    arguments['use_sudo'] = ssh.get('use_sudo', False)
-    arguments['hide_output'] = ssh.get('hide_output', [])
-    arguments['fabric_env'] = {}
-    if 'warn_only' in ssh:
-        arguments['fabric_env']['warn_only'] = ssh['warn_only']
-    arguments['fabric_env']['user'] = ssh.get('user')
-    arguments['fabric_env']['password'] = ssh.get('password')
-    arguments['fabric_env']['key'] = ssh.get('key')
-    arguments['fabric_env']['key_filename'] = ssh.get('key_filename')
-    if 'address' in ssh:
-        arguments['fabric_env']['host_string'] = ssh['address']
+    operation.arguments['use_sudo'] = Parameter.wrap('use_sudo', ssh.get('use_sudo', False),
+                                                     'Whether to execute with sudo.')
 
-    if arguments['fabric_env'].get('user') is None:
+    operation.arguments['hide_output'] = Parameter.wrap('hide_output', ssh.get('hide_output', []),
+                                                        'Hide output of these Fabric groups.')
+
+    fabric_env = {}
+    if 'warn_only' in ssh:
+        fabric_env['warn_only'] = ssh['warn_only']
+    fabric_env['user'] = ssh.get('user')
+    fabric_env['password'] = ssh.get('password')
+    fabric_env['key'] = ssh.get('key')
+    fabric_env['key_filename'] = ssh.get('key_filename')
+    if 'address' in ssh:
+        fabric_env['host_string'] = ssh['address']
+
+    # Make sure we have a user
+    if fabric_env.get('user') is None:
         context = ConsumptionContext.get_thread_local()
         context.validation.report('must configure "ssh.user" for "{0}"'
                                   .format(operation.implementation),
                                   level=validation.Issue.BETWEEN_TYPES)
-    if (arguments['fabric_env'].get('password') is None) and \
-        (arguments['fabric_env'].get('key') is None) and \
-        (arguments['fabric_env'].get('key_filename') is None):
+
+    # Make sure we have an authentication value
+    if (fabric_env.get('password') is None) and \
+        (fabric_env.get('key') is None) and \
+        (fabric_env.get('key_filename') is None):
         context = ConsumptionContext.get_thread_local()
         context.validation.report('must configure "ssh.password", "ssh.key", or "ssh.key_filename" '
                                   'for "{0}"'
                                   .format(operation.implementation),
                                   level=validation.Issue.BETWEEN_TYPES)
 
-    from . import operations
-    operation.implementation = '{0}.{1}'.format(operations.__name__,
-                                                operations.run_script_with_ssh.__name__)
+    operation.arguments['fabric_env'] = Parameter.wrap('fabric_env', fabric_env,
+                                                       'Fabric configuration.')
+
+    operation.function = '{0}.{1}'.format(operations.__name__,
+                                          operations.run_script_with_ssh.__name__)
 
 
-def _get_process(value):
+def _get_process(operation):
+    value = operation.configuration.get('process')._value \
+        if 'process' in operation.configuration else None
     if value is None:
-        return None
+        return {}
     _validate_type(value, dict, 'process')
+    value = OrderedDict(value)
     for k, v in value.iteritems():
         if k == 'eval_python':
-            value[k] = _str_to_bool(v, 'process.eval_python')
+            value[k] = _coerce_bool(v, 'process.eval_python')
         elif k == 'cwd':
             _validate_type(v, basestring, 'process.cwd')
         elif k == 'command_prefix':
             _validate_type(v, basestring, 'process.command_prefix')
         elif k == 'args':
-            value[k] = _dict_to_list(v, 'process.args')
+            value[k] = _dict_to_list_of_strings(v, 'process.args')
         elif k == 'env':
             _validate_type(v, dict, 'process.env')
         else:
             context = ConsumptionContext.get_thread_local()
-            context.validation.report('unsupported configuration: "process.{0}"'.format(k),
+            context.validation.report('unsupported configuration parameter: "process.{0}"'
+                                      .format(k),
                                       level=validation.Issue.BETWEEN_TYPES)
     return value
 
 
-def _get_ssh(value):
+def _get_ssh(operation):
+    value = operation.configuration.get('ssh')._value \
+        if 'ssh' in operation.configuration else None
     if value is None:
         return {}
     _validate_type(value, dict, 'ssh')
+    value = OrderedDict(value)
     for k, v in value.iteritems():
         if k == 'use_sudo':
-            value[k] = _str_to_bool(v, 'ssh.use_sudo')
+            value[k] = _coerce_bool(v, 'ssh.use_sudo')
         elif k == 'hide_output':
-            value[k] = _dict_to_list(v, 'ssh.hide_output')
+            value[k] = _dict_to_list_of_strings(v, 'ssh.hide_output')
         elif k == 'warn_only':
-            value[k] = _str_to_bool(v, 'ssh.warn_only')
+            value[k] = _coerce_bool(v, 'ssh.warn_only')
         elif k == 'user':
             _validate_type(v, basestring, 'ssh.user')
         elif k == 'password':
@@ -149,22 +180,26 @@
             _validate_type(v, basestring, 'ssh.address')
         else:
             context = ConsumptionContext.get_thread_local()
-            context.validation.report('unsupported configuration: "ssh.{0}"'.format(k),
+            context.validation.report('unsupported configuration parameter: "ssh.{0}"'.format(k),
                                       level=validation.Issue.BETWEEN_TYPES)
     return value
 
 
 def _validate_type(value, the_type, name):
+    if isinstance(value, Function):
+        return
     if not isinstance(value, the_type):
         context = ConsumptionContext.get_thread_local()
-        context.validation.report('"{0}" configuration is not a {1}'
-                                  .format(name, full_type_name(the_type)),
+        context.validation.report('"{0}" configuration is not a {1}: {2}'
+                                  .format(name, full_type_name(the_type), safe_repr(value)),
                                   level=validation.Issue.BETWEEN_TYPES)
 
 
-def _str_to_bool(value, name):
+def _coerce_bool(value, name):
     if value is None:
         return None
+    if isinstance(value, bool):
+        return value
     _validate_type(value, basestring, name)
     if value == 'true':
         return True
@@ -173,19 +208,15 @@
     else:
         context = ConsumptionContext.get_thread_local()
         context.validation.report('"{0}" configuration is not "true" or "false": {1}'
-                                  .format(name, repr(value)),
+                                  .format(name, safe_repr(value)),
                                   level=validation.Issue.BETWEEN_TYPES)
 
 
-def _dict_to_list(the_dict, name):
+def _dict_to_list_of_strings(the_dict, name):
     _validate_type(the_dict, dict, name)
     value = []
     for k in sorted(the_dict):
         v = the_dict[k]
-        if not isinstance(v, basestring):
-            context = ConsumptionContext.get_thread_local()
-            context.validation.report('"{0}.{1}" configuration is not a string: {2}'
-                                      .format(name, k, repr(v)),
-                                      level=validation.Issue.BETWEEN_TYPES)
+        _validate_type(v, basestring, '{0}.{1}'.format(name, k))
         value.append(v)
     return value
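
A standalone sketch of the widened _coerce_bool contract (booleans now pass
through unchanged, the strings 'true'/'false' are converted; the real code
reports a validation issue rather than raising):

    def coerce_bool(value, name):
        # None and real booleans pass through unchanged
        if value is None or isinstance(value, bool):
            return value
        if value == 'true':
            return True
        if value == 'false':
            return False
        raise ValueError('"{0}" is not "true" or "false": {1!r}'.format(name, value))

    assert coerce_bool(True, 'ssh.use_sudo') is True
    assert coerce_bool('false', 'ssh.warn_only') is False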
diff --git a/aria/orchestrator/workflow_runner.py b/aria/orchestrator/workflow_runner.py
index 8f25cce..15a55b0 100644
--- a/aria/orchestrator/workflow_runner.py
+++ b/aria/orchestrator/workflow_runner.py
@@ -42,9 +42,10 @@
                  executor=None, task_max_attempts=DEFAULT_TASK_MAX_ATTEMPTS,
                  task_retry_interval=DEFAULT_TASK_RETRY_INTERVAL):
         """
-        Manages a single workflow execution on a given service
+        Manages a single workflow execution on a given service.
+
         :param workflow_name: Workflow name
-        :param service_id: Service id
+        :param service_id: Service ID
         :param inputs: A key-value dict of inputs for the execution
         :param model_storage: Model storage
         :param resource_storage: Resource storage
@@ -80,7 +81,7 @@
             task_retry_interval=task_retry_interval)
 
         # transforming the execution inputs to dict, to pass them to the workflow function
-        execution_inputs_dict = dict(inp.unwrap() for inp in self.execution.inputs.values())
+        execution_inputs_dict = dict(inp.unwrapped for inp in self.execution.inputs.values())
         self._tasks_graph = workflow_fn(ctx=workflow_context, **execution_inputs_dict)
 
         executor = executor or ProcessExecutor(plugin_manager=plugin_manager)
@@ -119,7 +120,7 @@
         else:
             workflow_inputs = self.service.workflows[self._workflow_name].inputs
 
-        execution.inputs = modeling_utils.create_inputs(inputs, workflow_inputs)
+        execution.inputs = modeling_utils.merge_parameter_values(inputs, workflow_inputs)
         # TODO: these two following calls should execute atomically
         self._validate_no_active_executions(execution)
         self._model_storage.execution.put(execution)
@@ -136,7 +137,7 @@
         active_executions = [e for e in self.service.executions if e.is_active()]
         if active_executions:
             raise exceptions.ActiveExecutionsError(
-                "Can't start execution; Service {0} has an active execution with id {1}"
+                "Can't start execution; Service {0} has an active execution with ID {1}"
                 .format(self.service.name, active_executions[0].id))
 
     def _get_workflow_fn(self):
@@ -156,10 +157,10 @@
         sys.path.append(service_template_resources_path)
 
         try:
-            workflow_fn = import_fullname(workflow.implementation)
+            workflow_fn = import_fullname(workflow.function)
         except ImportError:
             raise exceptions.WorkflowImplementationNotFoundError(
-                'Could not find workflow {0} implementation at {1}'.format(
-                    self._workflow_name, workflow.implementation))
+                'Could not find workflow {0} function at {1}'.format(
+                    self._workflow_name, workflow.function))
 
         return workflow_fn
diff --git a/aria/orchestrator/workflows/api/task.py b/aria/orchestrator/workflows/api/task.py
index cb79eb3..feacaf4 100644
--- a/aria/orchestrator/workflows/api/task.py
+++ b/aria/orchestrator/workflows/api/task.py
@@ -55,7 +55,28 @@
 
 class OperationTask(BaseTask):
     """
-    Represents an operation task in the task graph
+    Represents an operation task in the task graph.
+
+    :ivar name: formatted name (includes actor type, actor name, and interface/operation names)
+    :vartype name: basestring
+    :ivar actor: node or relationship
+    :vartype actor: :class:`aria.modeling.models.Node`|:class:`aria.modeling.models.Relationship`
+    :ivar interface_name: interface name on actor
+    :vartype interface_name: basestring
+    :ivar operation_name: operation name on interface
+    :vartype operation_name: basestring
+    :ivar plugin: plugin (or None for default plugin)
+    :vartype plugin: :class:`aria.modeling.models.Plugin`
+    :ivar function: path to Python function
+    :vartype function: basestring
+    :ivar arguments: arguments to send to Python function
+    :vartype arguments: {basestring, :class:`aria.modeling.models.Parameter`}
+    :ivar ignore_failure: whether to ignore failures
+    :vartype ignore_failure: bool
+    :ivar max_attempts: maximum number of attempts allowed in case of failure
+    :vartype max_attempts: int
+    :ivar retry_interval: interval between retries (in seconds)
+    :vartype retry_interval: int
     """
 
     NAME_FORMAT = '{interface}:{operation}@{type}:{name}'
@@ -64,43 +85,61 @@
                  actor,
                  interface_name,
                  operation_name,
-                 inputs=None,
+                 arguments=None,
+                 ignore_failure=None,
                  max_attempts=None,
-                 retry_interval=None,
-                 ignore_failure=None):
+                 retry_interval=None):
         """
-        Do not call this constructor directly. Instead, use :meth:`for_node` or
-        :meth:`for_relationship`.
+        :param actor: node or relationship
+        :type actor: :class:`aria.modeling.models.Node`|:class:`aria.modeling.models.Relationship`
+        :param interface_name: interface name on actor
+        :type interface_name: basestring
+        :param operation_name: operation name on interface
+        :type operation_name: basestring
+        :param arguments: override argument values
+        :type arguments: {basestring, object}
+        :param ignore_failure: override whether to ignore failures
+        :type ignore_failure: bool
+        :param max_attempts: override maximum number of attempts allowed in case of failure
+        :type max_attempts: int
+        :param retry_interval: override interval between retries (in seconds)
+        :type retry_interval: int
+        :raises aria.orchestrator.workflows.exceptions.OperationNotFoundException: if
+                ``interface_name`` and ``operation_name`` do not refer to an operation on the actor
         """
+
         assert isinstance(actor, (models.Node, models.Relationship))
-        super(OperationTask, self).__init__()
-        self.actor = actor
-        self.interface_name = interface_name
-        self.operation_name = operation_name
-        self.max_attempts = max_attempts or self.workflow_context._task_max_attempts
-        self.retry_interval = retry_interval or self.workflow_context._task_retry_interval
-        self.ignore_failure = \
-            self.workflow_context._task_ignore_failure if ignore_failure is None else ignore_failure
-        self.name = OperationTask.NAME_FORMAT.format(type=type(actor).__name__.lower(),
-                                                     name=actor.name,
-                                                     interface=self.interface_name,
-                                                     operation=self.operation_name)
+
         # Creating OperationTask directly should raise an error when there is no
         # interface/operation.
-
-        if not has_operation(self.actor, self.interface_name, self.operation_name):
+        if not has_operation(actor, interface_name, operation_name):
             raise exceptions.OperationNotFoundException(
-                'Could not find operation "{self.operation_name}" on interface '
-                '"{self.interface_name}" for {actor_type} "{actor.name}"'.format(
-                    self=self,
+                'Could not find operation "{operation_name}" on interface '
+                '"{interface_name}" for {actor_type} "{actor.name}"'.format(
+                    operation_name=operation_name,
+                    interface_name=interface_name,
                     actor_type=type(actor).__name__.lower(),
                     actor=actor)
             )
 
+        super(OperationTask, self).__init__()
+
+        self.name = OperationTask.NAME_FORMAT.format(type=type(actor).__name__.lower(),
+                                                     name=actor.name,
+                                                     interface=interface_name,
+                                                     operation=operation_name)
+        self.actor = actor
+        self.interface_name = interface_name
+        self.operation_name = operation_name
+        self.ignore_failure = \
+            self.workflow_context._task_ignore_failure if ignore_failure is None else ignore_failure
+        self.max_attempts = max_attempts or self.workflow_context._task_max_attempts
+        self.retry_interval = retry_interval or self.workflow_context._task_retry_interval
+
         operation = self.actor.interfaces[self.interface_name].operations[self.operation_name]
         self.plugin = operation.plugin
-        self.inputs = modeling_utils.create_inputs(inputs or {}, operation.inputs)
-        self.implementation = operation.implementation
+        self.function = operation.function
+        self.arguments = modeling_utils.merge_parameter_values(arguments, operation.arguments)
 
     def __repr__(self):
         return self.name
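
A hedged end-to-end sketch of the renamed keyword in a workflow (the
interface and operation names, and the ctx.model.node.iter() traversal, are
assumptions for illustration):

    # Hypothetical workflow: tasks now take "arguments" (override values
    # merged against the operation's declared arguments) instead of "inputs".
    from aria.orchestrator import workflow
    from aria.orchestrator.workflows.api.task import OperationTask

    @workflow
    def restart_all(ctx, graph):
        for node in ctx.model.node.iter():  # traversal is an assumption
            graph.add_tasks(OperationTask(
                node,
                interface_name='Standard',   # assumed interface name
                operation_name='restart',    # assumed operation name
                arguments={'force': True},
                max_attempts=3))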
diff --git a/aria/orchestrator/workflows/builtin/execute_operation.py b/aria/orchestrator/workflows/builtin/execute_operation.py
index 02a654a..437e584 100644
--- a/aria/orchestrator/workflows/builtin/execute_operation.py
+++ b/aria/orchestrator/workflows/builtin/execute_operation.py
@@ -69,7 +69,7 @@
                 node,
                 interface_name=interface_name,
                 operation_name=operation_name,
-                inputs=operation_kwargs
+                arguments=operation_kwargs
             )
         )
 
diff --git a/aria/orchestrator/workflows/core/task.py b/aria/orchestrator/workflows/core/task.py
index b3dfb3c..72d83ea 100644
--- a/aria/orchestrator/workflows/core/task.py
+++ b/aria/orchestrator/workflows/core/task.py
@@ -146,9 +146,8 @@
 
             # Only non-stub tasks have these fields
             plugin=api_task.plugin,
-            implementation=api_task.implementation,
-            inputs=api_task.inputs
-
+            function=api_task.function,
+            arguments=api_task.arguments
         )
         self._workflow_context.model.task.put(task_model)
 
diff --git a/aria/orchestrator/workflows/events_logging.py b/aria/orchestrator/workflows/events_logging.py
index 236a55f..036c1f7 100644
--- a/aria/orchestrator/workflows/events_logging.py
+++ b/aria/orchestrator/workflows/events_logging.py
@@ -35,8 +35,8 @@
 
 @events.start_task_signal.connect
 def _start_task_handler(task, **kwargs):
-    # If the task has not implementation this is an empty task.
-    if task.implementation:
+    # If the task has no function, this is an empty task.
+    if task.function:
         suffix = 'started...'
         logger = task.context.logger.info
     else:
@@ -48,7 +48,7 @@
 
 @events.on_success_task_signal.connect
 def _success_task_handler(task, **kwargs):
-    if not task.implementation:
+    if not task.function:
         return
     task.context.logger.info('{name} {task.interface_name}.{task.operation_name} successful'
                              .format(name=_get_task_name(task), task=task))
diff --git a/aria/orchestrator/workflows/executor/base.py b/aria/orchestrator/workflows/executor/base.py
index c543278..7fece6f 100644
--- a/aria/orchestrator/workflows/executor/base.py
+++ b/aria/orchestrator/workflows/executor/base.py
@@ -33,10 +33,10 @@
         Execute a task
         :param task: task to execute
         """
-        if task.implementation:
+        if task.function:
             self._execute(task)
         else:
-            # In this case the task is missing an implementation. This task still gets to an
+            # In this case the task is missing a function. This task still gets to an
             # executor, but since there is nothing to run, we by default simply skip the execution
             # itself.
             self._task_started(task)
diff --git a/aria/orchestrator/workflows/executor/celery.py b/aria/orchestrator/workflows/executor/celery.py
index bbddc25..9d66d26 100644
--- a/aria/orchestrator/workflows/executor/celery.py
+++ b/aria/orchestrator/workflows/executor/celery.py
@@ -44,11 +44,11 @@
 
     def _execute(self, task):
         self._tasks[task.id] = task
-        inputs = dict(inp.unwrap() for inp in task.inputs.values())
-        inputs['ctx'] = task.context
+        arguments = dict(arg.unwrapped for arg in task.arguments.values())
+        arguments['ctx'] = task.context
         self._results[task.id] = self._app.send_task(
-            task.operation_mapping,
+            task.function,
-            kwargs=inputs,
+            kwargs=arguments,
             task_id=task.id,
             queue=self._get_queue(task))
 
diff --git a/aria/orchestrator/workflows/executor/dry.py b/aria/orchestrator/workflows/executor/dry.py
index 63ec392..8848df8 100644
--- a/aria/orchestrator/workflows/executor/dry.py
+++ b/aria/orchestrator/workflows/executor/dry.py
@@ -33,7 +33,7 @@
             task.status = task.STARTED
 
         dry_msg = '<dry> {name} {task.interface_name}.{task.operation_name} {suffix}'
-        logger = task.context.logger.info if task.implementation else task.context.logger.debug
+        logger = task.context.logger.info if task.function else task.context.logger.debug
 
         if hasattr(task.actor, 'source_node'):
             name = '{source_node.name}->{target_node.name}'.format(
@@ -41,11 +41,11 @@
         else:
             name = task.actor.name
 
-        if task.implementation:
+        if task.function:
             logger(dry_msg.format(name=name, task=task, suffix='started...'))
             logger(dry_msg.format(name=name, task=task, suffix='successful'))
         else:
-            logger(dry_msg.format(name=name, task=task, suffix='has no implementation'))
+            logger(dry_msg.format(name=name, task=task, suffix='has no function'))
 
         # updating the task manually instead of calling self._task_succeeded(task),
         # to avoid any side effects raising that event might cause
diff --git a/aria/orchestrator/workflows/executor/process.py b/aria/orchestrator/workflows/executor/process.py
index f02e0a6..634f1f2 100644
--- a/aria/orchestrator/workflows/executor/process.py
+++ b/aria/orchestrator/workflows/executor/process.py
@@ -140,8 +140,8 @@
     def _create_arguments_dict(self, task):
         return {
             'task_id': task.id,
-            'implementation': task.implementation,
-            'operation_inputs': dict(inp.unwrap() for inp in task.inputs.values()),
+            'function': task.function,
+            'operation_arguments': dict(arg.unwrapped for arg in task.arguments.values()),
             'port': self._server_port,
             'context': task.context.serialization_dict,
         }
@@ -290,8 +290,8 @@
     port = arguments['port']
     messenger = _Messenger(task_id=task_id, port=port)
 
-    implementation = arguments['implementation']
-    operation_inputs = arguments['operation_inputs']
+    function = arguments['function']
+    operation_arguments = arguments['operation_arguments']
     context_dict = arguments['context']
 
     try:
@@ -302,11 +302,11 @@
 
     try:
         messenger.started()
-        task_func = imports.load_attribute(implementation)
+        task_func = imports.load_attribute(function)
         aria.install_aria_extensions()
         for decorate in process_executor.decorate():
             task_func = decorate(task_func)
-        task_func(ctx=ctx, **operation_inputs)
+        task_func(ctx=ctx, **operation_arguments)
         ctx.close()
         messenger.succeeded()
     except BaseException as e:
diff --git a/aria/orchestrator/workflows/executor/thread.py b/aria/orchestrator/workflows/executor/thread.py
index f53362a..56a56a5 100644
--- a/aria/orchestrator/workflows/executor/thread.py
+++ b/aria/orchestrator/workflows/executor/thread.py
@@ -60,9 +60,9 @@
                 task = self._queue.get(timeout=1)
                 self._task_started(task)
                 try:
-                    task_func = imports.load_attribute(task.implementation)
-                    inputs = dict(inp.unwrap() for inp in task.inputs.values())
-                    task_func(ctx=task.context, **inputs)
+                    task_func = imports.load_attribute(task.function)
+                    arguments = dict(arg.unwrapped for arg in task.arguments.values())
+                    task_func(ctx=task.context, **arguments)
                     self._task_succeeded(task)
                 except BaseException as e:
                     self._task_failed(task,
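
Across the thread, process, and celery executors the pattern is identical: the
"unwrapped" property (replacing the old unwrap() method) yields a (name, value)
pair, so a single dict() call produces plain keyword arguments. Illustrative,
with a hypothetical parameter:

    from aria.modeling.models import Parameter

    arguments = {'port': Parameter.wrap('port', 8080)}
    plain = dict(arg.unwrapped for arg in arguments.values())
    assert plain == {'port': 8080}   # plain values, ready to pass as **kwargs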
diff --git a/aria/utils/formatting.py b/aria/utils/formatting.py
index f96a4ce..b8d24cd 100644
--- a/aria/utils/formatting.py
+++ b/aria/utils/formatting.py
@@ -124,7 +124,9 @@
     Nice representation of a list of strings.
     """
 
-    return ', '.join('"%s"' % safe_str(v) for v in strings)
+    if not strings:
+        return 'none'
+    return ', '.join('"{0}"'.format(safe_str(v)) for v in strings)
 
 
 def pluralize(noun):
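
With the empty-list guard, string_list_as_string renders as follows (illustrative):

    from aria.utils.formatting import string_list_as_string

    string_list_as_string([])                    # 'none'
    string_list_as_string(['ctx'])               # '"ctx"'
    string_list_as_string(['ctx', 'toolbelt'])   # '"ctx", "toolbelt"'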
diff --git a/aria/utils/validation.py b/aria/utils/validation.py
index a33f7a2..193cb33 100644
--- a/aria/utils/validation.py
+++ b/aria/utils/validation.py
@@ -17,6 +17,8 @@
 Contains validation related utilities
 """
 
+from .formatting import string_list_as_string
+
 
 class ValidatorMixin(object):
     """
@@ -82,8 +84,8 @@
     for arg in non_default_args:
         if arg not in func_kwargs:
             raise ValueError(
-                "The argument '{arg}' doest not have a default value, and it "
-                "isn't passed to {func.__name__}".format(arg=arg, func=func))
+                'The argument "{arg}" is not provided and does not have a default value for '
+                'function "{func.__name__}"'.format(arg=arg, func=func))
 
     # check if there are any extra kwargs
     extra_kwargs = [arg for arg in func_kwargs.keys() if arg not in args]
@@ -91,5 +93,5 @@
     # assert that the function has kwargs
     if extra_kwargs and not has_kwargs:
         raise ValueError("The following extra kwargs were supplied: {extra_kwargs}".format(
-            extra_kwargs=extra_kwargs
+            extra_kwargs=string_list_as_string(extra_kwargs)
         ))
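
For reference, the reworded errors as a caller would see them from the validator
above, using a hypothetical function:

    def configure(ctx, port, retries=3):
        pass

    # Omitting 'port' from the kwargs raises:
    #   ValueError: The argument "port" is not provided and does not have a
    #   default value for function "configure"
    # Supplying an extra 'timeout' kwarg (no **kwargs in the signature) raises:
    #   ValueError: The following extra kwargs were supplied: "timeout"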
diff --git a/extensions/aria_extension_tosca/profiles/aria-1.0/aria-1.0.yaml b/extensions/aria_extension_tosca/profiles/aria-1.0/aria-1.0.yaml
index 0c5e77f..c1dc11d 100644
--- a/extensions/aria_extension_tosca/profiles/aria-1.0/aria-1.0.yaml
+++ b/extensions/aria_extension_tosca/profiles/aria-1.0/aria-1.0.yaml
@@ -59,11 +59,3 @@
           service topology.
         type: string
         required: true
-      dependencies:
-        description: >-
-          The optional ordered list of one or more dependent or secondary implementation artifact
-          name which are referenced by the primary implementation artifact (e.g., a library the
-          script installs or a secondary script).
-        type: list
-        entry_schema: string
-        required: false
diff --git a/extensions/aria_extension_tosca/simple_v1_0/assignments.py b/extensions/aria_extension_tosca/simple_v1_0/assignments.py
index d929ce0..79f6377 100644
--- a/extensions/aria_extension_tosca/simple_v1_0/assignments.py
+++ b/extensions/aria_extension_tosca/simple_v1_0/assignments.py
@@ -23,7 +23,7 @@
 
 from .filters import NodeFilter
 from .misc import Description, OperationImplementation
-from .modeling.properties import get_assigned_and_defined_property_values
+from .modeling.parameters import get_assigned_and_defined_parameter_values
 from .presentation.extensible import ExtensiblePresentation
 from .presentation.field_validators import (node_template_or_type_validator,
                                             relationship_template_or_type_validator,
@@ -428,7 +428,7 @@
 
     @cachedmethod
     def _get_property_values(self, context):
-        return FrozenDict(get_assigned_and_defined_property_values(context, self))
+        return FrozenDict(get_assigned_and_defined_parameter_values(context, self, 'property'))
 
     @cachedmethod
     def _validate(self, context):
diff --git a/extensions/aria_extension_tosca/simple_v1_0/modeling/__init__.py b/extensions/aria_extension_tosca/simple_v1_0/modeling/__init__.py
index 99389e4..440cb96 100644
--- a/extensions/aria_extension_tosca/simple_v1_0/modeling/__init__.py
+++ b/extensions/aria_extension_tosca/simple_v1_0/modeling/__init__.py
@@ -26,14 +26,19 @@
 from types import FunctionType
 from datetime import datetime
 
+from ruamel import yaml
+
 from aria.parser.validation import Issue
-from aria.utils.collections import StrictDict
+from aria.utils.formatting import string_list_as_string
+from aria.utils.collections import (StrictDict, OrderedDict)
+from aria.orchestrator import WORKFLOW_DECORATOR_RESERVED_ARGUMENTS
 from aria.modeling.models import (Type, ServiceTemplate, NodeTemplate,
                                   RequirementTemplate, RelationshipTemplate, CapabilityTemplate,
                                   GroupTemplate, PolicyTemplate, SubstitutionTemplate,
                                   SubstitutionTemplateMapping, InterfaceTemplate, OperationTemplate,
                                   ArtifactTemplate, Metadata, Parameter, PluginSpecification)
 
+from .parameters import coerce_parameter_value
 from .constraints import (Equal, GreaterThan, GreaterOrEqual, LessThan, LessOrEqual, InRange,
                           ValidValues, Length, MinLength, MaxLength, Pattern)
 from ..data_types import coerce_value
@@ -375,7 +380,7 @@
     implementation = operation.implementation
     if implementation is not None:
         primary = implementation.primary
-        parse_implementation_string(context, service_template, operation, model, primary)
+        extract_implementation_primary(context, service_template, operation, model, primary)
         relationship_edge = operation._get_extensions(context).get('relationship_edge')
         if relationship_edge is not None:
             if relationship_edge == 'source':
@@ -384,18 +389,39 @@
                 model.relationship_edge = True
 
         dependencies = implementation.dependencies
+        configuration = OrderedDict()
         if dependencies:
             for dependency in dependencies:
                 key, value = split_prefix(dependency)
                 if key is not None:
-                    if model.configuration is None:
-                        model.configuration = {}
-                    set_nested(model.configuration, key.split('.'), value)
+                    # Special ARIA prefix: signifies configuration parameters
+
+                    # Parse as YAML
+                    try:
+                        value = yaml.load(value)
+                    except yaml.parser.MarkedYAMLError as e:
+                        context.validation.report(
+                            'YAML parse error in operation configuration value "{1}": {0}'
+                            .format(e.problem, value),
+                            locator=implementation._locator,
+                            level=Issue.FIELD)
+                        continue
+
+                    # Coerce to intrinsic functions, if there are any
+                    value = coerce_parameter_value(context, implementation, None, value).value
+
+                    # Support dot-notation nesting
+                    set_nested(configuration, key.split('.'), value)
                 else:
                     if model.dependencies is None:
                         model.dependencies = []
                     model.dependencies.append(dependency)
 
+        # Convert configuration to Parameter models
+        for key, value in configuration.iteritems():
+            model.configuration[key] = Parameter.wrap(key, value,
+                                                      description='Operation configuration.')
+
     inputs = operation.inputs
     if inputs:
         for input_name, the_input in inputs.iteritems():
@@ -491,15 +517,22 @@
     properties = policy._get_property_values(context)
     for prop_name, prop in properties.iteritems():
         if prop_name == 'implementation':
-            parse_implementation_string(context, service_template, policy, model, prop.value)
-        elif prop_name == 'dependencies':
-            model.dependencies = prop.value
+            model.function = prop.value
         else:
             model.inputs[prop_name] = Parameter(name=prop_name, # pylint: disable=unexpected-keyword-arg
                                                 type_name=prop.type,
                                                 value=prop.value,
                                                 description=prop.description)
 
+    used_reserved_names = WORKFLOW_DECORATOR_RESERVED_ARGUMENTS.intersection(model.inputs.keys())
+    if used_reserved_names:
+        context.validation.report('using reserved arguments in workflow policy "{0}": {1}'
+                                  .format(
+                                      policy._name,
+                                      string_list_as_string(used_reserved_names)),
+                                  locator=policy._locator,
+                                  level=Issue.EXTERNAL)
+
     return model
 
 
@@ -639,13 +672,13 @@
 
 def split_prefix(string):
     """
-    Splits the prefix on the first unescaped ">".
+    Splits the prefix on the first unescaped ">", returning (None, None) when there is no prefix.
     """
 
-    split = IMPLEMENTATION_PREFIX_REGEX.split(string, 2)
+    split = IMPLEMENTATION_PREFIX_REGEX.split(string, 1)
     if len(split) < 2:
-        return None, string
-    return split[0].strip(), split[1].lstrip()
+        return None, None
+    return split[0].strip(), split[1].strip()
 
 
 def set_nested(the_dict, keys, value):
@@ -671,13 +704,18 @@
         set_nested(the_dict[key], keys, value)
 
 
-def parse_implementation_string(context, service_template, presentation, model, implementation):
-    plugin_name, model.implementation = split_prefix(implementation)
-    if plugin_name is not None:
-        model.plugin_specification = service_template.plugin_specifications.get(plugin_name)
+def extract_implementation_primary(context, service_template, presentation, model, primary):
+    prefix, postfix = split_prefix(primary)
+    if prefix:
+        # Special ARIA prefix
+        model.plugin_specification = service_template.plugin_specifications.get(prefix)
+        model.function = postfix
         if model.plugin_specification is None:
             context.validation.report(
                 'no policy for plugin "{0}" specified in operation implementation: {1}'
-                .format(plugin_name, implementation),
+                .format(prefix, primary),
                 locator=presentation._get_child_locator('properties', 'implementation'),
                 level=Issue.BETWEEN_TYPES)
+    else:
+        # Standard TOSCA artifact with default plugin
+        model.implementation = primary
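
Putting the pieces together: split_prefix separates the part before the first
unescaped ">", the value side is parsed as YAML (so scalars get proper types), and
set_nested expands dotted keys. A walk-through with hypothetical dependency entries:

    from aria.utils.collections import OrderedDict

    split_prefix('ssh.user > admin')   # -> ('ssh.user', 'admin')
    split_prefix('helper.sh')          # -> (None, None): a plain dependency

    configuration = OrderedDict()
    set_nested(configuration, 'ssh.user'.split('.'), 'admin')
    # configuration == {'ssh': {'user': 'admin'}}
    # YAML parsing of the value side types the scalars, e.g.
    # 'timeout > 30' yields int 30 and 'debug > true' yields True.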
diff --git a/extensions/aria_extension_tosca/simple_v1_0/modeling/artifacts.py b/extensions/aria_extension_tosca/simple_v1_0/modeling/artifacts.py
index 4f61ef5..dd9eeb4 100644
--- a/extensions/aria_extension_tosca/simple_v1_0/modeling/artifacts.py
+++ b/extensions/aria_extension_tosca/simple_v1_0/modeling/artifacts.py
@@ -15,11 +15,11 @@
 
 from aria.utils.collections import OrderedDict
 
+
 #
 # NodeType, NodeTemplate
 #
 
-
 def get_inherited_artifact_definitions(context, presentation, for_presentation=None):
 
     if hasattr(presentation, '_get_type'):
diff --git a/extensions/aria_extension_tosca/simple_v1_0/modeling/capabilities.py b/extensions/aria_extension_tosca/simple_v1_0/modeling/capabilities.py
index 6df7177..a90a9fc 100644
--- a/extensions/aria_extension_tosca/simple_v1_0/modeling/capabilities.py
+++ b/extensions/aria_extension_tosca/simple_v1_0/modeling/capabilities.py
@@ -16,8 +16,9 @@
 from aria.utils.collections import deepcopy_with_locators, OrderedDict
 from aria.parser.validation import Issue
 
-from .properties import (convert_property_definitions_to_values, merge_raw_property_definitions,
-                         get_assigned_and_defined_property_values)
+from .parameters import (convert_parameter_definitions_to_values, merge_raw_parameter_definitions,
+                         get_assigned_and_defined_parameter_values)
+
 
 #
 # CapabilityType
@@ -38,6 +39,7 @@
 
     return valid_source_types
 
+
 #
 # NodeType
 #
@@ -92,6 +94,7 @@
 
     return capability_definitions
 
+
 #
 # NodeTemplate
 #
@@ -127,8 +130,9 @@
                 capability_assignment = capability_assignments[capability_name]
 
                 # Assign properties
-                values = get_assigned_and_defined_property_values(context,
-                                                                  our_capability_assignment)
+                values = get_assigned_and_defined_parameter_values(context,
+                                                                   our_capability_assignment,
+                                                                   'property')
                 if values:
                     capability_assignment._raw['properties'] = values
             else:
@@ -139,6 +143,7 @@
 
     return capability_assignments
 
+
 #
 # Utils
 #
@@ -150,24 +155,25 @@
 
     properties = presentation.properties
     if properties is not None:
-        raw['properties'] = convert_property_definitions_to_values(context, properties)
+        raw['properties'] = convert_parameter_definitions_to_values(context, properties)
 
     # TODO attributes
 
     return CapabilityAssignment(name=presentation._name, raw=raw, container=container)
 
+
 def merge_capability_definition_from_type(context, presentation, capability_definition):
     raw_properties = OrderedDict()
 
     # Merge properties from type
     the_type = capability_definition._get_type(context)
-    type_property_defintions = the_type._get_properties(context)
-    merge_raw_property_definitions(context, presentation, raw_properties, type_property_defintions,
-                                   'properties')
+    type_property_definitions = the_type._get_properties(context)
+    merge_raw_parameter_definitions(context, presentation, raw_properties,
+                                    type_property_definitions, 'properties')
 
     # Merge our properties
-    merge_raw_property_definitions(context, presentation, raw_properties,
-                                   capability_definition.properties, 'properties')
+    merge_raw_parameter_definitions(context, presentation, raw_properties,
+                                    capability_definition.properties, 'properties')
 
     if raw_properties:
         capability_definition._raw['properties'] = raw_properties
diff --git a/extensions/aria_extension_tosca/simple_v1_0/modeling/constraints.py b/extensions/aria_extension_tosca/simple_v1_0/modeling/constraints.py
index 7c99eab..9a30cc1 100644
--- a/extensions/aria_extension_tosca/simple_v1_0/modeling/constraints.py
+++ b/extensions/aria_extension_tosca/simple_v1_0/modeling/constraints.py
@@ -15,7 +15,7 @@
 
 import re
 
-from aria.modeling.contraints import NodeTemplateConstraint
+from aria.modeling.constraints import NodeTemplateConstraint
 from aria.modeling.utils import NodeTemplateContainerHolder
 from aria.modeling.functions import evaluate
 from aria.parser import implements_specification
diff --git a/extensions/aria_extension_tosca/simple_v1_0/modeling/data_types.py b/extensions/aria_extension_tosca/simple_v1_0/modeling/data_types.py
index 3952785..c0d79e5 100644
--- a/extensions/aria_extension_tosca/simple_v1_0/modeling/data_types.py
+++ b/extensions/aria_extension_tosca/simple_v1_0/modeling/data_types.py
@@ -26,6 +26,7 @@
 from .functions import get_function
 from ..presentation.types import get_type_by_full_or_shorthand_name
 
+
 #
 # DataType
 #
@@ -50,6 +51,7 @@
 
     return constraints
 
+
 def coerce_data_type_value(context, presentation, data_type, entry_schema, constraints, value, # pylint: disable=unused-argument
                            aspect):
     """
@@ -121,6 +123,7 @@
 
     return value
 
+
 def validate_data_type_name(context, presentation):
     """
     Makes sure the complex data type's name is not that of a built-in type.
@@ -132,6 +135,7 @@
                                   % safe_repr(name),
                                   locator=presentation._locator, level=Issue.BETWEEN_TYPES)
 
+
 #
 # PropertyDefinition, AttributeDefinition, EntrySchema, DataType
 #
@@ -172,6 +176,7 @@
     # Try primitive data type
     return get_primitive_data_type(type_name)
 
+
 #
 # PropertyDefinition, EntrySchema
 #
@@ -195,6 +200,7 @@
 
     return constraints
 
+
 #
 # ConstraintClause
 #
@@ -310,6 +316,7 @@
 
     return True
 
+
 #
 # Repository
 #
@@ -326,6 +333,7 @@
                                   locator=presentation._locator, level=Issue.BETWEEN_TYPES)
     return None
 
+
 #
 # Utils
 #
@@ -345,6 +353,7 @@
     'boolean': bool,
     'null': None.__class__}
 
+
 @implements_specification('3.2.1-3', 'tosca-simple-1.0')
 def get_primitive_data_type(type_name):
     """
@@ -358,6 +367,7 @@
 
     return PRIMITIVE_DATA_TYPES.get(type_name)
 
+
 def get_data_type_name(the_type):
     """
     Returns the name of the type, whether it's a DataType, a primitive type, or another class.
@@ -365,6 +375,7 @@
 
     return the_type._name if hasattr(the_type, '_name') else full_type_name(the_type)
 
+
 def coerce_value(context, presentation, the_type, entry_schema, constraints, value, aspect=None): # pylint: disable=too-many-return-statements
     """
     Returns the value after it's coerced to its type, reporting validation errors if it cannot be
@@ -410,6 +421,7 @@
     # Coerce to primitive type
     return coerce_to_primitive(context, presentation, the_type, constraints, value, aspect)
 
+
 def coerce_to_primitive(context, presentation, primitive_type, constraints, value, aspect=None):
     """
     Returns the value after it's coerced to a primitive type, translating exceptions to validation
@@ -435,6 +447,7 @@
 
     return value
 
+
 def coerce_to_data_type_class(context, presentation, cls, entry_schema, constraints, value,
                               aspect=None):
     """
@@ -463,6 +476,7 @@
 
     return value
 
+
 def apply_constraints_to_value(context, presentation, constraints, value):
     """
     Applies all constraints to the value. If the value conforms, returns the value. If it does not
@@ -478,6 +492,7 @@
             value = None
     return value
 
+
 def get_container_data_type(presentation):
     if presentation is None:
         return None
@@ -485,6 +500,7 @@
         return presentation
     return get_container_data_type(presentation._container)
 
+
 def report_issue_for_bad_format(context, presentation, the_type, value, aspect, e):
     if aspect == 'default':
         aspect = '"default" value'
diff --git a/extensions/aria_extension_tosca/simple_v1_0/modeling/functions.py b/extensions/aria_extension_tosca/simple_v1_0/modeling/functions.py
index 7089ed9..7025213 100644
--- a/extensions/aria_extension_tosca/simple_v1_0/modeling/functions.py
+++ b/extensions/aria_extension_tosca/simple_v1_0/modeling/functions.py
@@ -13,7 +13,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from cStringIO import StringIO
+from StringIO import StringIO # Note: cStringIO does not support Unicode
 import re
 
 from aria.utils.collections import FrozenList
@@ -69,7 +69,7 @@
             e, final = evaluate(e, final, container_holder)
             if e is not None:
                 value.write(unicode(e))
-        value = value.getvalue()
+        value = value.getvalue() or u''
         return Evaluation(value, final)
 
 
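Background on the StringIO switch: under Python 2, cStringIO buffers bytes and
rejects non-ASCII unicode, which would break concatenation of intrinsic-function
results:

    from cStringIO import StringIO as CStringIO
    from StringIO import StringIO

    CStringIO().write(u'\u05e9')   # raises UnicodeEncodeError
    StringIO().write(u'\u05e9')    # fine

The added "or u''" covers the empty-buffer case, where getvalue() would otherwise
return a plain (non-unicode) ''.
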
diff --git a/extensions/aria_extension_tosca/simple_v1_0/modeling/interfaces.py b/extensions/aria_extension_tosca/simple_v1_0/modeling/interfaces.py
index 3e6aa6f..e04ac4a 100644
--- a/extensions/aria_extension_tosca/simple_v1_0/modeling/interfaces.py
+++ b/extensions/aria_extension_tosca/simple_v1_0/modeling/interfaces.py
@@ -13,11 +13,12 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from aria.utils.collections import merge, deepcopy_with_locators, OrderedDict
+from aria.utils.collections import (merge, deepcopy_with_locators, OrderedDict)
 from aria.parser.presentation import get_locator
 from aria.parser.validation import Issue
 
-from .properties import (coerce_property_value, convert_property_definitions_to_values)
+from .parameters import (coerce_parameter_value, convert_parameter_definitions_to_values)
+
 
 #
 # InterfaceType
@@ -45,6 +46,7 @@
 
     return operations
 
+
 #
 # InterfaceDefinition
 #
@@ -73,6 +75,7 @@
 
     return inputs
 
+
 def get_and_override_operation_definitions_from_type(context, presentation):
     """
     Returns our operation definitions added on top of those of the interface type, if specified.
@@ -96,6 +99,7 @@
 
     return operations
 
+
 #
 # NodeType, RelationshipType, GroupType
 #
@@ -124,6 +128,7 @@
 
     return interfaces
 
+
 #
 # NodeTemplate, RelationshipTemplate, GroupTemplate
 #
@@ -186,6 +191,7 @@
 
     return template_interfaces
 
+
 #
 # Utils
 #
@@ -200,13 +206,14 @@
     raw = convert_interface_definition_from_type_to_raw_template(context, presentation)
     return InterfaceAssignment(name=presentation._name, raw=raw, container=container)
 
+
 def convert_interface_definition_from_type_to_raw_template(context, presentation): # pylint: disable=invalid-name
     raw = OrderedDict()
 
     # Copy default values for inputs
     inputs = presentation._get_inputs(context)
     if inputs is not None:
-        raw['inputs'] = convert_property_definitions_to_values(context, inputs)
+        raw['inputs'] = convert_parameter_definitions_to_values(context, inputs)
 
     # Copy operations
     operations = presentation._get_operations(context)
@@ -221,11 +228,12 @@
                 raw[operation_name]['implementation'] = deepcopy_with_locators(implementation._raw)
             inputs = operation.inputs
             if inputs is not None:
-                raw[operation_name]['inputs'] = convert_property_definitions_to_values(context,
-                                                                                       inputs)
+                raw[operation_name]['inputs'] = convert_parameter_definitions_to_values(context,
+                                                                                        inputs)
 
     return raw
 
+
 def convert_requirement_interface_definitions_from_type_to_raw_template(context, raw_requirement, # pylint: disable=invalid-name
                                                                         interface_definitions):
     if not interface_definitions:
@@ -240,6 +248,7 @@
         else:
             raw_requirement['interfaces'][interface_name] = raw_interface
 
+
 def merge_interface(context, presentation, interface_assignment, our_interface_assignment,
                     interface_definition, interface_name):
     # Assign/merge interface inputs
@@ -282,6 +291,7 @@
                               our_input_assignments, input_definitions, interface_name,
                               operation_name, presentation)
 
+
 def merge_raw_input_definition(context, the_raw_input, our_input, interface_name, operation_name,
                                presentation, type_name):
     # Check if we changed the type
@@ -305,6 +315,7 @@
     # Merge
     merge(the_raw_input, our_input._raw)
 
+
 def merge_input_definitions(context, inputs, our_inputs, interface_name, operation_name,
                             presentation, type_name):
     for input_name, our_input in our_inputs.iteritems():
@@ -314,6 +325,7 @@
         else:
             inputs[input_name] = our_input._clone(presentation)
 
+
 def merge_raw_input_definitions(context, raw_inputs, our_inputs, interface_name, operation_name,
                                 presentation, type_name):
     for input_name, our_input in our_inputs.iteritems():
@@ -323,6 +335,7 @@
         else:
             raw_inputs[input_name] = deepcopy_with_locators(our_input._raw)
 
+
 def merge_raw_operation_definition(context, raw_operation, our_operation, interface_name,
                                    presentation, type_name):
     if not isinstance(our_operation._raw, dict):
@@ -353,6 +366,7 @@
             raw_operation['implementation'] = \
                 deepcopy_with_locators(our_operation._raw['implementation'])
 
+
 def merge_operation_definitions(context, operations, our_operations, interface_name, presentation,
                                 type_name):
     if not our_operations:
@@ -364,6 +378,7 @@
         else:
             operations[operation_name] = our_operation._clone(presentation)
 
+
 def merge_raw_operation_definitions(context, raw_operations, our_operations, interface_name,
                                     presentation, type_name):
     for operation_name, our_operation in our_operations.iteritems():
@@ -378,6 +393,7 @@
         else:
             raw_operations[operation_name] = deepcopy_with_locators(our_operation._raw)
 
+
 # From either an InterfaceType or an InterfaceDefinition:
 def merge_interface_definition(context, interface, our_source, presentation, type_name):
     if hasattr(our_source, 'type'):
@@ -408,6 +424,7 @@
         merge_raw_operation_definitions(context, interface._raw, our_operations, our_source._name,
                                         presentation, type_name)
 
+
 def merge_interface_definitions(context, interfaces, our_interfaces, presentation,
                                 for_presentation=None):
     if not our_interfaces:
@@ -419,12 +436,14 @@
         else:
             interfaces[name] = our_interface._clone(for_presentation)
 
+
 def merge_interface_definitions_from_their_types(context, interfaces, presentation):
     for interface in interfaces.itervalues():
         the_type = interface._get_type(context) # InterfaceType
         if the_type is not None:
             merge_interface_definition(context, interface, the_type, presentation, 'type')
 
+
 def assign_raw_inputs(context, values, assignments, definitions, interface_name, operation_name,
                       presentation):
     if not assignments:
@@ -454,8 +473,9 @@
         # Note: default value has already been assigned
 
         # Coerce value
-        values['inputs'][input_name] = coerce_property_value(context, assignment, definition,
-                                                             assignment.value)
+        values['inputs'][input_name] = coerce_parameter_value(context, assignment, definition,
+                                                              assignment.value)
+
 
 def validate_required_inputs(context, presentation, assignment, definition, original_assignment,
                              interface_name, operation_name=None):
diff --git a/extensions/aria_extension_tosca/simple_v1_0/modeling/properties.py b/extensions/aria_extension_tosca/simple_v1_0/modeling/parameters.py
similarity index 73%
rename from extensions/aria_extension_tosca/simple_v1_0/modeling/properties.py
rename to extensions/aria_extension_tosca/simple_v1_0/modeling/parameters.py
index 9c3ea42..c910956 100644
--- a/extensions/aria_extension_tosca/simple_v1_0/modeling/properties.py
+++ b/extensions/aria_extension_tosca/simple_v1_0/modeling/parameters.py
@@ -13,20 +13,21 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from aria.utils.collections import merge, deepcopy_with_locators, OrderedDict
+from aria.utils.collections import (merge, deepcopy_with_locators, OrderedDict)
+from aria.utils.formatting import pluralize
 from aria.parser.presentation import Value
 from aria.parser.validation import Issue
 
 from .data_types import coerce_value
 
+
 #
 # ArtifactType, DataType, CapabilityType, RelationshipType, NodeType, GroupType, PolicyType
 #
 
-# Works on properties, parameters, inputs, and attributes
-def get_inherited_property_definitions(context, presentation, field_name, for_presentation=None):
+def get_inherited_parameter_definitions(context, presentation, field_name, for_presentation=None):
     """
-    Returns our property definitions added on top of those of our parent, if we have one
+    Returns our parameter definitions added on top of those of our parent, if we have one
     (recursively).
 
-    Allows overriding all aspects of parent properties except data type.
+    Allows overriding all aspects of parent parameters except data type.
@@ -35,9 +36,9 @@
     # Get definitions from parent
     # If we inherit from a primitive, it does not have a parent:
     parent = presentation._get_parent(context) if hasattr(presentation, '_get_parent') else None
-    definitions = get_inherited_property_definitions(context, parent, field_name,
-                                                     for_presentation=presentation) \
-                                                     if parent is not None else OrderedDict()
+    definitions = get_inherited_parameter_definitions(context, parent, field_name,
+                                                      for_presentation=presentation) \
+                                                      if parent is not None else OrderedDict()
 
     # Add/merge our definitions
     # If we inherit from a primitive, it does not have our field
@@ -47,19 +48,19 @@
         for name, our_definition in our_definitions.iteritems():
             our_definitions_clone[name] = our_definition._clone(for_presentation)
         our_definitions = our_definitions_clone
-        merge_property_definitions(context, presentation, definitions, our_definitions, field_name)
+        merge_parameter_definitions(context, presentation, definitions, our_definitions, field_name)
 
     for definition in definitions.itervalues():
         definition._reset_method_cache()
 
     return definitions
 
+
 #
 # NodeTemplate, RelationshipTemplate, GroupTemplate, PolicyTemplate
 #
 
-def get_assigned_and_defined_property_values(context, presentation, field_name='property',
-                                             field_name_plural='properties'):
+def get_assigned_and_defined_parameter_values(context, presentation, field_name):
     """
-    Returns the assigned property values while making sure they are defined in our type.
+    Returns the assigned parameter values while making sure they are defined in our type.
 
@@ -71,6 +72,7 @@
     values = OrderedDict()
 
     the_type = presentation._get_type(context)
+    field_name_plural = pluralize(field_name)
     assignments = getattr(presentation, field_name_plural)
     get_fn_name = '_get_{0}'.format(field_name_plural)
     definitions = getattr(the_type, get_fn_name)(context) if the_type is not None else None
@@ -80,7 +82,7 @@
         for name, value in assignments.iteritems():
             if (definitions is not None) and (name in definitions):
                 definition = definitions[name]
-                values[name] = coerce_property_value(context, value, definition, value.value)
+                values[name] = coerce_parameter_value(context, value, definition, value.value)
             else:
                 context.validation.report('assignment to undefined {0} "{1}" in "{2}"'
                                           .format(field_name, name, presentation._fullname),
@@ -90,13 +92,14 @@
     if definitions:
         for name, definition in definitions.iteritems():
             if values.get(name) is None:
-                values[name] = coerce_property_value(context, presentation, definition,
-                                                     definition.default)
+                values[name] = coerce_parameter_value(context, presentation, definition,
+                                                      definition.default)
 
     validate_required_values(context, presentation, values, definitions)
 
     return values
 
+
 #
 # TopologyTemplate
 #
@@ -112,14 +115,15 @@
             if values.get(name) is None:
                 if hasattr(parameter, 'value') and (parameter.value is not None):
                     # For parameters only:
-                    values[name] = coerce_property_value(context, presentation, parameter,
-                                                         parameter.value)
+                    values[name] = coerce_parameter_value(context, presentation, parameter,
+                                                          parameter.value)
                 else:
                     default = parameter.default if hasattr(parameter, 'default') else None
-                    values[name] = coerce_property_value(context, presentation, parameter, default)
+                    values[name] = coerce_parameter_value(context, presentation, parameter, default)
 
     return values
 
+
 #
 # Utils
 #
@@ -139,8 +143,9 @@
                                       locator=presentation._get_child_locator('properties'),
                                       level=Issue.BETWEEN_TYPES)
 
-def merge_raw_property_definition(context, presentation, raw_property_definition,
-                                  our_property_definition, field_name, property_name):
+
+def merge_raw_parameter_definition(context, presentation, raw_property_definition,
+                                   our_property_definition, field_name, property_name):
     # Check if we changed the type
     # TODO: allow a sub-type?
     type1 = raw_property_definition.get('type')
@@ -154,33 +159,36 @@
 
     merge(raw_property_definition, our_property_definition._raw)
 
-def merge_raw_property_definitions(context, presentation, raw_property_definitions,
-                                   our_property_definitions, field_name):
+
+def merge_raw_parameter_definitions(context, presentation, raw_property_definitions,
+                                    our_property_definitions, field_name):
     if not our_property_definitions:
         return
     for property_name, our_property_definition in our_property_definitions.iteritems():
         if property_name in raw_property_definitions:
             raw_property_definition = raw_property_definitions[property_name]
-            merge_raw_property_definition(context, presentation, raw_property_definition,
-                                          our_property_definition, field_name, property_name)
+            merge_raw_parameter_definition(context, presentation, raw_property_definition,
+                                           our_property_definition, field_name, property_name)
         else:
             raw_property_definitions[property_name] = \
                 deepcopy_with_locators(our_property_definition._raw)
 
-def merge_property_definitions(context, presentation, property_definitions,
-                               our_property_definitions, field_name):
+
+def merge_parameter_definitions(context, presentation, property_definitions,
+                                our_property_definitions, field_name):
     if not our_property_definitions:
         return
     for property_name, our_property_definition in our_property_definitions.iteritems():
         if property_name in property_definitions:
             property_definition = property_definitions[property_name]
-            merge_raw_property_definition(context, presentation, property_definition._raw,
-                                          our_property_definition, field_name, property_name)
+            merge_raw_parameter_definition(context, presentation, property_definition._raw,
+                                           our_property_definition, field_name, property_name)
         else:
             property_definitions[property_name] = our_property_definition
 
+
 # Works on properties, inputs, and parameters
-def coerce_property_value(context, presentation, definition, value, aspect=None):
+def coerce_parameter_value(context, presentation, definition, value, aspect=None):
     the_type = definition._get_type(context) if definition is not None else None
     entry_schema = definition.entry_schema if definition is not None else None
     constraints = definition._get_constraints(context) \
@@ -194,9 +202,10 @@
     description = description.value if description is not None else None
     return Value(type_name, value, description)
 
-def convert_property_definitions_to_values(context, definitions):
+
+def convert_parameter_definitions_to_values(context, definitions):
     values = OrderedDict()
     for name, definition in definitions.iteritems():
         default = definition.default
-        values[name] = coerce_property_value(context, definition, definition, default)
+        values[name] = coerce_parameter_value(context, definition, definition, default)
     return values
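
The generic accessors derive everything from the singular field name: pluralize maps
'property' to 'properties', which selects both the assignments attribute and the
matching _get_* accessor on the type. Condensed from the code above:

    field_name_plural = pluralize(field_name)       # 'property' -> 'properties'
    assignments = getattr(presentation, field_name_plural)
    definitions = getattr(the_type, '_get_{0}'.format(field_name_plural))(context)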
diff --git a/extensions/aria_extension_tosca/simple_v1_0/modeling/policies.py b/extensions/aria_extension_tosca/simple_v1_0/modeling/policies.py
index fba1972..7dd803b 100644
--- a/extensions/aria_extension_tosca/simple_v1_0/modeling/policies.py
+++ b/extensions/aria_extension_tosca/simple_v1_0/modeling/policies.py
@@ -15,6 +15,7 @@
 
 from ..presentation.types import convert_shorthand_to_full_type_name
 
+
 #
 # PolicyType
 #
@@ -49,6 +50,7 @@
 
     return node_types, group_types
 
+
 #
 # PolicyTemplate
 #
diff --git a/extensions/aria_extension_tosca/simple_v1_0/modeling/requirements.py b/extensions/aria_extension_tosca/simple_v1_0/modeling/requirements.py
index 2a68da2..6bdb5b1 100644
--- a/extensions/aria_extension_tosca/simple_v1_0/modeling/requirements.py
+++ b/extensions/aria_extension_tosca/simple_v1_0/modeling/requirements.py
@@ -14,13 +14,14 @@
 # limitations under the License.
 
 from aria.parser.validation import Issue
-from aria.utils.collections import deepcopy_with_locators, OrderedDict
+from aria.utils.collections import (deepcopy_with_locators, OrderedDict)
 
-from .properties import (convert_property_definitions_to_values, validate_required_values,
-                         coerce_property_value)
+from .parameters import (convert_parameter_definitions_to_values, validate_required_values,
+                         coerce_parameter_value)
 from .interfaces import (convert_requirement_interface_definitions_from_type_to_raw_template,
                          merge_interface_definitions, merge_interface, validate_required_inputs)
 
+
 #
 # NodeType
 #
@@ -49,6 +50,7 @@
 
     return requirement_definitions
 
+
 #
 # NodeTemplate
 #
@@ -127,6 +129,7 @@
 
     return requirement_assignments
 
+
 #
 # Utils
 #
@@ -195,8 +198,8 @@
             if relationship_property_definitions:
                 # Convert property definitions to values
                 raw['relationship']['properties'] = \
-                    convert_property_definitions_to_values(context,
-                                                           relationship_property_definitions)
+                    convert_parameter_definitions_to_values(context,
+                                                            relationship_property_definitions)
 
         # These are our interface definitions
         # InterfaceDefinition:
@@ -229,6 +232,7 @@
         relationship_property_definitions, \
         relationship_interface_definitions
 
+
 def add_requirement_assignments(context, presentation, requirement_assignments,
                                 requirement_definitions, our_requirement_assignments):
     for requirement_name, our_requirement_assignment in our_requirement_assignments:
@@ -258,6 +262,7 @@
                                       locator=our_requirement_assignment._locator,
                                       level=Issue.BETWEEN_TYPES)
 
+
 def merge_requirement_assignment(context, relationship_property_definitions,
                                  relationship_interface_definitions, requirement, our_requirement):
     our_capability = our_requirement.capability
@@ -283,6 +288,7 @@
                                                   relationship_interface_definitions,
                                                   requirement, our_relationship)
 
+
 def merge_requirement_assignment_relationship(context, presentation, property_definitions,
                                               interface_definitions, requirement, our_relationship):
     our_relationship_properties = our_relationship._raw.get('properties')
@@ -296,7 +302,7 @@
             if property_name in property_definitions:
                 definition = property_definitions[property_name]
                 requirement._raw['relationship']['properties'][property_name] = \
-                    coerce_property_value(context, presentation, definition, prop)
+                    coerce_parameter_value(context, presentation, definition, prop)
             else:
                 context.validation.report(
                     'relationship property "%s" not declared at definition of requirement "%s"'
@@ -330,6 +336,7 @@
                        presentation._container._container._fullname),
                     locator=our_relationship._locator, level=Issue.BETWEEN_TYPES)
 
+
 def validate_requirement_assignment(context, presentation, requirement_assignment,
                                     relationship_property_definitions,
                                     relationship_interface_definitions):
@@ -348,6 +355,7 @@
             validate_required_inputs(context, presentation, interface_assignment,
                                      relationship_interface_definition, None, interface_name)
 
+
 def get_first_requirement(requirement_definitions, name):
     if requirement_definitions is not None:
         for requirement_name, requirement_definition in requirement_definitions:
diff --git a/extensions/aria_extension_tosca/simple_v1_0/modeling/substitution_mappings.py b/extensions/aria_extension_tosca/simple_v1_0/modeling/substitution_mappings.py
index c1e21de..8f7ec4c 100644
--- a/extensions/aria_extension_tosca/simple_v1_0/modeling/substitution_mappings.py
+++ b/extensions/aria_extension_tosca/simple_v1_0/modeling/substitution_mappings.py
@@ -16,6 +16,7 @@
 from aria.utils.formatting import safe_repr
 from aria.parser.validation import Issue
 
+
 def validate_subtitution_mappings_requirement(context, presentation):
     if not validate_format(context, presentation, 'requirement'):
         return
@@ -57,6 +58,7 @@
             locator=presentation._locator, level=Issue.BETWEEN_TYPES)
         return
 
+
 def validate_subtitution_mappings_capability(context, presentation):
     if not validate_format(context, presentation, 'capability'):
         return
@@ -99,6 +101,7 @@
             % (capability_type._name, presentation._name, type_capability_type._name),
             locator=presentation._locator, level=Issue.BETWEEN_TYPES)
 
+
 #
 # Utils
 #
@@ -114,6 +117,7 @@
         return False
     return True
 
+
 def get_node_template(context, presentation, name):
     node_template_name = presentation._raw[0]
     node_template = context.presentation.get_from_dict('service_template', 'topology_template',
diff --git a/extensions/aria_extension_tosca/simple_v1_0/templates.py b/extensions/aria_extension_tosca/simple_v1_0/templates.py
index ce6b5d9..123a00e 100644
--- a/extensions/aria_extension_tosca/simple_v1_0/templates.py
+++ b/extensions/aria_extension_tosca/simple_v1_0/templates.py
@@ -26,7 +26,7 @@
 from .definitions import ParameterDefinition
 from .filters import NodeFilter
 from .misc import (Description, MetaData, Repository, Import, SubstitutionMappings)
-from .modeling.properties import (get_assigned_and_defined_property_values, get_parameter_values)
+from .modeling.parameters import (get_assigned_and_defined_parameter_values, get_parameter_values)
 from .modeling.interfaces import get_template_interfaces
 from .modeling.requirements import get_template_requirements
 from .modeling.capabilities import get_template_capabilities
@@ -157,12 +157,11 @@
 
     @cachedmethod
     def _get_property_values(self, context):
-        return FrozenDict(get_assigned_and_defined_property_values(context, self))
+        return FrozenDict(get_assigned_and_defined_parameter_values(context, self, 'property'))
 
     @cachedmethod
     def _get_attribute_default_values(self, context):
-        return FrozenDict(get_assigned_and_defined_property_values(context, self,
-                                                                   'attribute', 'attributes'))
+        return FrozenDict(get_assigned_and_defined_parameter_values(context, self, 'attribute'))
 
     @cachedmethod
     def _get_requirements(self, context):
@@ -281,7 +280,7 @@
 
     @cachedmethod
     def _get_property_values(self, context):
-        return FrozenDict(get_assigned_and_defined_property_values(context, self))
+        return FrozenDict(get_assigned_and_defined_parameter_values(context, self, 'property'))
 
     @cachedmethod
     def _get_interfaces(self, context):
@@ -363,7 +362,7 @@
 
     @cachedmethod
     def _get_property_values(self, context):
-        return FrozenDict(get_assigned_and_defined_property_values(context, self))
+        return FrozenDict(get_assigned_and_defined_parameter_values(context, self, 'property'))
 
     @cachedmethod
     def _get_interfaces(self, context):
@@ -427,7 +426,7 @@
 
     @cachedmethod
     def _get_property_values(self, context):
-        return FrozenDict(get_assigned_and_defined_property_values(context, self))
+        return FrozenDict(get_assigned_and_defined_parameter_values(context, self, 'property'))
 
     @cachedmethod
     def _get_targets(self, context):
diff --git a/extensions/aria_extension_tosca/simple_v1_0/types.py b/extensions/aria_extension_tosca/simple_v1_0/types.py
index bc80eb9..d97b89c 100644
--- a/extensions/aria_extension_tosca/simple_v1_0/types.py
+++ b/extensions/aria_extension_tosca/simple_v1_0/types.py
@@ -33,9 +33,9 @@
                                     get_inherited_capability_definitions)
 from .modeling.data_types import (get_data_type, get_inherited_constraints, coerce_data_type_value,
                                   validate_data_type_name)
-from .modeling.interfaces import get_inherited_interface_definitions, get_inherited_operations
+from .modeling.interfaces import (get_inherited_interface_definitions, get_inherited_operations)
 from .modeling.policies import get_inherited_targets
-from .modeling.properties import get_inherited_property_definitions
+from .modeling.parameters import get_inherited_parameter_definitions
 from .modeling.requirements import get_inherited_requirement_definitions
 from .presentation.extensible import ExtensiblePresentation
 from .presentation.field_getters import data_type_class_getter
@@ -115,7 +115,7 @@
 
     @cachedmethod
     def _get_properties(self, context):
-        return FrozenDict(get_inherited_property_definitions(context, self, 'properties'))
+        return FrozenDict(get_inherited_parameter_definitions(context, self, 'properties'))
 
     def _validate(self, context):
         super(ArtifactType, self)._validate(context)
@@ -201,7 +201,7 @@
 
     @cachedmethod
     def _get_properties(self, context):
-        return FrozenDict(get_inherited_property_definitions(context, self, 'properties'))
+        return FrozenDict(get_inherited_parameter_definitions(context, self, 'properties'))
 
     @cachedmethod
     def _get_constraints(self, context):
@@ -307,7 +307,7 @@
 
     @cachedmethod
     def _get_properties(self, context):
-        return FrozenDict(get_inherited_property_definitions(context, self, 'properties'))
+        return FrozenDict(get_inherited_parameter_definitions(context, self, 'properties'))
 
     @cachedmethod
     def _get_valid_source_types(self, context):
@@ -385,7 +385,7 @@
 
     @cachedmethod
     def _get_inputs(self, context):
-        return FrozenDict(get_inherited_property_definitions(context, self, 'inputs'))
+        return FrozenDict(get_inherited_parameter_definitions(context, self, 'inputs'))
 
     @cachedmethod
     def _get_operations(self, context):
@@ -493,11 +493,11 @@
 
     @cachedmethod
     def _get_properties(self, context):
-        return FrozenDict(get_inherited_property_definitions(context, self, 'properties'))
+        return FrozenDict(get_inherited_parameter_definitions(context, self, 'properties'))
 
     @cachedmethod
     def _get_attributes(self, context):
-        return FrozenDict(get_inherited_property_definitions(context, self, 'attributes'))
+        return FrozenDict(get_inherited_parameter_definitions(context, self, 'attributes'))
 
     @cachedmethod
     def _get_interfaces(self, context):
@@ -624,11 +624,11 @@
 
     @cachedmethod
     def _get_properties(self, context):
-        return FrozenDict(get_inherited_property_definitions(context, self, 'properties'))
+        return FrozenDict(get_inherited_parameter_definitions(context, self, 'properties'))
 
     @cachedmethod
     def _get_attributes(self, context):
-        return FrozenDict(get_inherited_property_definitions(context, self, 'attributes'))
+        return FrozenDict(get_inherited_parameter_definitions(context, self, 'attributes'))
 
     @cachedmethod
     def _get_requirements(self, context):
@@ -760,7 +760,7 @@
 
     @cachedmethod
     def _get_properties(self, context):
-        return FrozenDict(get_inherited_property_definitions(context, self, 'properties'))
+        return FrozenDict(get_inherited_parameter_definitions(context, self, 'properties'))
 
     @cachedmethod
     def _get_interfaces(self, context):
@@ -848,7 +848,7 @@
 
     @cachedmethod
     def _get_properties(self, context):
-        return FrozenDict(get_inherited_property_definitions(context, self, 'properties'))
+        return FrozenDict(get_inherited_parameter_definitions(context, self, 'properties'))
 
     @cachedmethod
     def _get_targets(self, context):
diff --git a/tests/cli/test_services.py b/tests/cli/test_services.py
index b1a6ee4..e5717cc 100644
--- a/tests/cli/test_services.py
+++ b/tests/cli/test_services.py
@@ -19,7 +19,7 @@
 from aria.cli.env import _Environment
 from aria.core import Core
 from aria.exceptions import DependentActiveExecutionsError, DependentAvailableNodesError
-from aria.modeling.exceptions import InputsException
+from aria.modeling.exceptions import ParameterException
 from aria.storage import exceptions as storage_exceptions
 
 from .base_test import (  # pylint: disable=unused-import
@@ -120,11 +120,11 @@
         monkeypatch.setattr(_Environment, 'model_storage', mock_storage)
         monkeypatch.setattr(Core,
                             'create_service',
-                            raise_exception(InputsException))
+                            raise_exception(ParameterException))
 
         assert_exception_raised(
             self.invoke('services create -t with_inputs test_s'),
-            expected_exception=InputsException)
+            expected_exception=ParameterException)
 
         assert "Service created. The service's name is test_s" not in self.logger_output_string
 
@@ -152,8 +152,8 @@
         assert_exception_raised(
             self.invoke('services delete test_s'),
             expected_exception=DependentActiveExecutionsError,
-            expected_msg="Can't delete service {name} - there is an active execution "
-                         "for this service. Active execution id: 1".format(
+            expected_msg="Can't delete service `{name}` - there is an active execution "
+                         "for this service. Active execution ID: 1".format(
                              name=mock_models.SERVICE_NAME))
 
     def test_delete_available_nodes_error(self, monkeypatch, mock_storage):
@@ -161,8 +161,8 @@
         assert_exception_raised(
             self.invoke('services delete test_s'),
             expected_exception=DependentAvailableNodesError,
-            expected_msg="Can't delete service {name} - there are available nodes "
-                         "for this service. Available node ids: 1".format(
+            expected_msg="Can't delete service `{name}` - there are available nodes "
+                         "for this service. Available node IDs: 1".format(
                              name=mock_models.SERVICE_NAME))
 
     def test_delete_available_nodes_error_with_force(self, monkeypatch, mock_storage):
diff --git a/tests/mock/models.py b/tests/mock/models.py
index 98703d5..50aa340 100644
--- a/tests/mock/models.py
+++ b/tests/mock/models.py
@@ -225,11 +225,11 @@
                      interface_kwargs=None):
     the_type = service.service_template.interface_types.get_descendant('test_interface_type')
 
-    if operation_kwargs and operation_kwargs.get('inputs'):
-        operation_kwargs['inputs'] = dict(
-            (input_name, models.Parameter.wrap(input_name, input_value))
-            for input_name, input_value in operation_kwargs['inputs'].iteritems()
-            if input_value is not None)
+    if operation_kwargs and operation_kwargs.get('arguments'):
+        operation_kwargs['arguments'] = dict(
+            (argument_name, models.Parameter.wrap(argument_name, argument_value))
+            for argument_name, argument_value in operation_kwargs['arguments'].iteritems()
+            if argument_value is not None)
 
     operation = models.Operation(
         name=operation_name,
diff --git a/tests/mock/topology.py b/tests/mock/topology.py
index bfb7b4e..ab08dbd 100644
--- a/tests/mock/topology.py
+++ b/tests/mock/topology.py
@@ -27,9 +27,9 @@
         service_template,
         'Standard', 'create',
         operation_kwargs=dict(
-            implementation=create_operation,
-            inputs={'key': aria_models.Parameter.wrap('key', 'create'),
-                    'value': aria_models.Parameter.wrap('value', True)})
+            function=create_operation,
+            arguments={'key': aria_models.Parameter.wrap('key', 'create'),
+                       'value': aria_models.Parameter.wrap('value', True)})
     )
     node_template.interface_templates[interface_template.name] = interface_template                 # pylint: disable=unsubscriptable-object
 
@@ -38,9 +38,9 @@
         service,
         'Standard', 'create',
         operation_kwargs=dict(
-            implementation=create_operation,
-            inputs={'key': aria_models.Parameter.wrap('key', 'create'),
-                    'value': aria_models.Parameter.wrap('value', True)})
+            function=create_operation,
+            arguments={'key': aria_models.Parameter.wrap('key', 'create'),
+                       'value': aria_models.Parameter.wrap('value', True)})
     )
     node.interfaces[interface.name] = interface                                                     # pylint: disable=unsubscriptable-object
 
diff --git a/tests/modeling/test_models.py b/tests/modeling/test_models.py
index 57511dd..df3aebd 100644
--- a/tests/modeling/test_models.py
+++ b/tests/modeling/test_models.py
@@ -755,7 +755,7 @@
 
     @pytest.mark.parametrize(
         'is_valid, status, due_at, started_at, ended_at, max_attempts, attempts_count, '
-        'retry_interval, ignore_failure, name, operation_mapping, inputs, plugin_id',
+        'retry_interval, ignore_failure, name, operation_mapping, arguments, plugin_id',
         [
             (False, m_cls, now, now, now, 1, 1, 1, True, 'name', 'map', {}, '1'),
             (False, Task.STARTED, m_cls, now, now, 1, 1, 1, True, 'name', 'map', {}, '1'),
@@ -784,7 +784,7 @@
     )
     def test_task_model_creation(self, execution_storage, is_valid, status, due_at, started_at,
                                  ended_at, max_attempts, attempts_count, retry_interval,
-                                 ignore_failure, name, operation_mapping, inputs, plugin_id):
+                                 ignore_failure, name, operation_mapping, arguments, plugin_id):
         task = _test_model(
             is_valid=is_valid,
             storage=execution_storage,
@@ -800,8 +800,8 @@
                 retry_interval=retry_interval,
                 ignore_failure=ignore_failure,
                 name=name,
-                implementation=operation_mapping,
-                inputs=inputs,
+                function=operation_mapping,
+                arguments=arguments,
                 plugin_fk=plugin_id,
             ))
         if is_valid:
@@ -813,8 +813,8 @@
         def create_task(max_attempts):
             Task(execution_fk='eid',
                  name='name',
-                 implementation='',
-                 inputs={},
+                 function='',
+                 arguments={},
                  max_attempts=max_attempts)
         create_task(max_attempts=1)
         create_task(max_attempts=2)
diff --git a/tests/orchestrator/context/test_operation.py b/tests/orchestrator/context/test_operation.py
index 5d193bc..3dcfaa2 100644
--- a/tests/orchestrator/context/test_operation.py
+++ b/tests/orchestrator/context/test_operation.py
@@ -78,14 +78,14 @@
     interface_name = 'Standard'
     operation_name = 'create'
 
-    inputs = {'putput': True, 'holder_path': dataholder.path}
+    arguments = {'putput': True, 'holder_path': dataholder.path}
     node = ctx.model.node.get_by_name(mock.models.DEPENDENCY_NODE_NAME)
     interface = mock.models.create_interface(
         node.service,
         interface_name,
         operation_name,
-        operation_kwargs=dict(implementation=op_path(basic_node_operation, module_path=__name__),
-                              inputs=inputs)
+        operation_kwargs=dict(function=op_path(basic_node_operation, module_path=__name__),
+                              arguments=arguments)
     )
     node.interfaces[interface.name] = interface
     ctx.model.node.update(node)
@@ -97,7 +97,7 @@
                 node,
                 interface_name=interface_name,
                 operation_name=operation_name,
-                inputs=inputs
+                arguments=arguments
             )
         )
 
@@ -115,8 +115,8 @@
     )
     operations = interface.operations
     assert len(operations) == 1
-    assert dataholder['implementation'] == operations.values()[0].implementation             # pylint: disable=no-member
-    assert dataholder['inputs']['putput'] is True
+    assert dataholder['function'] == operations.values()[0].function             # pylint: disable=no-member
+    assert dataholder['arguments']['putput'] is True
 
     # Context based attributes (sugaring)
     assert dataholder['template_name'] == node.node_template.name
@@ -127,15 +127,14 @@
     interface_name = 'Configure'
     operation_name = 'post_configure'
 
-    inputs = {'putput': True, 'holder_path': dataholder.path}
+    arguments = {'putput': True, 'holder_path': dataholder.path}
     relationship = ctx.model.relationship.list()[0]
     interface = mock.models.create_interface(
         relationship.source_node.service,
         interface_name,
         operation_name,
-        operation_kwargs=dict(implementation=op_path(basic_relationship_operation,
-                                                     module_path=__name__),
-                              inputs=inputs),
+        operation_kwargs=dict(function=op_path(basic_relationship_operation, module_path=__name__),
+                              arguments=arguments),
     )
 
     relationship.interfaces[interface.name] = interface
@@ -148,7 +147,7 @@
                 relationship,
                 interface_name=interface_name,
                 operation_name=operation_name,
-                inputs=inputs
+                arguments=arguments
             )
         )
 
@@ -160,8 +159,8 @@
     assert dataholder['actor_name'] == relationship.name
     assert interface_name in dataholder['task_name']
     operations = interface.operations
-    assert dataholder['implementation'] == operations.values()[0].implementation           # pylint: disable=no-member
-    assert dataholder['inputs']['putput'] is True
+    assert dataholder['function'] == operations.values()[0].function           # pylint: disable=no-member
+    assert dataholder['arguments']['putput'] is True
 
     # Context based attributes (sugaring)
     dependency_node_template = ctx.model.node_template.get_by_name(
@@ -197,8 +196,8 @@
         node.service,
         interface_name=interface_name,
         operation_name=operation_name,
-        operation_kwargs=dict(implementation=op_path(get_node_id, module_path=__name__),
-                              inputs={'holder_path': dataholder.path})
+        operation_kwargs=dict(function=op_path(get_node_id, module_path=__name__),
+                              arguments={'holder_path': dataholder.path})
     )
     node.interfaces[interface.name] = interface
     ctx.model.node.update(node)
@@ -234,15 +233,15 @@
     node = ctx.model.node.get_by_name(mock.models.DEPENDENCY_NODE_NAME)
     filename = 'test_file'
     content = 'file content'
-    inputs = {'filename': filename, 'content': content}
+    arguments = {'filename': filename, 'content': content}
     interface = mock.models.create_interface(
         node.service,
         interface_name,
         operation_name,
         operation_kwargs=dict(
-            implementation='{0}.{1}'.format(__name__, _test_plugin_workdir.__name__),
+            function='{0}.{1}'.format(__name__, _test_plugin_workdir.__name__),
             plugin=plugin,
-            inputs=inputs)
+            arguments=arguments)
     )
     node.interfaces[interface.name] = interface
     ctx.model.node.update(node)
@@ -253,7 +252,7 @@
             node,
             interface_name=interface_name,
             operation_name=operation_name,
-            inputs=inputs))
+            arguments=arguments))
 
     execute(workflow_func=basic_workflow, workflow_context=ctx, executor=thread_executor)
     expected_file = tmpdir.join('workdir', 'plugins', str(ctx.service.id),
@@ -280,7 +279,7 @@
 
     node = ctx.model.node.get_by_name(mock.models.DEPENDENCY_NODE_NAME)
 
-    inputs = {
+    arguments = {
         'op_start': 'op_start',
         'op_end': 'op_end',
     }
@@ -289,8 +288,8 @@
         interface_name,
         operation_name,
         operation_kwargs=dict(
-            implementation=op_path(logged_operation, module_path=__name__),
-            inputs=inputs)
+            function=op_path(logged_operation, module_path=__name__),
+            arguments=arguments)
     )
     node.interfaces[interface.name] = interface
     ctx.model.node.update(node)
@@ -302,19 +301,19 @@
                 node,
                 interface_name=interface_name,
                 operation_name=operation_name,
-                inputs=inputs
+                arguments=arguments
             )
         )
 
     execute(workflow_func=basic_workflow, workflow_context=ctx, executor=executor)
-    _assert_loggins(ctx, inputs)
+    _assert_loggins(ctx, arguments)
 
 
 def test_relationship_operation_logging(ctx, executor):
     interface_name, operation_name = mock.operations.RELATIONSHIP_OPERATIONS_INSTALL[0]
 
     relationship = ctx.model.relationship.list()[0]
-    inputs = {
+    arguments = {
         'op_start': 'op_start',
         'op_end': 'op_end',
     }
@@ -322,8 +321,8 @@
         relationship.source_node.service,
         interface_name,
         operation_name,
-        operation_kwargs=dict(implementation=op_path(logged_operation, module_path=__name__),
-                              inputs=inputs)
+        operation_kwargs=dict(function=op_path(logged_operation, module_path=__name__),
+                              arguments=arguments)
     )
     relationship.interfaces[interface.name] = interface
     ctx.model.relationship.update(relationship)
@@ -335,12 +334,12 @@
                 relationship,
                 interface_name=interface_name,
                 operation_name=operation_name,
-                inputs=inputs
+                arguments=arguments
             )
         )
 
     execute(workflow_func=basic_workflow, workflow_context=ctx, executor=executor)
-    _assert_loggins(ctx, inputs)
+    _assert_loggins(ctx, arguments)
 
 
 def test_attribute_consumption(ctx, executor, dataholder):
@@ -349,15 +348,15 @@
 
     source_node = ctx.model.node.get_by_name(mock.models.DEPENDENT_NODE_NAME)
 
-    inputs = {'dict_': {'key': 'value'},
-              'set_test_dict': {'key2': 'value2'}}
+    arguments = {'dict_': {'key': 'value'},
+                 'set_test_dict': {'key2': 'value2'}}
     interface = mock.models.create_interface(
         source_node.service,
         node_int_name,
         node_op_name,
         operation_kwargs=dict(
-            implementation=op_path(attribute_altering_operation, module_path=__name__),
-            inputs=inputs)
+            function=op_path(attribute_altering_operation, module_path=__name__),
+            arguments=arguments)
     )
     source_node.interfaces[interface.name] = interface
     ctx.model.node.update(source_node)
@@ -372,8 +371,8 @@
         rel_int_name,
         rel_op_name,
         operation_kwargs=dict(
-            implementation=op_path(attribute_consuming_operation, module_path=__name__),
-            inputs={'holder_path': dataholder.path}
+            function=op_path(attribute_consuming_operation, module_path=__name__),
+            arguments={'holder_path': dataholder.path}
         )
     )
     relationship.interfaces[interface.name] = interface
@@ -387,7 +386,7 @@
                 source_node,
                 interface_name=node_int_name,
                 operation_name=node_op_name,
-                inputs=inputs
+                arguments=arguments
             ),
             api.task.OperationTask(
                 relationship,
@@ -411,8 +410,7 @@
            dataholder['key2'] == 'value2'
 
 
-def _assert_loggins(ctx, inputs):
-
+def _assert_loggins(ctx, arguments):
     # The logs should contain the following: Workflow Start, Operation Start, custom operation
     # log string (op_start), custom operation log string (op_end), Operation End, Workflow End.
 
@@ -432,11 +430,11 @@
     assert all(l.execution == execution for l in logs)
     assert all(l in logs and l.task == task for l in task.logs)
 
-    op_start_log = [l for l in logs if inputs['op_start'] in l.msg and l.level.lower() == 'info']
+    op_start_log = [l for l in logs if arguments['op_start'] in l.msg and l.level.lower() == 'info']
     assert len(op_start_log) == 1
     op_start_log = op_start_log[0]
 
-    op_end_log = [l for l in logs if inputs['op_end'] in l.msg and l.level.lower() == 'debug']
+    op_end_log = [l for l in logs if arguments['op_end'] in l.msg and l.level.lower() == 'debug']
     assert len(op_end_log) == 1
     op_end_log = op_end_log[0]
 
@@ -445,10 +443,10 @@
 
 @operation
 def logged_operation(ctx, **_):
-    ctx.logger.info(ctx.task.inputs['op_start'].value)
+    ctx.logger.info(ctx.task.arguments['op_start'].value)
     # sleep to ensure the op_start and op_end log entries get distinct created_at timestamps
     time.sleep(1)
-    ctx.logger.debug(ctx.task.inputs['op_end'].value)
+    ctx.logger.debug(ctx.task.arguments['op_end'].value)
 
 
 @operation
@@ -477,8 +475,8 @@
 
     holder['actor_name'] = ctx.task.actor.name
     holder['task_name'] = ctx.task.name
-    holder['implementation'] = ctx.task.implementation
-    holder['inputs'] = dict(i.unwrap() for i in ctx.task.inputs.values())
+    holder['function'] = ctx.task.function
+    holder['arguments'] = dict(i.unwrapped for i in ctx.task.arguments.values())
 
 
 @operation
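
For context, the assertions above exercise the renamed Task fields end-to-end. A
minimal sketch of an operation function reading them, assuming the `operation`
decorator import used elsewhere in these tests:

    from aria.orchestrator import operation

    @operation
    def show_task_fields(ctx, **_):
        # 'function' replaces the old 'implementation' field
        ctx.logger.info(ctx.task.function)
        # 'arguments' replaces 'inputs'; 'unwrapped' yields (name, value) pairs
        ctx.logger.info(dict(arg.unwrapped for arg in ctx.task.arguments.values()))
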
diff --git a/tests/orchestrator/context/test_serialize.py b/tests/orchestrator/context/test_serialize.py
index 8a5db6f..4db7bf4 100644
--- a/tests/orchestrator/context/test_serialize.py
+++ b/tests/orchestrator/context/test_serialize.py
@@ -47,7 +47,7 @@
         node.service,
         'test',
         'op',
-        operation_kwargs=dict(implementation=_operation_mapping(),
+        operation_kwargs=dict(function=_operation_mapping(),
                               plugin=plugin)
     )
     node.interfaces[interface.name] = interface
@@ -60,8 +60,8 @@
 def _mock_operation(ctx):
     # We test several things in this operation
     # ctx.task, ctx.node, etc... tell us that the model storage was properly re-created
-    # a correct ctx.task.implementation tells us we kept the correct task_id
-    assert ctx.task.implementation == _operation_mapping()
+    # a correct ctx.task.function tells us we kept the correct task_id
+    assert ctx.task.function == _operation_mapping()
     # a correct ctx.node.name tells us we kept the correct actor_id
     assert ctx.node.name == mock.models.DEPENDENCY_NODE_NAME
     # a correct ctx.name tells us we kept the correct name
diff --git a/tests/orchestrator/context/test_toolbelt.py b/tests/orchestrator/context/test_toolbelt.py
index fc34907..326ce83 100644
--- a/tests/orchestrator/context/test_toolbelt.py
+++ b/tests/orchestrator/context/test_toolbelt.py
@@ -86,12 +86,12 @@
     interface_name = 'Standard'
     operation_name = 'create'
     _, dependency_node, _, _, _ = _get_elements(workflow_context)
-    inputs = {'putput': True, 'holder_path': dataholder.path}
+    arguments = {'putput': True, 'holder_path': dataholder.path}
     interface = mock.models.create_interface(
         dependency_node.service,
         interface_name=interface_name,
         operation_name=operation_name,
-        operation_kwargs=dict(implementation=op_path(host_ip, module_path=__name__), inputs=inputs)
+        operation_kwargs=dict(function=op_path(host_ip, module_path=__name__), arguments=arguments)
     )
     dependency_node.interfaces[interface.name] = interface
     dependency_node.attributes['ip'] = models.Parameter.wrap('ip', '1.1.1.1')
@@ -105,7 +105,7 @@
                 dependency_node,
                 interface_name=interface_name,
                 operation_name=operation_name,
-                inputs=inputs
+                arguments=arguments
             )
         )
 
@@ -118,13 +118,13 @@
     interface_name = 'Configure'
     operation_name = 'post_configure'
     _, _, _, _, relationship = _get_elements(workflow_context)
-    inputs = {'putput': True, 'holder_path': dataholder.path}
+    arguments = {'putput': True, 'holder_path': dataholder.path}
     interface = mock.models.create_interface(
         relationship.source_node.service,
         interface_name=interface_name,
         operation_name=operation_name,
-        operation_kwargs=dict(implementation=op_path(relationship_operation, module_path=__name__),
-                              inputs=inputs)
+        operation_kwargs=dict(function=op_path(relationship_operation, module_path=__name__),
+                              arguments=arguments)
     )
     relationship.interfaces[interface.name] = interface
     workflow_context.model.relationship.update(relationship)
@@ -136,7 +136,7 @@
                 relationship,
                 interface_name=interface_name,
                 operation_name=operation_name,
-                inputs=inputs
+                arguments=arguments
             )
         )
 
diff --git a/tests/orchestrator/execution_plugin/test_local.py b/tests/orchestrator/execution_plugin/test_local.py
index d9115e1..d792a57 100644
--- a/tests/orchestrator/execution_plugin/test_local.py
+++ b/tests/orchestrator/execution_plugin/test_local.py
@@ -199,7 +199,7 @@
         props = self._run(
             executor, workflow_context,
             script_path=script_path,
-            inputs={'key': 'value'})
+            arguments={'key': 'value'})
         assert props['key'].value == 'value'
 
     @pytest.mark.parametrize(
@@ -460,10 +460,10 @@
              script_path,
              process=None,
              env_var='value',
-             inputs=None):
+             arguments=None):
         local_script_path = script_path
         script_path = os.path.basename(local_script_path) if local_script_path else ''
-        inputs = inputs or {}
+        arguments = arguments or {}
         process = process or {}
         if script_path:
             workflow_context.resource.service.upload(
@@ -471,7 +471,7 @@
                 source=local_script_path,
                 path=script_path)
 
-        inputs.update({
+        arguments.update({
             'script_path': script_path,
             'process': process,
             'input_as_env_var': env_var
@@ -485,17 +485,17 @@
                 'test',
                 'op',
                 operation_kwargs=dict(
-                    implementation='{0}.{1}'.format(
+                    function='{0}.{1}'.format(
                         operations.__name__,
                         operations.run_script_locally.__name__),
-                    inputs=inputs)
+                    arguments=arguments)
             )
             node.interfaces[interface.name] = interface
             graph.add_tasks(api.task.OperationTask(
                 node,
                 interface_name='test',
                 operation_name='op',
-                inputs=inputs))
+                arguments=arguments))
             return graph
         tasks_graph = mock_workflow(ctx=workflow_context)  # pylint: disable=no-value-for-parameter
         eng = engine.Engine(
diff --git a/tests/orchestrator/execution_plugin/test_ssh.py b/tests/orchestrator/execution_plugin/test_ssh.py
index 92d250e..899a007 100644
--- a/tests/orchestrator/execution_plugin/test_ssh.py
+++ b/tests/orchestrator/execution_plugin/test_ssh.py
@@ -217,7 +217,7 @@
         @workflow
         def mock_workflow(ctx, graph):
             node = ctx.model.node.get_by_name(mock.models.DEPENDENCY_NODE_NAME)
-            inputs = {
+            arguments = {
                 'script_path': script_path,
                 'fabric_env': _FABRIC_ENV,
                 'process': process,
@@ -226,30 +226,30 @@
                 'test_operation': '',
             }
             if hide_output:
-                inputs['hide_output'] = hide_output
+                arguments['hide_output'] = hide_output
             if commands:
-                inputs['commands'] = commands
+                arguments['commands'] = commands
             interface = mock.models.create_interface(
                 node.service,
                 'test',
                 'op',
                 operation_kwargs=dict(
-                    implementation='{0}.{1}'.format(
+                    function='{0}.{1}'.format(
                         operations.__name__,
                         operation.__name__),
-                    inputs=inputs)
+                    arguments=arguments)
             )
             node.interfaces[interface.name] = interface
 
             ops = []
             for test_operation in test_operations:
-                op_inputs = inputs.copy()
-                op_inputs['test_operation'] = test_operation
+                op_arguments = arguments.copy()
+                op_arguments['test_operation'] = test_operation
                 ops.append(api.task.OperationTask(
                     node,
                     interface_name='test',
                     operation_name='op',
-                    inputs=op_inputs))
+                    arguments=op_arguments))
 
             graph.sequence(*ops)
             return graph
diff --git a/tests/orchestrator/test_workflow_runner.py b/tests/orchestrator/test_workflow_runner.py
index 7374e50..3646339 100644
--- a/tests/orchestrator/test_workflow_runner.py
+++ b/tests/orchestrator/test_workflow_runner.py
@@ -48,8 +48,7 @@
     workflow = models.Operation(
         name='test_workflow',
         service=service,
-        implementation='nonexistent.workflow.implementation',
-        inputs={})
+        function='nonexistent.workflow.implementation')
     service.workflows['test_workflow'] = workflow
 
     with pytest.raises(exceptions.WorkflowImplementationNotFoundError):
@@ -190,7 +189,7 @@
 def test_execution_inputs_undeclared_inputs(request):
     mock_workflow = _setup_mock_workflow_in_service(request)
 
-    with pytest.raises(modeling_exceptions.UndeclaredInputsException):
+    with pytest.raises(modeling_exceptions.UndeclaredParametersException):
         _create_workflow_runner(request, mock_workflow, inputs={'undeclared_input': 'value'})
 
 
@@ -198,7 +197,7 @@
     mock_workflow = _setup_mock_workflow_in_service(
         request, inputs={'required_input': models.Parameter.wrap('required_input', value=None)})
 
-    with pytest.raises(modeling_exceptions.MissingRequiredInputsException):
+    with pytest.raises(modeling_exceptions.MissingRequiredParametersException):
         _create_workflow_runner(request, mock_workflow, inputs={})
 
 
@@ -206,13 +205,13 @@
     mock_workflow = _setup_mock_workflow_in_service(
         request, inputs={'input': models.Parameter.wrap('input', 'value')})
 
-    with pytest.raises(modeling_exceptions.InputsOfWrongTypeException):
+    with pytest.raises(modeling_exceptions.ParametersOfWrongTypeException):
         _create_workflow_runner(request, mock_workflow, inputs={'input': 5})
 
 
 def test_execution_inputs_builtin_workflow_with_inputs(request):
     # built-in workflows don't have inputs
-    with pytest.raises(modeling_exceptions.UndeclaredInputsException):
+    with pytest.raises(modeling_exceptions.UndeclaredParametersException):
         _create_workflow_runner(request, 'install', inputs={'undeclared_input': 'value'})
 
 
@@ -259,8 +258,9 @@
     workflow = models.Operation(
         name=mock_workflow_name,
         service=service,
-        implementation='workflow.mock_workflow',
-        inputs=inputs or {})
+        function='workflow.mock_workflow',
+        inputs=inputs or {},
+        arguments=inputs or {})
     service.workflows[mock_workflow_name] = workflow
     return mock_workflow_name
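
Mirroring _setup_mock_workflow_in_service above, a hedged sketch of the new
demarcation on the Operation model: 'inputs' keeps the user-declared parameters
while 'arguments' holds what is actually passed to the function (all names here
are illustrative):

    workflow = models.Operation(
        name='my_workflow',
        function='my_package.my_workflow',                         # was: implementation=
        inputs={'limit': models.Parameter.wrap('limit', 10)},      # user data
        arguments={'limit': models.Parameter.wrap('limit', 10)})   # orchestration data
    service.workflows['my_workflow'] = workflow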
 
diff --git a/tests/orchestrator/workflows/api/test_task.py b/tests/orchestrator/workflows/api/test_task.py
index 642c785..9d91b6b 100644
--- a/tests/orchestrator/workflows/api/test_task.py
+++ b/tests/orchestrator/workflows/api/test_task.py
@@ -44,15 +44,15 @@
         plugin = mock.models.create_plugin('test_plugin', '0.1')
         ctx.model.plugin.update(plugin)
 
-        inputs = {'test_input': True}
+        arguments = {'test_input': True}
 
         interface = mock.models.create_interface(
             ctx.service,
             interface_name,
             operation_name,
             operation_kwargs=dict(plugin=plugin,
-                                  implementation='op_path',
-                                  inputs=inputs),)
+                                  function='op_path',
+                                  arguments=arguments),)
 
         node = ctx.model.node.get_by_name(mock.models.DEPENDENT_NODE_NAME)
         node.interfaces[interface_name] = interface
@@ -66,7 +66,7 @@
                 node,
                 interface_name=interface_name,
                 operation_name=operation_name,
-                inputs=inputs,
+                arguments=arguments,
                 max_attempts=max_attempts,
                 retry_interval=retry_interval,
                 ignore_failure=ignore_failure)
@@ -77,9 +77,9 @@
             interface=interface_name,
             operation=operation_name
         )
-        assert api_task.implementation == 'op_path'
+        assert api_task.function == 'op_path'
         assert api_task.actor == node
-        assert api_task.inputs['test_input'].value is True
+        assert api_task.arguments['test_input'].value is True
         assert api_task.retry_interval == retry_interval
         assert api_task.max_attempts == max_attempts
         assert api_task.ignore_failure == ignore_failure
@@ -92,15 +92,15 @@
         plugin = mock.models.create_plugin('test_plugin', '0.1')
         ctx.model.plugin.update(plugin)
 
-        inputs = {'test_input': True}
+        arguments = {'test_input': True}
 
         interface = mock.models.create_interface(
             ctx.service,
             interface_name,
             operation_name,
             operation_kwargs=dict(plugin=plugin,
-                                  implementation='op_path',
-                                  inputs=inputs)
+                                  function='op_path',
+                                  arguments=arguments)
         )
 
         relationship = ctx.model.relationship.list()[0]
@@ -113,7 +113,7 @@
                 relationship,
                 interface_name=interface_name,
                 operation_name=operation_name,
-                inputs=inputs,
+                arguments=arguments,
                 max_attempts=max_attempts,
                 retry_interval=retry_interval)
 
@@ -123,9 +123,9 @@
             interface=interface_name,
             operation=operation_name
         )
-        assert api_task.implementation == 'op_path'
+        assert api_task.function == 'op_path'
         assert api_task.actor == relationship
-        assert api_task.inputs['test_input'].value is True
+        assert api_task.arguments['test_input'].value is True
         assert api_task.retry_interval == retry_interval
         assert api_task.max_attempts == max_attempts
         assert api_task.plugin.name == 'test_plugin'
@@ -137,15 +137,15 @@
         plugin = mock.models.create_plugin('test_plugin', '0.1')
         ctx.model.plugin.update(plugin)
 
-        inputs = {'test_input': True}
+        arguments = {'test_input': True}
 
         interface = mock.models.create_interface(
             ctx.service,
             interface_name,
             operation_name,
             operation_kwargs=dict(plugin=plugin,
-                                  implementation='op_path',
-                                  inputs=inputs)
+                                  function='op_path',
+                                  arguments=arguments)
         )
 
         relationship = ctx.model.relationship.list()[0]
@@ -158,7 +158,7 @@
                 relationship,
                 interface_name=interface_name,
                 operation_name=operation_name,
-                inputs=inputs,
+                arguments=arguments,
                 max_attempts=max_attempts,
                 retry_interval=retry_interval)
 
@@ -168,9 +168,9 @@
             interface=interface_name,
             operation=operation_name
         )
-        assert api_task.implementation == 'op_path'
+        assert api_task.function == 'op_path'
         assert api_task.actor == relationship
-        assert api_task.inputs['test_input'].value is True
+        assert api_task.arguments['test_input'].value is True
         assert api_task.retry_interval == retry_interval
         assert api_task.max_attempts == max_attempts
         assert api_task.plugin.name == 'test_plugin'
@@ -189,7 +189,7 @@
             interface_name,
             operation_name,
             operation_kwargs=dict(plugin=plugin,
-                                  implementation='op_path'))
+                                  function='op_path'))
         dependency_node.interfaces[interface_name] = interface
 
         with context.workflow.current.push(ctx):
@@ -198,7 +198,7 @@
                 interface_name=interface_name,
                 operation_name=operation_name)
 
-        assert task.inputs == {}
+        assert task.arguments == {}
         assert task.retry_interval == ctx._task_retry_interval
         assert task.max_attempts == ctx._task_max_attempts
         assert task.ignore_failure == ctx._task_ignore_failure
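
As exercised above, api.task.OperationTask now takes 'arguments' and exposes
'function'; plain values are wrapped into Parameter models on the way in. A
minimal sketch under the same fixtures:

    task = api.task.OperationTask(
        node,                               # the actor
        interface_name=interface_name,
        operation_name=operation_name,
        arguments={'test_input': True})     # was: inputs=
    assert task.function == 'op_path'       # was: task.implementation
    assert task.arguments['test_input'].value is True
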
diff --git a/tests/orchestrator/workflows/builtin/test_execute_operation.py b/tests/orchestrator/workflows/builtin/test_execute_operation.py
index 4cddbe6..88818ca 100644
--- a/tests/orchestrator/workflows/builtin/test_execute_operation.py
+++ b/tests/orchestrator/workflows/builtin/test_execute_operation.py
@@ -35,7 +35,7 @@
         ctx.service,
         interface_name,
         operation_name,
-        operation_kwargs={'implementation': 'test'}
+        operation_kwargs=dict(function='test')
     )
     node.interfaces[interface.name] = interface
     ctx.model.node.update(node)
diff --git a/tests/orchestrator/workflows/core/test_engine.py b/tests/orchestrator/workflows/core/test_engine.py
index 8c0705b..6d2836c 100644
--- a/tests/orchestrator/workflows/core/test_engine.py
+++ b/tests/orchestrator/workflows/core/test_engine.py
@@ -57,17 +57,17 @@
     @staticmethod
     def _op(ctx,
             func,
-            inputs=None,
+            arguments=None,
             max_attempts=None,
             retry_interval=None,
             ignore_failure=None):
         node = ctx.model.node.get_by_name(mock.models.DEPENDENCY_NODE_NAME)
         interface_name = 'aria.interfaces.lifecycle'
-        operation_kwargs = dict(implementation='{name}.{func.__name__}'.format(
+        operation_kwargs = dict(function='{name}.{func.__name__}'.format(
             name=__name__, func=func))
-        if inputs:
-            # the operation has to declare the inputs before those may be passed
-            operation_kwargs['inputs'] = inputs
+        if arguments:
+            # the operation has to declare the arguments before they can be passed
+            operation_kwargs['arguments'] = arguments
         operation_name = 'create'
         interface = mock.models.create_interface(node.service, interface_name, operation_name,
                                                  operation_kwargs=operation_kwargs)
@@ -77,7 +77,7 @@
             node,
             interface_name='aria.interfaces.lifecycle',
             operation_name=operation_name,
-            inputs=inputs or {},
+            arguments=arguments,
             max_attempts=max_attempts,
             retry_interval=retry_interval,
             ignore_failure=ignore_failure,
@@ -189,8 +189,8 @@
     def test_two_tasks_execution_order(self, workflow_context, executor):
         @workflow
         def mock_workflow(ctx, graph):
-            op1 = self._op(ctx, func=mock_ordered_task, inputs={'counter': 1})
-            op2 = self._op(ctx, func=mock_ordered_task, inputs={'counter': 2})
+            op1 = self._op(ctx, func=mock_ordered_task, arguments={'counter': 1})
+            op2 = self._op(ctx, func=mock_ordered_task, arguments={'counter': 2})
             graph.sequence(op1, op2)
         self._execute(
             workflow_func=mock_workflow,
@@ -204,9 +204,9 @@
     def test_stub_and_subworkflow_execution(self, workflow_context, executor):
         @workflow
         def sub_workflow(ctx, graph):
-            op1 = self._op(ctx, func=mock_ordered_task, inputs={'counter': 1})
+            op1 = self._op(ctx, func=mock_ordered_task, arguments={'counter': 1})
             op2 = api.task.StubTask()
-            op3 = self._op(ctx, func=mock_ordered_task, inputs={'counter': 2})
+            op3 = self._op(ctx, func=mock_ordered_task, arguments={'counter': 2})
             graph.sequence(op1, op2, op3)
 
         @workflow
@@ -229,7 +229,7 @@
         @workflow
         def mock_workflow(ctx, graph):
             operations = (
-                self._op(ctx, func=mock_sleep_task, inputs=dict(seconds=0.1))
+                self._op(ctx, func=mock_sleep_task, arguments=dict(seconds=0.1))
                 for _ in range(number_of_tasks)
             )
             return graph.sequence(*operations)
@@ -270,7 +270,7 @@
         @workflow
         def mock_workflow(ctx, graph):
             op = self._op(ctx, func=mock_conditional_failure_task,
-                          inputs={'failure_count': 1},
+                          arguments={'failure_count': 1},
                           max_attempts=2)
             graph.add_tasks(op)
         self._execute(
@@ -286,7 +286,7 @@
         @workflow
         def mock_workflow(ctx, graph):
             op = self._op(ctx, func=mock_conditional_failure_task,
-                          inputs={'failure_count': 2},
+                          arguments={'failure_count': 2},
                           max_attempts=2)
             graph.add_tasks(op)
         with pytest.raises(exceptions.ExecutorException):
@@ -303,7 +303,7 @@
         @workflow
         def mock_workflow(ctx, graph):
             op = self._op(ctx, func=mock_conditional_failure_task,
-                          inputs={'failure_count': 1},
+                          arguments={'failure_count': 1},
                           max_attempts=3)
             graph.add_tasks(op)
         self._execute(
@@ -319,7 +319,7 @@
         @workflow
         def mock_workflow(ctx, graph):
             op = self._op(ctx, func=mock_conditional_failure_task,
-                          inputs={'failure_count': 2},
+                          arguments={'failure_count': 2},
                           max_attempts=3)
             graph.add_tasks(op)
         self._execute(
@@ -335,7 +335,7 @@
         @workflow
         def mock_workflow(ctx, graph):
             op = self._op(ctx, func=mock_conditional_failure_task,
-                          inputs={'failure_count': 1},
+                          arguments={'failure_count': 1},
                           max_attempts=-1)
             graph.add_tasks(op)
         self._execute(
@@ -361,7 +361,7 @@
         @workflow
         def mock_workflow(ctx, graph):
             op = self._op(ctx, func=mock_conditional_failure_task,
-                          inputs={'failure_count': 1},
+                          arguments={'failure_count': 1},
                           max_attempts=2,
                           retry_interval=retry_interval)
             graph.add_tasks(op)
@@ -382,7 +382,7 @@
         def mock_workflow(ctx, graph):
             op = self._op(ctx, func=mock_conditional_failure_task,
                           ignore_failure=True,
-                          inputs={'failure_count': 100},
+                          arguments={'failure_count': 100},
                           max_attempts=100)
             graph.add_tasks(op)
         self._execute(
@@ -405,7 +405,7 @@
         @workflow
         def mock_workflow(ctx, graph):
             op = self._op(ctx, func=mock_task_retry,
-                          inputs={'message': self.message},
+                          arguments={'message': self.message},
                           retry_interval=default_retry_interval,
                           max_attempts=2)
             graph.add_tasks(op)
@@ -429,8 +429,8 @@
         @workflow
         def mock_workflow(ctx, graph):
             op = self._op(ctx, func=mock_task_retry,
-                          inputs={'message': self.message,
-                                  'retry_interval': custom_retry_interval},
+                          arguments={'message': self.message,
+                                     'retry_interval': custom_retry_interval},
                           retry_interval=default_retry_interval,
                           max_attempts=2)
             graph.add_tasks(op)
@@ -452,7 +452,7 @@
         @workflow
         def mock_workflow(ctx, graph):
             op = self._op(ctx, func=mock_task_abort,
-                          inputs={'message': self.message},
+                          arguments={'message': self.message},
                           retry_interval=100,
                           max_attempts=100)
             graph.add_tasks(op)
diff --git a/tests/orchestrator/workflows/core/test_events.py b/tests/orchestrator/workflows/core/test_events.py
index 184071d..6d542e9 100644
--- a/tests/orchestrator/workflows/core/test_events.py
+++ b/tests/orchestrator/workflows/core/test_events.py
@@ -110,8 +110,7 @@
         service=node.service,
         interface_name=interface_name,
         operation_name=op_name,
-        operation_kwargs=dict(implementation='{name}.{func.__name__}'.format(name=__name__,
-                                                                             func=func)))
+        operation_kwargs=dict(function='{name}.{func.__name__}'.format(name=__name__, func=func)))
     node.interfaces[interface.name] = interface
 
     eng = engine.Engine(executor=ThreadExecutor(),
diff --git a/tests/orchestrator/workflows/core/test_task.py b/tests/orchestrator/workflows/core/test_task.py
index e488933..a717e19 100644
--- a/tests/orchestrator/workflows/core/test_task.py
+++ b/tests/orchestrator/workflows/core/test_task.py
@@ -43,7 +43,7 @@
         relationship.source_node.service,
         RELATIONSHIP_INTERFACE_NAME,
         RELATIONSHIP_OPERATION_NAME,
-        operation_kwargs={'implementation': 'test'}
+        operation_kwargs=dict(function='test')
     )
     relationship.interfaces[interface.name] = interface
     context.model.relationship.update(relationship)
@@ -53,7 +53,7 @@
         node.service,
         NODE_INTERFACE_NAME,
         NODE_OPERATION_NAME,
-        operation_kwargs={'implementation': 'test'}
+        operation_kwargs=dict(function='test')
     )
     node.interfaces[interface.name] = interface
     context.model.node.update(node)
@@ -92,7 +92,7 @@
             node.service,
             NODE_INTERFACE_NAME,
             NODE_OPERATION_NAME,
-            operation_kwargs=dict(plugin=storage_plugin, implementation='test')
+            operation_kwargs=dict(plugin=storage_plugin, function='test')
         )
         node.interfaces[interface.name] = interface
         ctx.model.node.update(node)
@@ -103,9 +103,9 @@
         assert storage_task.actor == core_task.context.node._original_model
         assert core_task.model_task == storage_task
         assert core_task.name == api_task.name
-        assert core_task.implementation == api_task.implementation
+        assert core_task.function == api_task.function
         assert core_task.actor == api_task.actor == node
-        assert core_task.inputs == api_task.inputs == storage_task.inputs
+        assert core_task.arguments == api_task.arguments == storage_task.arguments
         assert core_task.plugin == storage_plugin
 
     def test_relationship_operation_task_creation(self, ctx):
diff --git a/tests/orchestrator/workflows/core/test_task_graph_into_execution_graph.py b/tests/orchestrator/workflows/core/test_task_graph_into_execution_graph.py
index 2a96d01..5dd2855 100644
--- a/tests/orchestrator/workflows/core/test_task_graph_into_execution_graph.py
+++ b/tests/orchestrator/workflows/core/test_task_graph_into_execution_graph.py
@@ -32,7 +32,7 @@
         node.service,
         interface_name,
         operation_name,
-        operation_kwargs={'implementation': 'test'}
+        operation_kwargs=dict(function='test')
     )
     node.interfaces[interface.name] = interface
     task_context.model.node.update(node)
@@ -108,9 +108,9 @@
 def _assert_execution_is_api_task(execution_task, api_task):
     assert execution_task.id == api_task.id
     assert execution_task.name == api_task.name
-    assert execution_task.implementation == api_task.implementation
+    assert execution_task.function == api_task.function
     assert execution_task.actor == api_task.actor
-    assert execution_task.inputs == api_task.inputs
+    assert execution_task.arguments == api_task.arguments
 
 
 def _get_task_by_name(task_name, graph):
diff --git a/tests/orchestrator/workflows/executor/__init__.py b/tests/orchestrator/workflows/executor/__init__.py
index 41c4b2e..ac6d325 100644
--- a/tests/orchestrator/workflows/executor/__init__.py
+++ b/tests/orchestrator/workflows/executor/__init__.py
@@ -25,11 +25,11 @@
 
     INFINITE_RETRIES = models.Task.INFINITE_RETRIES
 
-    def __init__(self, implementation, inputs=None, plugin=None, storage=None):
-        self.implementation = self.name = implementation
+    def __init__(self, function, arguments=None, plugin=None, storage=None):
+        self.function = self.name = function
         self.plugin_fk = plugin.id if plugin else None
         self.plugin = plugin or None
-        self.inputs = inputs or {}
+        self.arguments = arguments or {}
         self.states = []
         self.exception = None
         self.id = str(uuid.uuid4())
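
A short sketch of the renamed MockTask constructor as the executor tests below
use it (the function path is illustrative):

    task = MockTask(function='some.module.func',
                    arguments={'input': models.Parameter.wrap('input', 'value')})
    assert task.name == task.function   # MockTask aliases name to function
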
diff --git a/tests/orchestrator/workflows/executor/test_executor.py b/tests/orchestrator/workflows/executor/test_executor.py
index 29cb0e8..9ddaef4 100644
--- a/tests/orchestrator/workflows/executor/test_executor.py
+++ b/tests/orchestrator/workflows/executor/test_executor.py
@@ -38,16 +38,16 @@
 from . import MockTask
 
 
-def _get_implementation(func):
+def _get_function(func):
     return '{module}.{func.__name__}'.format(module=__name__, func=func)
 
 
 def execute_and_assert(executor, storage=None):
     expected_value = 'value'
-    successful_task = MockTask(_get_implementation(mock_successful_task), storage=storage)
-    failing_task = MockTask(_get_implementation(mock_failing_task), storage=storage)
-    task_with_inputs = MockTask(_get_implementation(mock_task_with_input),
-                                inputs={'input': models.Parameter.wrap('input', 'value')},
+    successful_task = MockTask(_get_function(mock_successful_task), storage=storage)
+    failing_task = MockTask(_get_function(mock_failing_task), storage=storage)
+    task_with_inputs = MockTask(_get_function(mock_task_with_input),
+                                arguments={'input': models.Parameter.wrap('input', 'value')},
                                 storage=storage)
 
     for task in [successful_task, failing_task, task_with_inputs]:
diff --git a/tests/orchestrator/workflows/executor/test_process_executor.py b/tests/orchestrator/workflows/executor/test_process_executor.py
index e6333e8..058190e 100644
--- a/tests/orchestrator/workflows/executor/test_process_executor.py
+++ b/tests/orchestrator/workflows/executor/test_process_executor.py
@@ -66,7 +66,7 @@
     def test_closed(self, executor):
         executor.close()
         with pytest.raises(RuntimeError) as exc_info:
-            executor.execute(task=MockTask(implementation='some.implementation'))
+            executor.execute(task=MockTask(function='some.function'))
         assert 'closed' in exc_info.value.message
 
 
diff --git a/tests/orchestrator/workflows/executor/test_process_executor_concurrent_modifications.py b/tests/orchestrator/workflows/executor/test_process_executor_concurrent_modifications.py
index 92f0fc4..6163c09 100644
--- a/tests/orchestrator/workflows/executor/test_process_executor_concurrent_modifications.py
+++ b/tests/orchestrator/workflows/executor/test_process_executor_concurrent_modifications.py
@@ -67,7 +67,7 @@
     key = 'key'
     first_value = 'value1'
     second_value = 'value2'
-    inputs = {
+    arguments = {
         'lock_files': lock_files,
         'key': key,
         'first_value': first_value,
@@ -80,8 +80,8 @@
         node.service,
         interface_name,
         operation_name,
-        operation_kwargs=dict(implementation='{0}.{1}'.format(__name__, func.__name__),
-                              inputs=inputs)
+        operation_kwargs=dict(function='{0}.{1}'.format(__name__, func.__name__),
+                              arguments=arguments)
     )
     node.interfaces[interface.name] = interface
     context.model.node.update(node)
@@ -93,12 +93,12 @@
                 node,
                 interface_name=interface_name,
                 operation_name=operation_name,
-                inputs=inputs),
+                arguments=arguments),
             api.task.OperationTask(
                 node,
                 interface_name=interface_name,
                 operation_name=operation_name,
-                inputs=inputs)
+                arguments=arguments)
         )
 
     signal = events.on_failure_task_signal
diff --git a/tests/orchestrator/workflows/executor/test_process_executor_extension.py b/tests/orchestrator/workflows/executor/test_process_executor_extension.py
index 30b23ed..e4944df 100644
--- a/tests/orchestrator/workflows/executor/test_process_executor_extension.py
+++ b/tests/orchestrator/workflows/executor/test_process_executor_extension.py
@@ -27,7 +27,7 @@
 
 
 def test_decorate_extension(context, executor):
-    inputs = {'input1': 1, 'input2': 2}
+    arguments = {'arg1': 1, 'arg2': 2}
 
     def get_node(ctx):
         return ctx.model.node.get_by_name(mock.models.DEPENDENCY_NODE_NAME)
@@ -41,24 +41,23 @@
             ctx.service,
             interface_name,
             operation_name,
-            operation_kwargs=dict(implementation='{0}.{1}'.format(__name__,
-                                                                  _mock_operation.__name__),
-                                  inputs=inputs)
+            operation_kwargs=dict(function='{0}.{1}'.format(__name__, _mock_operation.__name__),
+                                  arguments=arguments)
         )
         node.interfaces[interface.name] = interface
         task = api.task.OperationTask(
             node,
             interface_name=interface_name,
             operation_name=operation_name,
-            inputs=inputs)
+            arguments=arguments)
         graph.add_tasks(task)
         return graph
     graph = mock_workflow(ctx=context)  # pylint: disable=no-value-for-parameter
     eng = engine.Engine(executor=executor, workflow_context=context, tasks_graph=graph)
     eng.execute()
     out = get_node(context).attributes.get('out').value
-    assert out['wrapper_inputs'] == inputs
-    assert out['function_inputs'] == inputs
+    assert out['wrapper_arguments'] == arguments
+    assert out['function_arguments'] == arguments
 
 
 @extension.process_executor
@@ -66,16 +65,16 @@
 
     def decorate(self):
         def decorator(function):
-            def wrapper(ctx, **operation_inputs):
-                ctx.node.attributes['out'] = {'wrapper_inputs': operation_inputs}
-                function(ctx=ctx, **operation_inputs)
+            def wrapper(ctx, **operation_arguments):
+                ctx.node.attributes['out'] = {'wrapper_arguments': operation_arguments}
+                function(ctx=ctx, **operation_arguments)
             return wrapper
         return decorator
 
 
 @operation
-def _mock_operation(ctx, **operation_inputs):
-    ctx.node.attributes['out']['function_inputs'] = operation_inputs
+def _mock_operation(ctx, **operation_arguments):
+    ctx.node.attributes['out']['function_arguments'] = operation_arguments
 
 
 @pytest.fixture
diff --git a/tests/orchestrator/workflows/executor/test_process_executor_tracked_changes.py b/tests/orchestrator/workflows/executor/test_process_executor_tracked_changes.py
index 2b628a0..2d80a3b 100644
--- a/tests/orchestrator/workflows/executor/test_process_executor_tracked_changes.py
+++ b/tests/orchestrator/workflows/executor/test_process_executor_tracked_changes.py
@@ -62,19 +62,19 @@
 
 
 def test_apply_tracked_changes_during_an_operation(context, executor):
-    inputs = {
+    arguments = {
         'committed': {'some': 'new', 'properties': 'right here'},
         'changed_but_refreshed': {'some': 'newer', 'properties': 'right there'}
     }
 
     expected_initial = context.model.node.get_by_name(mock.models.DEPENDENCY_NODE_NAME).attributes
     out = _run_workflow(
-        context=context, executor=executor, op_func=_mock_updating_operation, inputs=inputs)
+        context=context, executor=executor, op_func=_mock_updating_operation, arguments=arguments)
 
     expected_after_update = expected_initial.copy()
-    expected_after_update.update(inputs['committed']) # pylint: disable=no-member
+    expected_after_update.update(arguments['committed']) # pylint: disable=no-member
     expected_after_change = expected_after_update.copy()
-    expected_after_change.update(inputs['changed_but_refreshed']) # pylint: disable=no-member
+    expected_after_change.update(arguments['changed_but_refreshed']) # pylint: disable=no-member
 
     assert out['initial'] == expected_initial
     assert out['after_update'] == expected_after_update
@@ -82,26 +82,26 @@
     assert out['after_refresh'] == expected_after_change
 
 
-def _run_workflow(context, executor, op_func, inputs=None):
+def _run_workflow(context, executor, op_func, arguments=None):
     @workflow
     def mock_workflow(ctx, graph):
         node = ctx.model.node.get_by_name(mock.models.DEPENDENCY_NODE_NAME)
         interface_name = 'test_interface'
         operation_name = 'operation'
-        wf_inputs = inputs or {}
+        wf_arguments = arguments or {}
         interface = mock.models.create_interface(
             ctx.service,
             interface_name,
             operation_name,
-            operation_kwargs=dict(implementation=_operation_mapping(op_func),
-                                  inputs=wf_inputs)
+            operation_kwargs=dict(function=_operation_mapping(op_func),
+                                  arguments=wf_arguments)
         )
         node.interfaces[interface.name] = interface
         task = api.task.OperationTask(
             node,
             interface_name=interface_name,
             operation_name=operation_name,
-            inputs=wf_inputs)
+            arguments=wf_arguments)
         graph.add_tasks(task)
         return graph
     graph = mock_workflow(ctx=context)  # pylint: disable=no-value-for-parameter
diff --git a/tests/resources/service-templates/tosca-simple-1.0/node-cellar/node-cellar.yaml b/tests/resources/service-templates/tosca-simple-1.0/node-cellar/node-cellar.yaml
index 8e80640..4d53f9b 100644
--- a/tests/resources/service-templates/tosca-simple-1.0/node-cellar/node-cellar.yaml
+++ b/tests/resources/service-templates/tosca-simple-1.0/node-cellar/node-cellar.yaml
@@ -104,6 +104,16 @@
         Maintenance:
           enable: juju > charm.maintenance_on
           disable: juju > charm.maintenance_off
+        Standard:
+          create:
+            implementation:
+              primary: create_node_cellar.sh
+              dependencies:
+                - "process.args.1 > { get_attribute: [ SELF, tosca_id ] }"
+                - "process.args.2 > { get_property: [ HOST, flavor_name ] }"
+                - ssh.user > admin
+                - ssh.password > '1234'
+                - ssh.use_sudo > true
       requirements:
         - database: node_cellar_database
       capabilities:
@@ -161,16 +171,7 @@
             relationship:
               interfaces:
                 Configure:
-                  target_changed:
-                    implementation:
-                      primary: changed.sh
-                      dependencies:
-                        #- { concat: [ process.args.1 >, mongodb ] }
-                        - process.args.1 > mongodb
-                        - process.args.2 > host
-                        - ssh.user > admin
-                        - ssh.password > 1234
-                        - ssh.use_sudo > true
+                  target_changed: changed.sh
 
     nginx:
       type: nginx.Nginx
@@ -251,6 +252,7 @@
         Standard:
           inputs:
             openstack_credential: { get_input: openstack_credential }
+          create: create_data_volume.sh
 
   groups:
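
The "dependencies" entries added above use the 'key > value' syntax; the
right-hand side is parsed as YAML and wrapped as a Parameter model, which is
what lets '{ get_attribute: [ SELF, tosca_id ] }' and the bare boolean 'true'
work here. A hedged sketch of that equivalence (PyYAML and the split on '>'
are illustrative stand-ins, not the parser's actual code):

    import yaml                        # stand-in YAML parser, illustration only
    from aria.modeling import models

    entry = 'ssh.use_sudo > true'
    name, _, raw = entry.partition('>')
    param = models.Parameter.wrap(name.strip(), yaml.safe_load(raw))
    assert param.value is True         # YAML coerces 'true' to a boolean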