Merge branch 'master' of https://git-wip-us.apache.org/repos/asf/climate
diff --git a/.mailmap b/.mailmap
new file mode 100644
index 0000000..8c3fa0d
--- /dev/null
+++ b/.mailmap
@@ -0,0 +1,28 @@
+Michael Joyce               <joyce@apache.org>          joyce                           <joyce@unknown>
+Michael Joyce               <joyce@apache.org>          mjjoyce                         <mjjoyce@unknown>
+Michael Joyce               <joyce@apache.org>                                          <mltjoyce@gmail.com>
+Cameron Eugene Goodale      <goodale@apache.org>        cgoodale                        <cgoodale@apache.org>
+Cameron Eugene Goodale      <goodale@apache.org>        cgoodale                        <cgoodale@unknown>
+Cameron Eugene Goodale      <goodale@apache.org>        Cameron Eugene Goodale          <cgoodale@apache.org>
+Cameron Eugene Goodale      <goodale@apache.org>        Cameron Eugene Goodale          <goodale@apache.org>
+Cameron Eugene Goodale      <goodale@apache.org>        cgoodale                        <sigep311@gmail.com>
+Cameron Eugene Goodale      <goodale@apache.org>        Cameron Goodale                 <goodale@apache.org>
+Shakeh Elisabeth Khudikyan  <skhudiky@apache.org>       skhudiky                        <skhudiky@unknown>
+Shakeh Elisabeth Khudikyan  <skhudiky@apache.org>       Shakeh                          <sekhudikyan@gmail.com>
+Maziyar Boustani            <boustani@apache.org>       Maziyar Boustani                <maziyar_b4@yahoo.com>
+Maziyar Boustani            <boustani@apache.org>       boustani                        <boustani@unknown>
+Lewis John McGibbney        <lewismc@apache.org>        Lewis John McGibbney            <lewis.j.mcgibbney@jpl.nasa.gov>
+Kim Whitehall               <whitehall@apache.org>      kwhitehall                      <k_whitehall@yahoo.com>
+Kim Whitehall               <whitehall@apache.org>      Kim Whitehall                   <k_whitehall@yahoo.com>
+Kim Whitehall               <whitehall@apache.org>      whitehall                       <whitehall@unknown>
+Kim Whitehall               <whitehall@apache.org>      Kim Whitehall                   <kwhitehall@users.noreply.github.com>
+Kim Whitehall               <whitehall@apache.org>      georgette                       <k_whitehall@yahoo.com>
+Huikyo Lee                  <huikyole@apache.org>       huikyole                        <huikyole@unknown>
+Ross Laidlaw                <rlaidlaw@apache.org>       rlaidlaw                        <rlaidlaw.open@gmail.com>
+Paul Michael Ramirez        <pramirez@apache.org>       Paul Ramirez
+Alex Goodman                <goodman@apache.org>        goodman                         <goodman@unknown>
+Alex Goodman                <goodman@apache.org>        bassdx                          <agoodman1120@gmail.com>
+Luca Cinquini               <luca@apache.org>           cinquini                        <cinquini@unknown>
+Denis Nadeau                <dnadeau@apache.org>        Denis Nadeau                    <dnadeau@esgcmor.gsfc.nasa.gov>
+Denis Nadeau                <dnadeau@apache.org>        dnadeau                         <dnadeau@unknown>
+Denis Nadeau                <dnadeau@apache.org>        Nadeau                          <dnadeau@GSSLA40018857.nccs.nasa.gov>
diff --git a/.pylintrc b/.pylintrc
new file mode 100644
index 0000000..fc66621
--- /dev/null
+++ b/.pylintrc
@@ -0,0 +1,380 @@
+[MASTER]
+
+# Specify a configuration file.
+#rcfile=
+
+# Python code to execute, usually for sys.path manipulation such as
+# pygtk.require().
+#init-hook=
+
+# Profiled execution.
+profile=no
+
+# Add files or directories to the blacklist. They should be base names, not
+# paths.
+ignore=CVS
+
+# Pickle collected data for later comparisons.
+persistent=yes
+
+# List of plugins (as comma separated values of python modules names) to load,
+# usually to register additional checkers.
+load-plugins=
+
+# DEPRECATED
+include-ids=no
+
+# DEPRECATED
+symbols=no
+
+# Use multiple processes to speed up Pylint.
+jobs=1
+
+# Allow loading of arbitrary C extensions. Extensions are imported into the
+# active Python interpreter and may run arbitrary code.
+unsafe-load-any-extension=no
+
+# A comma-separated list of package or module names from where C extensions may
+# be loaded. Extensions are loading into the active Python interpreter and may
+# run arbitrary code
+extension-pkg-whitelist=numpy
+
+
+[MESSAGES CONTROL]
+
+# Only show warnings with the listed confidence levels. Leave empty to show
+# all. Valid levels: HIGH, INFERENCE, INFERENCE_FAILURE, UNDEFINED
+confidence=
+
+# Enable the message, report, category or checker with the given id(s). You can
+# either give multiple identifier separated by comma (,) or put this option
+# multiple time. See also the "--disable" option for examples.
+#enable=
+
+# Disable the message, report, category or checker with the given id(s). You
+# can either give multiple identifiers separated by comma (,) or put this
+# option multiple times (only on the command line, not in the configuration
+# file where it should appear only once).You can also use "--disable=all" to
+# disable everything first and then reenable specific checks. For example, if
+# you want to run only the similarities checker, you can use "--disable=all
+# --enable=similarities". If you want to run only the classes checker, but have
+# no Warning level messages displayed, use"--disable=all --enable=classes
+# --disable=W"
+disable=E1608,W1627,E1601,E1603,E1602,E1605,E1604,E1607,E1606,W1621,W1620,W1623,W1622,W1625,W1624,W1609,W1608,W1607,W1606,W1605,W1604,W1603,W1602,W1601,W1639,W1640,I0021,W1638,I0020,W1618,W1619,W1630,W1626,W1637,W1634,W1635,W1610,W1611,W1612,W1613,W1614,W1615,W1616,W1617,W1632,W1633,W0704,W1628,W1629,W1636
+
+
+[REPORTS]
+
+# Set the output format. Available formats are text, parseable, colorized, msvs
+# (visual studio) and html. You can also give a reporter class, eg
+# mypackage.mymodule.MyReporterClass.
+output-format=text
+
+# Put messages in a separate file for each module / package specified on the
+# command line instead of printing them on stdout. Reports (if any) will be
+# written in a file name "pylint_global.[txt|html]".
+files-output=no
+
+# Tells whether to display a full report or only the messages
+reports=yes
+
+# Python expression which should return a note less than 10 (10 is the highest
+# note). You have access to the variables errors warning, statement which
+# respectively contain the number of errors / warnings messages and the total
+# number of statements analyzed. This is used by the global evaluation report
+# (RP0004).
+evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)
+
+# Add a comment according to your evaluation note. This is used by the global
+# evaluation report (RP0004).
+comment=no
+
+# Template used to display messages. This is a python new-style format string
+# used to format the message information. See doc for all details
+#msg-template=
+
+
+[BASIC]
+
+# Required attributes for module, separated by a comma
+required-attributes=
+
+# List of builtins function names that should not be used, separated by a comma
+bad-functions=map,filter,input
+
+# Good variable names which should always be accepted, separated by a comma
+good-names=i,j,k,ex,Run,_
+
+# Bad variable names which should always be refused, separated by a comma
+bad-names=foo,bar,baz,toto,tutu,tata
+
+# Colon-delimited sets of names that determine each other's naming style when
+# the name regexes allow several styles.
+name-group=
+
+# Include a hint for the correct naming format with invalid-name
+include-naming-hint=no
+
+# Regular expression matching correct function names
+function-rgx=[a-z_][a-z0-9_]{2,30}$
+
+# Naming hint for function names
+function-name-hint=[a-z_][a-z0-9_]{2,30}$
+
+# Regular expression matching correct variable names
+variable-rgx=[a-z_][a-z0-9_]{2,30}$
+
+# Naming hint for variable names
+variable-name-hint=[a-z_][a-z0-9_]{2,30}$
+
+# Regular expression matching correct constant names
+const-rgx=(([A-Z_][A-Z0-9_]*)|(__.*__))$
+
+# Naming hint for constant names
+const-name-hint=(([A-Z_][A-Z0-9_]*)|(__.*__))$
+
+# Regular expression matching correct attribute names
+attr-rgx=[a-z_][a-z0-9_]{2,30}$
+
+# Naming hint for attribute names
+attr-name-hint=[a-z_][a-z0-9_]{2,30}$
+
+# Regular expression matching correct argument names
+argument-rgx=[a-z_][a-z0-9_]{2,30}$
+
+# Naming hint for argument names
+argument-name-hint=[a-z_][a-z0-9_]{2,30}$
+
+# Regular expression matching correct class attribute names
+class-attribute-rgx=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$
+
+# Naming hint for class attribute names
+class-attribute-name-hint=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$
+
+# Regular expression matching correct inline iteration names
+inlinevar-rgx=[A-Za-z_][A-Za-z0-9_]*$
+
+# Naming hint for inline iteration names
+inlinevar-name-hint=[A-Za-z_][A-Za-z0-9_]*$
+
+# Regular expression matching correct class names
+class-rgx=[A-Z_][a-zA-Z0-9]+$
+
+# Naming hint for class names
+class-name-hint=[A-Z_][a-zA-Z0-9]+$
+
+# Regular expression matching correct module names
+module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$
+
+# Naming hint for module names
+module-name-hint=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$
+
+# Regular expression matching correct method names
+method-rgx=[a-z_][a-z0-9_]{2,30}$
+
+# Naming hint for method names
+method-name-hint=[a-z_][a-z0-9_]{2,30}$
+
+# Regular expression which should only match function or class names that do
+# not require a docstring.
+no-docstring-rgx=__.*__|_.*$
+
+# Minimum line length for functions/classes that require docstrings, shorter
+# ones are exempt.
+docstring-min-length=-5
+
+
+[FORMAT]
+
+# Maximum number of characters on a single line.
+max-line-length=79
+
+# Regexp for a line that is allowed to be longer than the limit.
+ignore-long-lines=^\s*(# )?<?https?://\S+>?$
+
+# Allow the body of an if to be on the same line as the test if there is no
+# else.
+single-line-if-stmt=yes
+
+# List of optional constructs for which whitespace checking is disabled
+no-space-check=trailing-comma,dict-separator
+
+# Maximum number of lines in a module
+max-module-lines=1000
+
+# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1
+# tab).
+indent-string='    '
+
+# Number of spaces of indent required inside a hanging or continued line.
+indent-after-paren=4
+
+# Expected format of line ending, e.g. empty (any line ending), LF or CRLF.
+expected-line-ending-format=
+
+
+[LOGGING]
+
+# Logging modules to check that the string format arguments are in logging
+# function parameter format
+logging-modules=logging
+
+
+[MISCELLANEOUS]
+
+# List of note tags to take in consideration, separated by a comma.
+notes=FIXME,XXX,TODO
+
+
+[SIMILARITIES]
+
+# Minimum lines number of a similarity.
+min-similarity-lines=4
+
+# Ignore comments when computing similarities.
+ignore-comments=yes
+
+# Ignore docstrings when computing similarities.
+ignore-docstrings=yes
+
+# Ignore imports when computing similarities.
+ignore-imports=no
+
+
+[SPELLING]
+
+# Spelling dictionary name. Available dictionaries: none. To make it working
+# install python-enchant package.
+spelling-dict=
+
+# List of comma separated words that should not be checked.
+spelling-ignore-words=
+
+# A path to a file that contains private dictionary; one word per line.
+spelling-private-dict-file=
+
+# Tells whether to store unknown words to indicated private dictionary in
+# --spelling-private-dict-file option instead of raising a message.
+spelling-store-unknown-words=no
+
+
+[TYPECHECK]
+
+# Tells whether missing members accessed in mixin class should be ignored. A
+# mixin class is detected if its name ends with "mixin" (case insensitive).
+ignore-mixin-members=yes
+
+# List of module names for which member attributes should not be checked
+# (useful for modules/projects where namespaces are manipulated during runtime
+# and thus existing member attributes cannot be deduced by static analysis
+ignored-modules=
+
+# List of classes names for which member attributes should not be checked
+# (useful for classes with attributes dynamically set).
+ignored-classes=SQLObject
+
+# When zope mode is activated, add a predefined set of Zope acquired attributes
+# to generated-members.
+zope=no
+
+# List of members which are set dynamically and missed by pylint inference
+# system, and so shouldn't trigger E0201 when accessed. Python regular
+# expressions are accepted.
+generated-members=REQUEST,acl_users,aq_parent
+
+
+[VARIABLES]
+
+# Tells whether we should check for unused import in __init__ files.
+init-import=no
+
+# A regular expression matching the name of dummy variables (i.e. expectedly
+# not used).
+dummy-variables-rgx=_$|dummy
+
+# List of additional names supposed to be defined in builtins. Remember that
+# you should avoid to define new builtins when possible.
+additional-builtins=
+
+# List of strings which can identify a callback function by name. A callback
+# name must start or end with one of those strings.
+callbacks=cb_,_cb
+
+
+[CLASSES]
+
+# List of interface methods to ignore, separated by a comma. This is used for
+# instance to not check methods defines in Zope's Interface base class.
+ignore-iface-methods=isImplementedBy,deferred,extends,names,namesAndDescriptions,queryDescriptionFor,getBases,getDescriptionFor,getDoc,getName,getTaggedValue,getTaggedValueTags,isEqualOrExtendedBy,setTaggedValue,isImplementedByInstancesOf,adaptWith,is_implemented_by
+
+# List of method names used to declare (i.e. assign) instance attributes.
+defining-attr-methods=__init__,__new__,setUp
+
+# List of valid names for the first argument in a class method.
+valid-classmethod-first-arg=cls
+
+# List of valid names for the first argument in a metaclass class method.
+valid-metaclass-classmethod-first-arg=mcs
+
+# List of member names, which should be excluded from the protected access
+# warning.
+exclude-protected=_asdict,_fields,_replace,_source,_make
+
+
+[DESIGN]
+
+# Maximum number of arguments for function / method
+max-args=5
+
+# Argument names that match this expression will be ignored. Default to name
+# with leading underscore
+ignored-argument-names=_.*
+
+# Maximum number of locals for function / method body
+max-locals=15
+
+# Maximum number of return / yield for function / method body
+max-returns=6
+
+# Maximum number of branch for function / method body
+max-branches=12
+
+# Maximum number of statements in function / method body
+max-statements=50
+
+# Maximum number of parents for a class (see R0901).
+max-parents=7
+
+# Maximum number of attributes for a class (see R0902).
+max-attributes=7
+
+# Minimum number of public methods for a class (see R0903).
+min-public-methods=2
+
+# Maximum number of public methods for a class (see R0904).
+max-public-methods=20
+
+
+[IMPORTS]
+
+# Deprecated modules which should not be used, separated by a comma
+deprecated-modules=regsub,TERMIOS,Bastion,rexec
+
+# Create a graph of every (i.e. internal and external) dependencies in the
+# given file (report RP0402 must not be disabled)
+import-graph=
+
+# Create a graph of external dependencies in the given file (report RP0402 must
+# not be disabled)
+ext-import-graph=
+
+# Create a graph of internal dependencies in the given file (report RP0402 must
+# not be disabled)
+int-import-graph=
+
+
+[EXCEPTIONS]
+
+# Exceptions that will emit a warning when being caught. Defaults to
+# "Exception"
+overgeneral-exceptions=Exception
diff --git a/docs/source/conf.py b/docs/source/conf.py
index 9323133..1095b7b 100644
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -22,6 +22,7 @@
 sys.path.insert(0, os.path.abspath('../../ocw'))
 sys.path.insert(0, os.path.abspath('../../ocw/data_source'))
 sys.path.insert(0, os.path.abspath('../../ocw-ui/backend'))
+sys.path.insert(0, os.path.abspath('../../ocw-config-runner'))
 
 
 # -- General configuration -----------------------------------------------------
@@ -59,9 +60,9 @@
 # built documents.
 #
 # The short X.Y version.
-version = '0.4'
+version = '1.0.0'
 # The full version, including alpha/beta/rc tags.
-release = '0.4-snapshot'
+release = '1.0.0-snapshot'
 
 # The language for content autogenerated by Sphinx. Refer to documentation
 # for a list of supported languages.
diff --git a/docs/source/config/config_overview.rst b/docs/source/config/config_overview.rst
new file mode 100644
index 0000000..d3141f4
--- /dev/null
+++ b/docs/source/config/config_overview.rst
@@ -0,0 +1,85 @@
+Configuration File Overview
+===========================
+
+Apache Open Climate Workbench includes tools for creating and reading configuration files. Below is an explanation of the general configuration file structure, an in-depth look at the various configuration options, and an explanation of how to use configuration files in an evaluation.
+
+Getting Started
+---------------
+
+OCW configuration files are written in `YAML <http://yaml.org/>`_ with type annotations that are supported by the `PyYAML library <http://pyyaml.org/wiki/PyYAMLDocumentation>`_. Let's look at an example configuration file to get started.
+
+.. code::
+
+    evaluation:
+        temporal_time_delta: 365
+        spatial_regrid_lats: !!python/tuple [-20, 20, 1]
+        spatial_regrid_lons: !!python/tuple [-20, 20, 1]
+
+    datasets:
+        reference:
+            data_source: local
+            file_count: 1
+            path: /tmp/AFRICA_KNMI-RACMO2.2b_CTL_ERAINT_MM_50km_1989-2008_tasmax.nc
+            variable: tasmax
+
+        targets:
+            - data_source: local
+              file_count: 1
+              path: /tmp/AFRICA_UC-WRF311_CTL_ERAINT_MM_50km-rg_1989-2008_tasmax.nc
+              variable: tasmax
+    metrics:
+        - Bias
+
+    plots:
+        - type: contour
+          results_indeces:
+              - !!python/tuple [0, 0]
+          lats:
+              range_min: -20
+              range_max: 20
+              range_step: 1
+          lons:
+              range_min: -20
+              range_max: 20
+              range_step: 1
+          output_name: wrf_bias_compared_to_knmi
+          optional_args:
+              gridshape: !!python/tuple [6, 6]
+    
+There are 4 main categories for configuration settings: Evaluation, Datasets, Metrics, and Plots.
+
+Evaluation Settings
+-------------------
+
+This is where you will set evaluation specific settings such as temporal and spatial bin sizes to use during dataset preparation. Visit the :doc:`Evaluation Settings <evaluation_settings>` page for additional information.
+
+Dataset Information
+-------------------
+
+The datasets section is where you specify all the datasets to use for an evaluation. You can specify what the reference dataset should be as well as giving a list of target datasets. Visit the :doc:`Dataset Information <dataset_information>` page for additional information.
+
+Metrics Information
+-------------------
+
+You will need to load some metrics if you want to get anything useful out of your evaluation. Visit the :doc:`Metrics Information <metrics_information>` page to learn how to specify the metrics that should be used in the evaluation.
+
+Plots Settings
+--------------
+
+This is where you specify what plots to make after running the evaluation. The :doc:`Plots Settings <plots_settings>` page provides examples for the supported plot types.
+
+Example Run
+-----------
+
+If you have tried the **simple_model_to_model_bias.py** example in the primary toolkit examples, you can run the same evaluation from a configuration file instead of scripting the API directly. From the **climate/ocw-config-runner/** directory, run the following command::
+
+    python ocw_evaluation_from_config.py example/simple_model_to_model_bias.yaml
+
+.. note::
+
+    If you haven't run the previous example, which downloads the necessary datasets, this evaluation will fail; the required local files will not be present.
+
+Writing a Config File
+---------------------
+
+You can export an :class:`evaluation.Evaluation` object to a configuration file for easily repeatable evaluations. Check out the documentation on the :doc:`configuration file writer API <config_writer>` for additional information.
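+
+As a rough sketch (assuming ``my_evaluation`` is an already configured :class:`evaluation.Evaluation` object, and that the writer exposes an ``export_evaluation_to_config`` function; confirm the exact name and signature against the writer API docs), an export could look like:
+
+.. code::
+
+    import configuration_writer as config_writer
+
+    # Write out a YAML config describing the datasets, metrics, and
+    # evaluation settings of an existing Evaluation object. The function
+    # name and the file_path keyword are assumptions; see the writer docs.
+    config_writer.export_evaluation_to_config(
+        my_evaluation,
+        file_path='/tmp/exported_evaluation.yaml')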
diff --git a/docs/source/config/config_writer.rst b/docs/source/config/config_writer.rst
new file mode 100644
index 0000000..eb9bb38
--- /dev/null
+++ b/docs/source/config/config_writer.rst
@@ -0,0 +1,5 @@
+Configuration File Writer API
+=============================
+
+.. automodule:: configuration_writer
+    :members:
diff --git a/docs/source/config/dataset_information.rst b/docs/source/config/dataset_information.rst
new file mode 100644
index 0000000..3c710e3
--- /dev/null
+++ b/docs/source/config/dataset_information.rst
@@ -0,0 +1,89 @@
+Dataset Information
+===================
+
+Dataset configuration information is passed in the **datasets** section of the configuration file. You can specify one reference dataset and one or more target datasets for your evaluation::
+
+    datasets:
+        reference:
+            data_source: local
+            file_count: 1
+            path: /tmp/AFRICA_KNMI-RACMO2.2b_CTL_ERAINT_MM_50km_1989-2008_tasmax.nc
+            variable: tasmax
+
+        targets:
+            - data_source: local
+              file_count: 1
+              path: /tmp/AFRICA_UC-WRF311_CTL_ERAINT_MM_50km-rg_1989-2008_tasmax.nc
+              variable: tasmax
+            - data_source: local
+              file_count: 1
+              path: /tmp/AFRICA_UC-WRF311_CTL_ERAINT_MM_50km-rg_1989-2008_tasmax.nc
+              variable: tasmax
+
+Each **data_source** module requires datasets to be passed in a slightly different manner. Below is an explanation of the format for each of the supported data sources.
+
+Local Dataset
+-------------
+.. code::
+
+    data_source: local
+    file_count: 1
+    path: /tmp/AFRICA_KNMI-RACMO2.2b_CTL_ERAINT_MM_50km_1989-2008_tasmax.nc
+    variable: tasmax
+
+The **path** flag is the location of the dataset file on your computer. The **variable** flag is the variable that should be pulled out of the NetCDF file once it has been opened. You can pass any optional flags that are accepted by :func:`local.load_file` using the **optional_args** flag::
+
+    data_source: local
+    file_count: 1
+    path: /tmp/AFRICA_KNMI-RACMO2.2b_CTL_ERAINT_MM_50km_1989-2008_tasmax.nc
+    variable: tasmax
+    optional_args:
+        elevation_index: 0
+        name: 'foo'
+
+.. note::
+
+    The **file_count** flag is currently not used. It is there to support planned future functionality. However, you still need to specify it! Leave it as 1.
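+
+For reference, the configuration above is roughly equivalent to the following direct call to :func:`local.load_file` (the keyword arguments mirror the **optional_args** example and are illustrative only):
+
+.. code::
+
+    import ocw.data_source.local as local
+
+    # path and variable map to the first two arguments; optional_args
+    # entries are passed through as keyword arguments.
+    knmi_dataset = local.load_file(
+        '/tmp/AFRICA_KNMI-RACMO2.2b_CTL_ERAINT_MM_50km_1989-2008_tasmax.nc',
+        'tasmax',
+        elevation_index=0,
+        name='foo')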
+
+
+RCMED Dataset
+-------------
+
+.. code::
+    
+    data_source: rcmed
+    dataset_id: 4
+    parameter_id: 32
+    min_lat: -10
+    max_lat: 10
+    min_lon: -20
+    max_lon: 20
+    start_time: 1997-01-01
+    end_time: 2000-01-01
+
+To load a dataset from the Jet Propulsion Laboratory's RCMED you will need to specify the above flags. The **dataset_id** and **parameter_id** are dataset specific and can be looked up on the `RCMES project website <https://rcmes.jpl.nasa.gov/content/rcmes-and-data>`_. Pretty much any common time format will be accepted for the start and end times. However, just to be safe you should try to stick with something very standard such as `ISO-8601 <http://www.w3.org/TR/NOTE-datetime>`_ formatted time values. You may also pass any optional parameters that are accepted by :func:`rcmed.parameter_dataset` with the **optional_args** flag.
+
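+A rough Python equivalent of the settings above, using the argument order shown in the bundled examples, is:
+
+.. code::
+
+    import datetime
+    import ocw.data_source.rcmed as rcmed
+
+    # dataset_id and parameter_id first, then the lat/lon bounds and the
+    # start/end times from the configuration above.
+    dataset = rcmed.parameter_dataset(4, 32,
+                                      -10, 10,   # min_lat, max_lat
+                                      -20, 20,   # min_lon, max_lon
+                                      datetime.datetime(1997, 1, 1),
+                                      datetime.datetime(2000, 1, 1))
+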
+ESGF Dataset
+------------
+
+In order to load an ESGF dataset you will need to specify the following parameters in addition to having an ESGF login::
+
+    data_source: esgf
+    dataset_id: obs4MIPs.CNES.AVISO.mon.v1|esg-datanode.jpl.nasa.gov
+    variable: zosStderr
+    esgf_password: totallynotmypassword
+    esgf_username: totallynotmyusername
+
+The **dataset_id** and **variable** flags are ESGF dataset specific. You can locate them through an ESGF node's search page. You may also pass any optional parameters that are accepted by :func:`esgf.load_dataset` with the **optional_args** flag.
+
+
+OpenDAP Dataset
+---------------
+
+A dataset can be downloaded from an OpenDAP URL with the following settings::
+
+    data_source: dap
+    url: http://test.opendap.org/dap/data/nc/sst.mnmean.nc.gz
+    variable: sst
+
+You may also pass any optional parameters that are accepted by :func:`dap.load` with the **optional_args** flag.
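+
+Assuming :func:`dap.load` takes the URL and the variable name as its first two arguments (a sketch only; check the data_source documentation), the equivalent Python is roughly:
+
+.. code::
+
+    import ocw.data_source.dap as dap
+
+    # url and variable from the configuration above.
+    sst_dataset = dap.load('http://test.opendap.org/dap/data/nc/sst.mnmean.nc.gz',
+                           'sst')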
diff --git a/docs/source/config/evaluation_settings.rst b/docs/source/config/evaluation_settings.rst
new file mode 100644
index 0000000..f10973c
--- /dev/null
+++ b/docs/source/config/evaluation_settings.rst
@@ -0,0 +1,56 @@
+Evaluation Settings
+===================
+
+The evaluation settings section of the configuration file allows you to set attributes that are critical for making adjustments to the loaded datasets prior to an evaluation run. Here is an example evaluation settings section of a configuration file. Below, we'll look at each of the configuration options in detail.
+
+.. code::
+
+    evaluation:
+        temporal_time_delta: 365
+        spatial_regrid_lats: !!python/tuple [-20, 20, 1]
+        spatial_regrid_lons: !!python/tuple [-20, 20, 1]
+
+Temporal Rebin
+--------------
+
+It is often necessary to temporally rebin datasets prior to an evaluation. The **temporal_time_delta** flag is where you can set the **temporal_resolution** parameter for :func:`dataset_processor.temporal_rebin`. The value that you pass here is interpreted as the number of days to assign to a :class:`datetime.timedelta` object before running the :func:`dataset_processor.temporal_rebin` function.
+
+.. note::
+
+    This attribute is only useful if you use the configuration data to create an :class:`evaluation.Evaluation` object with the :func:`evaluation_creation.generate_evaluation_from_config` config parser function.
+
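+In other words, a setting of ``temporal_time_delta: 365`` is roughly equivalent to the following call for each dataset in the evaluation (a sketch using one of the example datasets):
+
+.. code::
+
+    import datetime
+    import ocw.data_source.local as local
+    import ocw.dataset_processor as dsp
+
+    knmi_dataset = local.load_file(
+        '/tmp/AFRICA_KNMI-RACMO2.2b_CTL_ERAINT_MM_50km_1989-2008_tasmax.nc',
+        'tasmax')
+    # temporal_time_delta: 365 becomes a 365-day timedelta passed to
+    # dataset_processor.temporal_rebin.
+    knmi_dataset = dsp.temporal_rebin(knmi_dataset, datetime.timedelta(days=365))
+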
+Spatial Regrid
+--------------
+
+.. note::
+
+    Some functionality here is still in development; specifically, passing the spatial_regrid_* flags as lists of values.
+
+If you need to regrid your datasets onto a new lat/lon grid you will need to set the **spatial_regrid_lats** and **spatial_regrid_lons** options. These will be passed to the :func:`dataset_processor.spatial_regrid` function along with each dataset. There are two valid ways to pass these parameters. First, you can pass them as a list of all values::
+
+    evaluation:
+        spatial_regrid_lats: [-10, -5, 0, 5, 10]
+        spatial_regrid_lons: [-10, -5, 0, 5, 10]
+
+This is generally useful if you only need to pass a few parameters or if the sequence isn't easy to define as a valid **range** in Python. The other option is to pass **range** information as a tuple. This requires you to use `PyYAML's Python Type Annotations <http://pyyaml.org/wiki/PyYAMLDocumentation#YAMLtagsandPythontypes>`_ but provides a far more compact representation::
+
+    evaluation:
+        spatial_regrid_lats: !!python/tuple [-20, 20, 1]
+        spatial_regrid_lons: !!python/tuple [-20, 20, 1]
+
+Using this style directly maps to a call to :func:`numpy.arange`::
+
+    # spatial_regrid_lats: !!python/tuple [-20, 20, 1] becomes
+    lats = numpy.arange(-20, 20, 1)
+
+Be sure to pay special attention to the end value for your interval. The :func:`numpy.arange` function does not include the end value in the returned interval.
+
+Subset Bounds
+-------------
+
+In order to subset the datasets down to an area of interest you will need to pass bounds information::
+
+    evaluation:
+        subset: [-10, 10, -20, 20, "1997-01-01", "2000-01-01"]
+
+Here the first four values define the bounding lat/lon box, and the last two values give the start and end times of the valid temporal range. Pretty much any common time format will be accepted. However, just to be safe you should try to stick with something very standard such as `ISO-8601 <http://www.w3.org/TR/NOTE-datetime>`_ formatted time values.
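+
+As a sketch, those six values map onto an OCW ``Bounds`` object and a call to :func:`dataset_processor.subset`, following the argument ordering used in the bundled examples:
+
+.. code::
+
+    import datetime
+    import ocw.data_source.local as local
+    import ocw.dataset_processor as dsp
+    from ocw.dataset import Bounds
+
+    # subset: [-10, 10, -20, 20, "1997-01-01", "2000-01-01"] maps to
+    # Bounds(min_lat, max_lat, min_lon, max_lon, start, end).
+    bounds = Bounds(-10, 10, -20, 20,
+                    datetime.datetime(1997, 1, 1),
+                    datetime.datetime(2000, 1, 1))
+
+    dataset = local.load_file(
+        '/tmp/AFRICA_KNMI-RACMO2.2b_CTL_ERAINT_MM_50km_1989-2008_tasmax.nc',
+        'tasmax')
+    dataset = dsp.subset(bounds, dataset)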
diff --git a/docs/source/config/metrics_information.rst b/docs/source/config/metrics_information.rst
new file mode 100644
index 0000000..1935e87
--- /dev/null
+++ b/docs/source/config/metrics_information.rst
@@ -0,0 +1,12 @@
+Metrics Information
+===================
+
+.. note::
+
+    At the moment, you can only load metrics that are in :mod:`ocw.metrics`. In the future you will also be able to specify user defined metrics here. However, as a workaround you can define your custom metrics in the :mod:`ocw.metrics` module.
+
+You can set the metrics you want to use in the evaluation in the **metrics** section of the config. You simply need to supply a list of the metric class names that you want to be used::
+
+    metrics:
+        - Bias
+        - TemporalStdDev
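+
+Each name listed under **metrics** corresponds to a class in :mod:`ocw.metrics`; conceptually, the config runner ends up doing something like the following sketch:
+
+.. code::
+
+    import ocw.metrics as metrics
+
+    # The class names from the config are instantiated and handed to the
+    # Evaluation object.
+    loaded_metrics = [metrics.Bias(), metrics.TemporalStdDev()]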
diff --git a/docs/source/config/plots_settings.rst b/docs/source/config/plots_settings.rst
new file mode 100644
index 0000000..2959092
--- /dev/null
+++ b/docs/source/config/plots_settings.rst
@@ -0,0 +1,44 @@
+Plots Settings
+==============
+
+Plotting configuration information is passed in the **plots** section of the configuration file::
+
+    plots:
+        - type: contour
+          results_indeces:
+              - !!python/tuple [0, 0]
+          lats:
+              range_min: -20
+              range_max: 20
+              range_step: 1
+          lons:
+              range_min: -20
+              range_max: 20
+              range_step: 1
+          output_name: wrf_bias_compared_to_knmi
+          optional_args:
+              gridshape: !!python/tuple [6, 6]
+
+Each supported plot type expects a different configuration format; each is explained below. Note that most of these require you to specify which result data should be included in the plot with the **results_indeces** flag. This relates to the format in which an Evaluation object outputs its results. Check the :class:`evaluation.Evaluation` documentation for more details.
+
+Contour Maps
+-------------
+
+The contour map config supplies data to OCW's contour plotter, :func:`plotting.draw_contour_map`::
+
+    type: contour
+    results_indeces:
+        - !!python/tuple [0, 0]
+    lats:
+        range_min: -20
+        range_max: 20
+        range_step: 1
+    lons:
+        range_min: -20
+        range_max: 20
+        range_step: 1
+    output_name: wrf_bias_compared_to_knmi
+    optional_args:
+        gridshape: !!python/tuple [6, 6]
+
+The **lats** and **lons** parameters are specified as a range of values. Be aware that the **range_max** element is not included in the output range, so you may need to adjust it slightly if you want a particular value included. The **output_name** parameter is the name of the resulting output graph. You may also pass any optional parameters that are supported by the :func:`plotting.draw_contour_map` function with the **optional_args** flag.
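+
+As a sketch, the configuration above corresponds to a call like the one used in the bundled examples; here ``results`` stands in for the evaluation output selected by **results_indeces** and is not defined in this snippet:
+
+.. code::
+
+    import numpy as np
+    import ocw.plotter as plotter
+
+    # lats/lons are rebuilt from the range_min/range_max/range_step values.
+    lats = np.arange(-20, 20, 1)
+    lons = np.arange(-20, 20, 1)
+
+    # `results` is the metric output selected by results_indeces.
+    plotter.draw_contour_map(results, lats, lons,
+                             fname='wrf_bias_compared_to_knmi',
+                             gridshape=(6, 6))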
diff --git a/docs/source/index.rst b/docs/source/index.rst
index 796e8da..2f5bf63 100644
--- a/docs/source/index.rst
+++ b/docs/source/index.rst
@@ -20,6 +20,7 @@
    ocw/utils
    data_source/data_sources
    ui-backend/backend.rst
+   config/config_overview
 
 
 Indices and tables
diff --git a/easy-ocw/install-ubuntu.sh b/easy-ocw/install-ubuntu.sh
index 321bcc7..8018e87 100755
--- a/easy-ocw/install-ubuntu.sh
+++ b/easy-ocw/install-ubuntu.sh
@@ -63,6 +63,7 @@
 WITH_VIRTUAL_ENV=0
 WITH_HOMEBREW=0
 WITH_INTERACT=1
+ocw_path="${HOME}/climate"
 
 while getopts ":h :e :q" FLAG
 do
diff --git a/easy-ocw/ocw-pip-dependencies.txt b/easy-ocw/ocw-pip-dependencies.txt
index 929f1a7..8773730 100644
--- a/easy-ocw/ocw-pip-dependencies.txt
+++ b/easy-ocw/ocw-pip-dependencies.txt
@@ -7,3 +7,5 @@
 pylint==1.2.1
 sphinx==1.2.1
 sphinxcontrib-httpdomain==1.2.1
+esgf-pyclient==0.1.2
+python-dateutil==2.4.1
diff --git a/examples/knmi_to_cru31_full_bias.py b/examples/knmi_to_cru31_full_bias.py
index c6dac47..a241442 100644
--- a/examples/knmi_to_cru31_full_bias.py
+++ b/examples/knmi_to_cru31_full_bias.py
@@ -28,7 +28,6 @@
 import ocw.evaluation as evaluation
 import ocw.metrics as metrics
 import ocw.plotter as plotter
-
 # File URL leader
 FILE_LEADER = "http://zipper.jpl.nasa.gov/dist/"
 # This way we can easily adjust the time span of the retrievals
diff --git a/examples/multi_model_evaluation.py b/examples/multi_model_evaluation.py
new file mode 100644
index 0000000..8136001
--- /dev/null
+++ b/examples/multi_model_evaluation.py
@@ -0,0 +1,151 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+import datetime
+import numpy as np
+from os import path
+import urllib
+
+
+#import Apache OCW dependencies
+import ocw.data_source.local as local
+import ocw.data_source.rcmed as rcmed
+from ocw.dataset import Bounds as Bounds
+import ocw.dataset_processor as dsp
+import ocw.evaluation as evaluation
+import ocw.metrics as metrics
+import ocw.plotter as plotter
+import ocw.utils as utils
+import ssl
+if hasattr(ssl, '_create_unverified_context'):
+  ssl._create_default_https_context = ssl._create_unverified_context
+  
+# File URL leader
+FILE_LEADER = "http://zipper.jpl.nasa.gov/dist/"
+# Three Local Model Files 
+FILE_1 = "AFRICA_KNMI-RACMO2.2b_CTL_ERAINT_MM_50km_1989-2008_pr.nc"
+FILE_2 = "AFRICA_UC-WRF311_CTL_ERAINT_MM_50km-rg_1989-2008_pr.nc"
+FILE_3 = "AFRICA_UCT-PRECIS_CTL_ERAINT_MM_50km_1989-2008_pr.nc"
+# Filename for the output image/plot (without file extension)
+OUTPUT_PLOT = "pr_africa_bias_annual"
+#variable that we are analyzing
+varName = 'pr' 
+# Spatial and temporal configurations
+LAT_MIN = -45.0 
+LAT_MAX = 42.24
+LON_MIN = -24.0
+LON_MAX = 60.0 
+START = datetime.datetime(2000, 1, 1)
+END = datetime.datetime(2007, 12, 31)
+EVAL_BOUNDS = Bounds(LAT_MIN, LAT_MAX, LON_MIN, LON_MAX, START, END)
+
+#regridding parameters
+gridLonStep=0.5
+gridLatStep=0.5
+
+#list for all target_datasets
+target_datasets =[]
+#list for names for all the datasets
+allNames =[]
+
+
+# Download necessary NetCDF file if not present
+if not path.exists(FILE_1):
+    urllib.urlretrieve(FILE_LEADER + FILE_1, FILE_1)
+
+if not path.exists(FILE_2):
+    urllib.urlretrieve(FILE_LEADER + FILE_2, FILE_2)
+
+if not path.exists(FILE_3):
+    urllib.urlretrieve(FILE_LEADER + FILE_3, FILE_3)
+
+""" Step 1: Load Local NetCDF File into OCW Dataset Objects and store in list"""
+target_datasets.append(local.load_file(FILE_1, varName, name="KNMI"))
+target_datasets.append(local.load_file(FILE_2, varName, name="UC"))
+target_datasets.append(local.load_file(FILE_3, varName, name="UCT"))
+
+
+""" Step 2: Fetch an OCW Dataset Object from the data_source.rcmed module """
+print("Working with the rcmed interface to get CRU3.1 Daily Precipitation")
+# the dataset_id and the parameter id were determined from  
+# https://rcmes.jpl.nasa.gov/content/data-rcmes-database 
+CRU31 = rcmed.parameter_dataset(10, 37, LAT_MIN, LAT_MAX, LON_MIN, LON_MAX, START, END)
+
+""" Step 3: Resample Datasets so they are the same shape """
+print("Resampling datasets")
+CRU31 = dsp.water_flux_unit_conversion(CRU31)
+CRU31 = dsp.temporal_rebin(CRU31, datetime.timedelta(days=30))
+
+for member, each_target_dataset in enumerate(target_datasets):
+  target_datasets[member] = dsp.subset(EVAL_BOUNDS, target_datasets[member])
+  target_datasets[member] = dsp.water_flux_unit_conversion(target_datasets[member])
+  target_datasets[member] = dsp.temporal_rebin(target_datasets[member], datetime.timedelta(days=30))    
+    
+
+""" Spatially Regrid the Dataset Objects to a user defined  grid """
+# Using the bounds we will create a new set of lats and lons 
+print("Regridding datasets")
+new_lats = np.arange(LAT_MIN, LAT_MAX, gridLatStep)
+new_lons = np.arange(LON_MIN, LON_MAX, gridLonStep)
+CRU31 = dsp.spatial_regrid(CRU31, new_lats, new_lons)
+
+for member, each_target_dataset in enumerate(target_datasets):
+  target_datasets[member] = dsp.spatial_regrid(target_datasets[member], new_lats, new_lons)
+
+#make the model ensemble
+target_datasets_ensemble = dsp.ensemble(target_datasets)
+target_datasets_ensemble.name="ENS"
+
+#append to the target_datasets for final analysis
+target_datasets.append(target_datasets_ensemble)
+
+#find the mean value
+#note: the calc_climatology_year function lives in ocw/utils.py
+_, CRU31.values = utils.calc_climatology_year(CRU31)
+CRU31.values = np.expand_dims(CRU31.values, axis=0)
+
+for member, each_target_dataset in enumerate(target_datasets):
+  _,target_datasets[member].values = utils.calc_climatology_year(target_datasets[member])
+  target_datasets[member].values = np.expand_dims(target_datasets[member].values, axis=0)
+
+
+for target in target_datasets:
+  allNames.append(target.name)
+
+#determine the metrics
+mean_bias = metrics.Bias()
+
+#create the Evaluation object
+RCMs_to_CRU_evaluation = evaluation.Evaluation(CRU31, # Reference dataset for the evaluation
+                                    # list of target datasets for the evaluation
+                                    target_datasets,
+                                    # 1 or more metrics to use in the evaluation
+                                    [mean_bias])   
+RCMs_to_CRU_evaluation.run()
+
+#extract the relevant data from RCMs_to_CRU_evaluation.results 
+#results is a list indexed as (num_target_datasets, num_metrics); see the docs for further details
+rcm_bias = RCMs_to_CRU_evaluation.results[:][0] 
+#remove the metric dimension
+new_rcm_bias = np.squeeze(np.array(RCMs_to_CRU_evaluation.results))
+
+plotter.draw_contour_map(new_rcm_bias, new_lats, new_lons, gridshape=(2, 5),fname=OUTPUT_PLOT, subtitles=allNames, cmap='coolwarm_r')
diff --git a/examples/multi_model_taylor_diagram.py b/examples/multi_model_taylor_diagram.py
new file mode 100644
index 0000000..f91ab3e
--- /dev/null
+++ b/examples/multi_model_taylor_diagram.py
@@ -0,0 +1,144 @@
+#Apache OCW lib imports
+from ocw.dataset import Dataset, Bounds
+import ocw.data_source.local as local
+import ocw.data_source.rcmed as rcmed
+import ocw.dataset_processor as dsp
+import ocw.evaluation as evaluation
+import ocw.metrics as metrics
+import ocw.plotter as plotter
+import ocw.utils as utils
+
+import datetime
+import numpy as np
+
+from os import path
+import urllib
+
+# File URL leader
+FILE_LEADER = "http://zipper.jpl.nasa.gov/dist/"
+# Three Local Model Files 
+FILE_1 = "AFRICA_KNMI-RACMO2.2b_CTL_ERAINT_MM_50km_1989-2008_pr.nc"
+FILE_2 = "AFRICA_ICTP-REGCM3_CTL_ERAINT_MM_50km-rg_1989-2008_pr.nc"
+FILE_3 = "AFRICA_UCT-PRECIS_CTL_ERAINT_MM_50km_1989-2008_pr.nc"
+# Filename for the output image/plot (without file extension)
+OUTPUT_PLOT = "pr_africa_taylor"
+
+# Spatial and temporal configurations
+LAT_MIN = -45.0 
+LAT_MAX = 42.24
+LON_MIN = -24.0
+LON_MAX = 60.0 
+START = datetime.datetime(2000, 1, 1)
+END = datetime.datetime(2007, 12, 31)
+EVAL_BOUNDS = Bounds(LAT_MIN, LAT_MAX, LON_MIN, LON_MAX, START, END)
+
+#variable that we are analyzing
+varName = 'pr' 
+
+#regridding parameters
+gridLonStep=0.5
+gridLatStep=0.5
+
+#some vars for this evaluation
+target_datasets_ensemble=[]
+target_datasets =[]
+ref_datasets =[]
+
+# Download necessary NetCDF file if not present
+if not path.exists(FILE_1):
+    urllib.urlretrieve(FILE_LEADER + FILE_1, FILE_1)
+
+if not path.exists(FILE_2):
+    urllib.urlretrieve(FILE_LEADER + FILE_2, FILE_2)
+
+if not path.exists(FILE_3):
+    urllib.urlretrieve(FILE_LEADER + FILE_3, FILE_3)
+
+""" Step 1: Load Local NetCDF File into OCW Dataset Objects and store in list"""
+target_datasets.append(local.load_file(FILE_1, varName, name="KNMI"))
+target_datasets.append(local.load_file(FILE_2, varName, name="REGM3"))
+target_datasets.append(local.load_file(FILE_3, varName, name="UCT"))
+
+
+""" Step 2: Fetch an OCW Dataset Object from the data_source.rcmed module """
+print("Working with the rcmed interface to get CRU3.1 Daily Precipitation")
+# the dataset_id and the parameter id were determined from  
+# https://rcmes.jpl.nasa.gov/content/data-rcmes-database 
+CRU31 = rcmed.parameter_dataset(10, 37, LAT_MIN, LAT_MAX, LON_MIN, LON_MAX, START, END)
+
+""" Step 3: Resample Datasets so they are the same shape """
+print("Resampling datasets ...")
+print("... on units")
+CRU31 = dsp.water_flux_unit_conversion(CRU31)
+print("... temporal")
+CRU31 = dsp.temporal_rebin(CRU31, datetime.timedelta(days=30))
+
+for member, each_target_dataset in enumerate(target_datasets):
+	target_datasets[member] = dsp.water_flux_unit_conversion(target_datasets[member])
+	target_datasets[member] = dsp.temporal_rebin(target_datasets[member], datetime.timedelta(days=30)) 
+	target_datasets[member] = dsp.subset(EVAL_BOUNDS, target_datasets[member])	
+	
+#Regrid
+print("... regrid")
+new_lats = np.arange(LAT_MIN, LAT_MAX, gridLatStep)
+new_lons = np.arange(LON_MIN, LON_MAX, gridLonStep)
+CRU31 = dsp.spatial_regrid(CRU31, new_lats, new_lons)
+
+for member, each_target_dataset in enumerate(target_datasets):
+	target_datasets[member] = dsp.spatial_regrid(target_datasets[member], new_lats, new_lons)
+	
+#find the mean values
+#way to get the mean. Note the function exists in util.py as def calc_climatology_year(dataset):
+CRU31.values,_ = utils.calc_climatology_year(CRU31)
+CRU31.values = np.expand_dims(CRU31.values, axis=0)
+
+#make the model ensemble
+target_datasets_ensemble = dsp.ensemble(target_datasets)
+target_datasets_ensemble.name="ENS"
+
+#append to the target_datasets for final analysis
+target_datasets.append(target_datasets_ensemble)
+
+for member, each_target_dataset in enumerate(target_datasets):
+	target_datasets[member].values,_ = utils.calc_climatology_year(target_datasets[member])
+	target_datasets[member].values = np.expand_dims(target_datasets[member].values, axis=0)
+	
+allNames =[]
+
+for target in target_datasets:
+	allNames.append(target.name)
+
+#calculate the metrics
+pattern_correlation = metrics.PatternCorrelation()
+spatial_std_dev = metrics.StdDevRatio()
+
+
+#create the Evaluation object
+RCMs_to_CRU_evaluation = evaluation.Evaluation(CRU31, # Reference dataset for the evaluation
+                                    # 1 or more target datasets for the evaluation                
+                                    target_datasets,
+                                    # 1 or more metrics to use in the evaluation
+                                    [spatial_std_dev, pattern_correlation])
+RCMs_to_CRU_evaluation.run()
+
+rcm_std_dev = [results[0] for results in RCMs_to_CRU_evaluation.results]
+rcm_pat_cor = [results[1] for results in RCMs_to_CRU_evaluation.results]
+
+taylor_data = np.array([rcm_std_dev, rcm_pat_cor]).transpose()
+
+new_taylor_data = np.squeeze(np.array(taylor_data))
+
+plotter.draw_taylor_diagram(new_taylor_data,
+                        allNames, 
+                        "CRU31",
+                        fname=OUTPUT_PLOT,
+                        fmt='png',
+                        frameon=False)
+
+                              
+
diff --git a/examples/simple_model_to_model_bias.py b/examples/simple_model_to_model_bias.py
index 32ecf15..635e872 100644
--- a/examples/simple_model_to_model_bias.py
+++ b/examples/simple_model_to_model_bias.py
@@ -35,24 +35,21 @@
 # Filename for the output image/plot (without file extension)
 OUTPUT_PLOT = "wrf_bias_compared_to_knmi"
 
-# Download necessary NetCDF files if not present
-if path.exists(FILE_1):
-    pass
-else:
-    urllib.urlretrieve(FILE_LEADER + FILE_1, FILE_1)
+FILE_1_PATH = path.join('/tmp', FILE_1)
+FILE_2_PATH = path.join('/tmp', FILE_2)
 
-if path.exists(FILE_2):
-    pass
-else:
-    urllib.urlretrieve(FILE_LEADER + FILE_2, FILE_2)
+if not path.exists(FILE_1_PATH):
+    urllib.urlretrieve(FILE_LEADER + FILE_1, FILE_1_PATH)
+if not path.exists(FILE_2_PATH):
+    urllib.urlretrieve(FILE_LEADER + FILE_2, FILE_2_PATH)
 
 """ Step 1: Load Local NetCDF Files into OCW Dataset Objects """
-print("Loading %s into an OCW Dataset Object" % (FILE_1,))
-knmi_dataset = local.load_file(FILE_1, "tasmax")
+print("Loading %s into an OCW Dataset Object" % (FILE_1_PATH,))
+knmi_dataset = local.load_file(FILE_1_PATH, "tasmax")
 print("KNMI_Dataset.values shape: (times, lats, lons) - %s \n" % (knmi_dataset.values.shape,))
 
-print("Loading %s into an OCW Dataset Object" % (FILE_2,))
-wrf_dataset = local.load_file(FILE_2, "tasmax")
+print("Loading %s into an OCW Dataset Object" % (FILE_2_PATH,))
+wrf_dataset = local.load_file(FILE_2_PATH, "tasmax")
 print("WRF_Dataset.values shape: (times, lats, lons) - %s \n" % (wrf_dataset.values.shape,))
 
 """ Step 2: Temporally Rebin the Data into an Annual Timestep """
diff --git a/examples/simple_model_to_model_bias_DJF_and_JJA.py b/examples/simple_model_to_model_bias_DJF_and_JJA.py
new file mode 100644
index 0000000..364498a
--- /dev/null
+++ b/examples/simple_model_to_model_bias_DJF_and_JJA.py
@@ -0,0 +1,64 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+import datetime
+from os import path
+import urllib
+
+import numpy as np
+
+import ocw.data_source.local as local
+import ocw.dataset_processor as dsp
+import ocw.evaluation as evaluation
+import ocw.metrics as metrics
+import ocw.plotter as plotter
+import ocw.utils as utils
+
+# File URL leader
+FILE_LEADER = "http://zipper.jpl.nasa.gov/dist/"
+# Two Local Model Files 
+FILE_1 = "AFRICA_KNMI-RACMO2.2b_CTL_ERAINT_MM_50km_1989-2008_tasmax.nc"
+FILE_2 = "AFRICA_UC-WRF311_CTL_ERAINT_MM_50km-rg_1989-2008_tasmax.nc"
+# Filename for the output image/plot (without file extension)
+OUTPUT_PLOT = "wrf_bias_compared_to_knmi"
+
+FILE_1_PATH = path.join('/tmp', FILE_1)
+FILE_2_PATH = path.join('/tmp', FILE_2)
+
+if not path.exists(FILE_1_PATH):
+    urllib.urlretrieve(FILE_LEADER + FILE_1, FILE_1_PATH)
+if not path.exists(FILE_2_PATH):
+    urllib.urlretrieve(FILE_LEADER + FILE_2, FILE_2_PATH)
+
+""" Step 1: Load Local NetCDF Files into OCW Dataset Objects """
+print("Loading %s into an OCW Dataset Object" % (FILE_1_PATH,))
+knmi_dataset = local.load_file(FILE_1_PATH, "tasmax")
+print("KNMI_Dataset.values shape: (times, lats, lons) - %s \n" % (knmi_dataset.values.shape,))
+
+print("Loading %s into an OCW Dataset Object" % (FILE_2_PATH,))
+wrf_dataset = local.load_file(FILE_2_PATH, "tasmax")
+print("WRF_Dataset.values shape: (times, lats, lons) - %s \n" % (wrf_dataset.values.shape,))
+
+""" Step 2: Calculate seasonal average """
+print("Calculate seasonal average")
+knmi_DJF_mean = utils.calc_temporal_mean(dsp.temporal_subset(month_start=12, month_end=2, target_dataset=knmi_dataset))
+wrf_DJF_mean = utils.calc_temporal_mean(dsp.temporal_subset(month_start=12, month_end=2, target_dataset=wrf_dataset))
+print("Seasonally averaged KNMI_Dataset.values shape: (times, lats, lons) - %s \n" % (knmi_DJF_mean.shape,))
+print("Seasonally averaged wrf_Dataset.values shape: (times, lats, lons) - %s \n" % (wrf_DJF_mean.shape,))
+knmi_JJA_mean = utils.calc_temporal_mean(dsp.temporal_subset(month_start=6, month_end=8, target_dataset=knmi_dataset))
+wrf_JJA_mean = utils.calc_temporal_mean(dsp.temporal_subset(month_start=6, month_end=8, target_dataset=wrf_dataset))
+
diff --git a/examples/subregions.py b/examples/subregions.py
new file mode 100644
index 0000000..20aaee9
--- /dev/null
+++ b/examples/subregions.py
@@ -0,0 +1,53 @@
+#Apache OCW lib imports
+from ocw.dataset import Dataset, Bounds
+import ocw.data_source.local as local
+import ocw.data_source.rcmed as rcmed
+import ocw.dataset_processor as dsp
+import ocw.evaluation as evaluation
+import ocw.metrics as metrics
+import ocw.plotter as plotter
+import ocw.utils as utils
+
+import datetime
+import numpy as np
+import numpy.ma as ma
+
+OUTPUT_PLOT = "subregions"
+
+# Spatial and temporal configurations
+LAT_MIN = -45.0
+LAT_MAX = 42.24
+LON_MIN = -24.0
+LON_MAX = 60.0
+START_SUB = datetime.datetime(2000, 1, 1)
+END_SUB = datetime.datetime(2007, 12, 31)
+
+#regridding parameters
+gridLonStep=0.5
+gridLatStep=0.5
+
+#Regrid
+print("... regrid")
+new_lats = np.arange(LAT_MIN, LAT_MAX, gridLatStep)
+new_lons = np.arange(LON_MIN, LON_MAX, gridLonStep)
+
+list_of_regions = [
+ Bounds(-10.0, 0.0, 29.0, 36.5, START_SUB, END_SUB),
+ Bounds(0.0, 10.0,  29.0, 37.5, START_SUB, END_SUB),
+ Bounds(10.0, 20.0, 25.0, 32.5, START_SUB, END_SUB),
+ Bounds(20.0, 33.0, 25.0, 32.5, START_SUB, END_SUB),
+ Bounds(-19.3,-10.2,12.0, 20.0, START_SUB, END_SUB),
+ Bounds( 15.0, 30.0, 15.0, 25.0, START_SUB, END_SUB),
+ Bounds(-10.0, 10.0, 7.3, 15.0, START_SUB, END_SUB),
+ Bounds(-10.9, 10.0, 5.0, 7.3,  START_SUB, END_SUB),
+ Bounds(33.9, 40.0,  6.9, 15.0, START_SUB, END_SUB),
+ Bounds(10.0, 25.0,  0.0, 10.0, START_SUB, END_SUB),
+ Bounds(10.0, 25.0,-10.0,  0.0, START_SUB, END_SUB),
+ Bounds(30.0, 40.0,-15.0,  0.0, START_SUB, END_SUB),
+ Bounds(33.0, 40.0, 25.0, 35.0, START_SUB, END_SUB)]
+
+#for plotting the subregions
+plotter.draw_subregions(list_of_regions, new_lats, new_lons, OUTPUT_PLOT, fmt='png')
+
+
+
diff --git a/examples/subregions_portrait_diagram.py b/examples/subregions_portrait_diagram.py
new file mode 100644
index 0000000..075de2d
--- /dev/null
+++ b/examples/subregions_portrait_diagram.py
@@ -0,0 +1,139 @@
+#Apache OCW lib imports
+from ocw.dataset import Dataset, Bounds
+import ocw.data_source.local as local
+import ocw.data_source.rcmed as rcmed
+import ocw.dataset_processor as dsp
+import ocw.evaluation as evaluation
+import ocw.metrics as metrics
+import ocw.plotter as plotter
+import ocw.utils as utils
+
+import datetime
+import numpy as np
+import numpy.ma as ma
+
+from os import path
+import urllib
+
+# File URL leader
+FILE_LEADER = "http://zipper.jpl.nasa.gov/dist/"
+# Three Local Model Files
+FILE_1 = "AFRICA_KNMI-RACMO2.2b_CTL_ERAINT_MM_50km_1989-2008_pr.nc"
+FILE_2 = "AFRICA_ICTP-REGCM3_CTL_ERAINT_MM_50km-rg_1989-2008_pr.nc"
+FILE_3 = "AFRICA_UCT-PRECIS_CTL_ERAINT_MM_50km_1989-2008_pr.nc"
+# Filename for the output image/plot (without file extension)
+OUTPUT_PLOT = "portrait_diagram"
+
+# Spatial and temporal configurations
+LAT_MIN = -45.0
+LAT_MAX = 42.24
+LON_MIN = -24.0
+LON_MAX = 60.0
+START = datetime.datetime(2000, 1, 1)
+END = datetime.datetime(2007, 12, 31)
+EVAL_BOUNDS = Bounds(LAT_MIN, LAT_MAX, LON_MIN, LON_MAX, START, END)
+
+#variable that we are analyzing
+varName = 'pr'
+
+#regridding parameters
+gridLonStep = 0.5
+gridLatStep = 0.5
+
+#some vars for this evaluation
+target_datasets_ensemble = []
+target_datasets = []
+allNames = []
+
+# Download necessary NetCDF file if not present
+if not path.exists(FILE_1):
+    urllib.urlretrieve(FILE_LEADER + FILE_1, FILE_1)
+
+if not path.exists(FILE_2):
+    urllib.urlretrieve(FILE_LEADER + FILE_2, FILE_2)
+
+if not path.exists(FILE_3):
+    urllib.urlretrieve(FILE_LEADER + FILE_3, FILE_3)
+
+""" Step 1: Load Local NetCDF File into OCW Dataset Objects and store in list"""
+target_datasets.append(local.load_file(FILE_1, varName, name="KNMI"))
+target_datasets.append(local.load_file(FILE_2, varName, name="REGCM"))
+target_datasets.append(local.load_file(FILE_3, varName, name="UCT"))
+
+""" Step 2: Fetch an OCW Dataset Object from the data_source.rcmed module """
+print("Working with the rcmed interface to get CRU3.1 Daily Precipitation")
+# the dataset_id and the parameter id were determined from
+# https://rcmes.jpl.nasa.gov/content/data-rcmes-database
+CRU31 = rcmed.parameter_dataset(10, 37, LAT_MIN, LAT_MAX, LON_MIN, LON_MAX, START, END)
+
+""" Step 3: Processing Datasets so they are the same shape """
+print("Processing datasets ...")
+CRU31 = dsp.normalize_dataset_datetimes(CRU31, 'monthly')
+print("... on units")
+CRU31 = dsp.water_flux_unit_conversion(CRU31)
+
+for member, each_target_dataset in enumerate(target_datasets):
+    target_datasets[member] = dsp.subset(EVAL_BOUNDS, target_datasets[member])
+    target_datasets[member] = dsp.water_flux_unit_conversion(target_datasets[member])
+    target_datasets[member] = dsp.normalize_dataset_datetimes(target_datasets[member], 'monthly')
+
+print("... spatial regridding")
+new_lats = np.arange(LAT_MIN, LAT_MAX, gridLatStep)
+new_lons = np.arange(LON_MIN, LON_MAX, gridLonStep)
+CRU31 = dsp.spatial_regrid(CRU31, new_lats, new_lons)
+
+for member, each_target_dataset in enumerate(target_datasets):
+    target_datasets[member] = dsp.spatial_regrid(target_datasets[member], new_lats, new_lons)
+
+#find the total annual mean. Note the function exists in util.py as def calc_climatology_year(dataset):
+_, CRU31.values = utils.calc_climatology_year(CRU31)
+
+for member, each_target_dataset in enumerate(target_datasets):
+    _, target_datasets[member].values = utils.calc_climatology_year(target_datasets[member])
+
+#make the model ensemble
+target_datasets_ensemble = dsp.ensemble(target_datasets)
+target_datasets_ensemble.name="ENS"
+
+#append to the target_datasets for final analysis
+target_datasets.append(target_datasets_ensemble)
+
+for target in target_datasets:
+    allNames.append(target.name)
+
+list_of_regions = [
+ Bounds(-10.0, 0.0, 29.0, 36.5),
+ Bounds(0.0, 10.0,  29.0, 37.5),
+ Bounds(10.0, 20.0, 25.0, 32.5),
+ Bounds(20.0, 33.0, 25.0, 32.5),
+ Bounds(-19.3,-10.2,12.0, 20.0),
+ Bounds( 15.0, 30.0, 15.0, 25.0),
+ Bounds(-10.0, 10.0, 7.3, 15.0),
+ Bounds(-10.9, 10.0, 5.0, 7.3),
+ Bounds(33.9, 40.0,  6.9, 15.0),
+ Bounds(10.0, 25.0,  0.0, 10.0),
+ Bounds(10.0, 25.0,-10.0,  0.0),
+ Bounds(30.0, 40.0,-15.0,  0.0),
+ Bounds(33.0, 40.0, 25.0, 35.00)]
+
+region_list=["R"+str(i+1) for i in xrange(13)]
+
+#metrics
+pattern_correlation = metrics.PatternCorrelation()
+
+#create the Evaluation object
+RCMs_to_CRU_evaluation = evaluation.Evaluation(CRU31, # Reference dataset for the evaluation
+                                    # 1 or more target datasets for the evaluation
+                                    target_datasets,
+                                    # 1 or more metrics to use in the evaluation
+                                    [pattern_correlation],
+                                    # list of subregion Bounds Objects
+                                    list_of_regions)
+RCMs_to_CRU_evaluation.run()
+
+new_patcor = np.squeeze(np.array(RCMs_to_CRU_evaluation.results), axis=1)
+
+plotter.draw_portrait_diagram(new_patcor, allNames, region_list, fname=OUTPUT_PLOT, fmt='png', cmap='coolwarm_r')
+
+
+
diff --git a/examples/time_series_with_regions.py b/examples/time_series_with_regions.py
new file mode 100644
index 0000000..1d552a8
--- /dev/null
+++ b/examples/time_series_with_regions.py
@@ -0,0 +1,141 @@
+#Apache OCW lib immports
+from ocw.dataset import Dataset, Bounds
+import ocw.data_source.local as local
+import ocw.data_source.rcmed as rcmed
+import ocw.dataset_processor as dsp
+import ocw.evaluation as evaluation
+import ocw.metrics as metrics
+import ocw.plotter as plotter
+import ocw.utils as utils
+
+import datetime
+import numpy as np
+import numpy.ma as ma
+from os import path
+import urllib
+
+# File URL leader
+FILE_LEADER = "http://zipper.jpl.nasa.gov/dist/"
+# Three Local Model Files 
+FILE_1 = "AFRICA_KNMI-RACMO2.2b_CTL_ERAINT_MM_50km_1989-2008_pr.nc"
+FILE_2 = "AFRICA_ICTP-REGCM3_CTL_ERAINT_MM_50km-rg_1989-2008_pr.nc"
+FILE_3 = "AFRICA_UCT-PRECIS_CTL_ERAINT_MM_50km_1989-2008_pr.nc"
+
+LAT_MIN = -45.0 
+LAT_MAX = 42.24 
+LON_MIN = -24.0
+LON_MAX = 60.0 
+START = datetime.datetime(2000, 01, 1)
+END = datetime.datetime(2007, 12, 31)
+
+EVAL_BOUNDS = Bounds(LAT_MIN, LAT_MAX, LON_MIN, LON_MAX, START, END)
+
+varName = 'pr' 
+gridLonStep=0.44
+gridLatStep=0.44
+
+#needed vars for the script
+target_datasets =[]
+tSeries =[]
+results =[]
+labels =[] # could just as easily be the names for each subregion
+region_counter = 0
+
+# Download necessary NetCDF file if not present
+if not path.exists(FILE_1):
+    urllib.urlretrieve(FILE_LEADER + FILE_1, FILE_1)
+
+if not path.exists(FILE_2):
+    urllib.urlretrieve(FILE_LEADER + FILE_2, FILE_2)
+
+if not path.exists(FILE_3):
+    urllib.urlretrieve(FILE_LEADER + FILE_3, FILE_3)
+
+""" Step 1: Load Local NetCDF File into OCW Dataset Objects and store in list"""
+target_datasets.append(local.load_file(FILE_1, varName, name="KNMI"))
+target_datasets.append(local.load_file(FILE_2, varName, name="REGCM"))
+target_datasets.append(local.load_file(FILE_3, varName, name="UCT"))
+
+
+""" Step 2: Fetch an OCW Dataset Object from the data_source.rcmed module """
+print("Working with the rcmed interface to get CRU3.1 Daily Precipitation")
+# the dataset_id and the parameter id were determined from  
+# https://rcmes.jpl.nasa.gov/content/data-rcmes-database 
+CRU31 = rcmed.parameter_dataset(10, 37, LAT_MIN, LAT_MAX, LON_MIN, LON_MAX, START, END)
+
+
+""" Step 3: Processing datasets so they are the same shape ... """
+print("Processing datasets so they are the same shape")
+CRU31 = dsp.water_flux_unit_conversion(CRU31)
+CRU31 = dsp.normalize_dataset_datetimes(CRU31, 'monthly')
+
+for member, each_target_dataset in enumerate(target_datasets):
+	target_datasets[member] = dsp.subset(EVAL_BOUNDS, target_datasets[member])
+	target_datasets[member] = dsp.water_flux_unit_conversion(target_datasets[member])
+	target_datasets[member] = dsp.normalize_dataset_datetimes(target_datasets[member], 'monthly')  		
+	
+print("... spatial regridding")
+new_lats = np.arange(LAT_MIN, LAT_MAX, gridLatStep)
+new_lons = np.arange(LON_MIN, LON_MAX, gridLonStep)
+CRU31 = dsp.spatial_regrid(CRU31, new_lats, new_lons)
+
+
+for member, each_target_dataset in enumerate(target_datasets):
+	target_datasets[member] = dsp.spatial_regrid(target_datasets[member], new_lats, new_lons)
+
+#find climatology monthly for obs and models
+CRU31.values, CRU31.times = utils.calc_climatology_monthly(CRU31)
+
+for member, each_target_dataset in enumerate(target_datasets):
+	target_datasets[member].values, target_datasets[member].times = utils.calc_climatology_monthly(target_datasets[member])
+		
+#make the model ensemble
+target_datasets_ensemble = dsp.ensemble(target_datasets)
+target_datasets_ensemble.name="ENS"
+
+#append to the target_datasets for final analysis
+target_datasets.append(target_datasets_ensemble)
+
+""" Step 4: Subregion stuff """
+list_of_regions = [
+ Bounds(-10.0, 0.0, 29.0, 36.5), 
+ Bounds(0.0, 10.0,  29.0, 37.5), 
+ Bounds(10.0, 20.0, 25.0, 32.5),
+ Bounds(20.0, 33.0, 25.0, 32.5), 
+ Bounds(-19.3,-10.2,12.0, 20.0), 
+ Bounds( 15.0, 30.0, 15.0, 25.0),
+ Bounds(-10.0, 10.0, 7.3, 15.0), 
+ Bounds(-10.9, 10.0, 5.0, 7.3),  
+ Bounds(33.9, 40.0,  6.9, 15.0),
+ Bounds(10.0, 25.0,  0.0, 10.0), 
+ Bounds(10.0, 25.0,-10.0,  0.0), 
+ Bounds(30.0, 40.0,-15.0,  0.0), 
+ Bounds(33.0, 40.0, 25.0, 35.0)]
+
+region_list=[["R"+str(i+1)] for i in xrange(13)]
+
+for regions in region_list:
+	firstTime = True
+	subset_name = regions[0]+"_CRU31"
+	#labels.append(subset_name) #for legend, uncomment this line
+	subset = dsp.subset(list_of_regions[region_counter], CRU31, subset_name)
+	tSeries = utils.calc_time_series(subset)
+	results.append(tSeries)
+	tSeries=[]
+	firstTime = False
+	for member, each_target_dataset in enumerate(target_datasets):
+		subset_name = regions[0]+"_"+target_datasets[member].name
+		#labels.append(subset_name) #for legend, uncomment this line
+		subset = dsp.subset(list_of_regions[region_counter],target_datasets[member],subset_name)
+		tSeries = utils.calc_time_series(subset)
+		results.append(tSeries)
+		tSeries=[]
+	
+	plotter.draw_time_series(np.array(results), CRU31.times, labels, regions[0], ptitle=regions[0],fmt='png')
+	results =[]
+	tSeries =[]
+	labels =[]
+	region_counter+=1
+			
+                               
+
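As used throughout these examples, Bounds takes (lat_min, lat_max, lon_min, lon_max) and optionally (start, end). A single-region walk-through of the loop body above, assuming it runs in the same session as the script (the region choice and subset name are illustrative):

    region_10 = Bounds(10.0, 25.0, 0.0, 10.0)               # lat_min, lat_max, lon_min, lon_max
    cru_region_10 = dsp.subset(region_10, CRU31, "R10_CRU31")  # same call pattern as the loop
    series = utils.calc_time_series(cru_region_10)          # presumably one area-mean value per time step
    print(len(series), len(CRU31.times))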
diff --git a/ocw-cli/cli_app.py b/ocw-cli/cli_app.py
new file mode 100644
index 0000000..5f07c0d
--- /dev/null
+++ b/ocw-cli/cli_app.py
@@ -0,0 +1,1422 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+import curses
+import sys
+import os
+import numpy as np
+import getpass
+import urllib2
+import json
+
+from netCDF4 import Dataset
+from datetime import datetime, timedelta
+
+import ocw.metrics as metrics
+import ocw.plotter as plotter
+import ocw.dataset_processor as dsp
+import ocw.evaluation as evaluation
+import ocw.data_source.rcmed as rcmed
+from ocw.dataset import Bounds
+from ocw.data_source.local import load_file
+import ocw.utils as utils
+import ocw.data_source.esgf as esgf
+from ocw_config_runner.configuration_writer import export_evaluation_to_config
+
+import ssl
+if hasattr(ssl, '_create_unverified_context'):
+    ssl._create_default_https_context = ssl._create_unverified_context
+
+def ready_screen(page, note=""):
+    ''' Generates page borders, header, footer and notification center.
+
+    :param page: Name of current page
+    :type page: string
+    :param note: Notification that system returns and will be shown
+         at the bottom of page
+    :type note: string
+
+    :returns: y and x as location of text on screen
+    :rtype: integer
+    '''
+
+    screen.clear()
+    y, x = screen.getmaxyx()
+    screen.border(0)
+    screen.addstr(0, x/2-len(TITLE)/2, TITLE)
+    screen.addstr(y-1, x/2-len(ORGANIZATION)/2, ORGANIZATION)
+    screen.addstr(y-3, 1, "Notification:")
+    for each in range(1, x-1):
+         screen.addstr(y-4, each, "-")
+    if page == "main_menu":
+         screen.addstr(y-3, x-21, "(NC) = Not complete")
+         screen.addstr(y-2, x-21, "(C)  = Complete")
+    if page == "settings_screen":
+         for i in range(y-5):
+              screen.addstr(i+1, x/2-2, ".")
+    screen.addstr(y-2, 1, note)
+
+    return y, x
+
+
+def get_esgf_netCDF_file_name(esgf_dataset_id, esgf_variable):
+    dataset_info = esgf._get_file_download_data(esgf_dataset_id, esgf_variable)
+    netCDF_name = dataset_info[0][0].split("/")[-1]
+
+    return netCDF_name
+
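The helper above just peels the file name off the end of the first download URL reported by esgf._get_file_download_data; a plain-string illustration with a made-up URL:

    download_url = "http://esgf-node.example.org/thredds/fileServer/some/path/pr_example_dataset.nc"
    print(download_url.split("/")[-1])   # -> "pr_example_dataset.nc", later expected under /tmp/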
+
+##############################################################
+#         Manage Model Screen
+##############################################################
+
+def load_local_model_screen(header):
+    '''Generates screen for loading a local model file.
+    The path to the model file (netCDF) and the variable name are required.
+
+    :param header: Header of page
+    :type header: string
+
+    :returns: Notification
+    :rtype: string
+    '''
+
+    ready_screen("load_local_model_screen")
+    screen.addstr(1, 1, header + " > Load Local Model File ")
+    screen.addstr(4, 2, "Enter model path: ")
+    model_path = screen.getstr()
+    try:
+         netCDF_file = Dataset(model_path, 'r')
+         all_netcdf_variables = [variable.encode() for variable in netCDF_file.variables.keys()]
+         try:
+              screen.addstr(6, 2, "Enter model variable name {0}: ".format(all_netcdf_variables))
+              variable_name = screen.getstr()
+              screen.addstr(7, 4, "{0}".format(netCDF_file.variables[variable_name]))
+              screen.addstr(20, 2, "Confirm:")
+              screen.addstr(21, 4, "0- No")
+              screen.addstr(22, 4, "1- Yes")
+              screen.addstr(23, 3, "Would you take this variable:")
+              answer = screen.getstr()
+              if answer == "0":
+                   note = "WARNING: Model file cannot be added."
+              elif answer == "1":
+                   model_dataset = load_file(model_path, variable_name)
+                   model_datasets.append(model_dataset)
+                   models_info.append({'directory': model_path, 'variable_name': variable_name})
+                   note = "Model file successfully added."
+              else:
+                   note = "WARNING: Model file cannot be added."
+         except:
+              note = "WARNING: Model file cannot be added. The variable [{0}] is not accepted. Please try again.".format(variable_name)
+         netCDF_file.close()
+    except:
+         note = "WARNING: Model file cannot be read. Please check the file directory or format. Only netCDF format is accepted."
+
+    return note
+
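For the screens that follow, it helps to know the shape of the bookkeeping entries this function appends; a representative entry (the path is illustrative) looks like:

    models_info = [{'directory': '/data/AFRICA_KNMI-RACMO2.2b_CTL_ERAINT_MM_50km_1989-2008_pr.nc',
                    'variable_name': 'pr'}]
    model_datasets = []   # holds the matching OCW Dataset objects returned by load_file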
+
+def load_esgf_model_screen(header):
+    '''Generates screen to be able to load ESGF model file.
+
+    :param header: Header of page
+    :type header: string
+
+    :returns: Notification
+    :rtype: string
+    '''
+
+    ready_screen("load_esgf_model_screen")
+    screen.addstr(1, 1, header + " > Download ESGF Dataset ")
+    screen.addstr(6, 1, "Enter Dataset ID:")
+    esgf_dataset_id = screen.getstr()
+    screen.addstr(7, 1, "Enter Variable:")
+    esgf_variable = screen.getstr()
+    screen.addstr(8, 1, "Enter Username:")
+    esgf_username = screen.getstr()
+    screen.addstr(9, 1, "Enter Password:")
+    esgf_password = screen.getstr()
+    try:
+        solr_url = "http://esg-datanode.jpl.nasa.gov/esg-search/search?id={0}&variable={1}&format=application%2Fsolr%2Bjson".format(esgf_dataset_id, esgf_variable)
+        metadata_json = json.load(urllib2.urlopen(solr_url))
+        if metadata_json['response']['docs'][0]["product"][0] != "observations":
+            screen.addstr(11, 4, "Title: {0}".format(metadata_json['response']['docs'][0]['title']))
+            screen.addstr(12, 4, "Start Date: {0}".format(metadata_json['response']['docs'][0]['datetime_start']))
+            screen.addstr(13, 4, "End Date: {0}".format(metadata_json['response']['docs'][0]['datetime_stop']))
+            screen.addstr(15, 2, "Confirm:")
+            screen.addstr(16, 4, "0- No")
+            screen.addstr(17, 4, "1- Yes")
+            screen.addstr(18, 3, "Would you take this dataset:")
+            answer = screen.getstr()
+            if answer == "0":
+                note = "WARNING: ESGF model file cannot be added."
+            elif answer == "1":
+                try:
+                    screen.addstr(20, 4, "Downloading dataset.....")
+                    screen.refresh()
+                    datasets = esgf.load_dataset(esgf_dataset_id,
+                                                esgf_variable,
+                                                esgf_username,
+                                                esgf_password)
+                    netCDF_name = get_esgf_netCDF_file_name(esgf_dataset_id, esgf_variable)
+                    netCDF_path = "/tmp/{0}".format(netCDF_name)
+                    model_dataset = load_file(netCDF_path, esgf_variable)
+                    model_datasets.append(model_dataset)
+                    models_info.append({'directory': netCDF_path, 'variable_name': esgf_variable})
+                    note = "Dataset successfully downloaded."
+                except:
+                    note = "WARNING: Dataset has not been downloaded. Check your ESGF permission."
+        else:
+            note = "The selected dataset is Observation, please enter model dataset."
+    except:
+        note = "WARNING: Something went wrong in downloading model dataset from ESGF."
+
+    return  note
+
+
+def unload_model_screen(header):
+    '''Generates screen for unloading a model file.
+    It lists all loaded models with an index for each.
+    Selecting a model by its index removes it from the list of models.
+
+    :param header: Header of page
+    :type header: string
+
+    :returns: Notification
+    :rtype: string
+    '''
+
+    ready_screen("unload_model_screen")
+    screen.addstr(1, 1, header + " > Unload Model File")
+    screen.addstr(6, 1, "List of Model:")
+    for i, model in enumerate(models_info):
+         screen.addstr(8 + i, 10, "Model Number:[{0}] - Model path:[{1}] - Variables:[{2}]".format(str(i), model['directory'], model['variable_name']))
+    screen.addstr(3, 2, "Select the model number to remove (press enter to go back): ")
+    try:
+         model_remove_index = screen.getstr()
+         models_info.pop(int(model_remove_index))
+         model_datasets.pop(int(model_remove_index))
+         note = "Model file unloaded successfully"
+    except:
+         note = "WARNING: Model file not unloaded successfully."
+
+    return note
+
+
+def list_model_screen(header):
+    '''Generates screen to list all model files.
+
+    :param header: Header of page
+    :type header: string
+    '''
+
+    ready_screen("list_model_screen")
+    screen.addstr(1, 1, header + " > List Model File ")
+    screen.addstr(6, 6, "List of model(s): ")
+    for i, model in enumerate(models_info):
+         screen.addstr(8 + i, 10, "Model Number:[{0}] - Model path:[{1}] - Variables:[{2}]".format(str(i), model['directory'], model['variable_name']))
+    screen.addstr(4, 4, "Return to Manage Model (press Enter) :")
+    screen.getstr()
+
+
+def manage_model_screen(header, note=""):
+    '''Generates Manage Model screen.
+
+    :param header: Header of page
+    :type header: string
+    :param note: Notification, defaults to an empty string.
+    :type note: string
+    '''
+
+    option = ''
+    while option != '0':
+         ready_screen("manage_model_screen", note)
+         screen.addstr(1, 1, header)
+         screen.addstr(4, 4, "1 - Load Local Model File")
+         screen.addstr(6, 4, "2 - Load ESGF Model File")
+         screen.addstr(8, 4, "3 - Unload Model File")
+         screen.addstr(10, 4, "4 - List Model File")
+         screen.addstr(12, 4, "0 - Return to Main Menu")
+         screen.addstr(14, 2, "Select an option: ")
+         screen.refresh()
+         option = screen.getstr()
+
+         if option == '1':
+              note = load_local_model_screen(header)
+         if option == '2':
+              note = load_esgf_model_screen(header)
+         if option == '3':
+              note = unload_model_screen(header)
+         if option == '4':
+              note = list_model_screen(header)
+              note = " "
+
+
+##############################################################
+#     Manage Observation Screen
+##############################################################
+
+def select_obs_screen(header):   #TODO: if the observation is already selected, don't select again.
+    '''Generates screen to select observation.
+    It retrieves the list of observations from the database and builds a table from it.
+    The user selects an observation by dataset_id and parameter_id.
+    If the terminal screen is too small to show the whole table, a notification with a link to the parameter table on the website is shown instead.
+
+    :param header: Header of page
+    :type header: string
+
+    :returns: Notification
+    :rtype: string
+    '''
+
+    ready_screen("select_obs_screen")
+    screen.addstr(1, 1, header + " > Select Observation ")
+    screen.addstr(7, 1, "Observations Table: ")
+    screen.addstr(8, 2, "|D-ID| - |P-ID| - |Database")
+    screen.addstr(9, 2, "|----| - |----| - |--------")
+    all_obs_info = rcmed.get_parameters_metadata()
+    try:
+         for position, obs_info in enumerate(all_obs_info):
+              dataset_id = obs_info['dataset_id']
+              parameter_id = obs_info['parameter_id']
+              database = obs_info['database']
+              line = "|{0:>4}| - |{1:>4}| - |{2}".format(dataset_id, parameter_id, database)
+              if position <= 25:
+                   screen.addstr(10 + position, 2, line)
+              elif position > 25 and position <= 50:
+                   screen.addstr(8, 50, "|D-ID| - |P-ID| - |Database")
+                   screen.addstr(9, 50, "|----| - |----| - |--------")
+                   screen.addstr(10 + position - 26, 50, line)
+              else:
+                   screen.addstr(8, 100, "|D-ID| - |P-ID| - |Database")
+                   screen.addstr(9, 100, "|----| - |----| - |--------")
+                   screen.addstr(10 + position - 51, 100, line)
+    except:
+         ready_screen("select_obs_screen")
+         screen.addstr(1, 1, header + " > Select Observation ")
+         screen.addstr(10, 1, "Observation table cannot be shown due to small screen size. ")
+         screen.addstr(11, 1, "Please enlarge your screen and try again or refer to 'https://rcmes.jpl.nasa.gov/content/data-rcmes-database'. ")
+    try:
+         screen.addstr(2, 1, "More info for observation: https://rcmes.jpl.nasa.gov/content/data-rcmes-database")
+         screen.addstr(4, 2, "Enter Dataset ID (D-ID): ")
+         dataset_id = screen.getstr()
+         screen.addstr(5, 2, "Enter Parameter ID (P-ID): ")
+         parameter_id = screen.getstr()
+
+         for obs in all_obs_info:
+              if obs['dataset_id'] == dataset_id and obs['parameter_id'] == parameter_id:
+                   observations_info.append({
+                        'database':obs['database'],
+                        'dataset_id':dataset_id,
+                        'parameter_id':parameter_id,
+                        'start_date':obs['start_date'],
+                        'end_date':obs['end_date'],
+                        'bounding_box':obs['bounding_box'],
+                        'timestep':obs['timestep'],
+                        'min_lat':float(eval(obs['bounding_box'].encode())[2][0]) if obs['bounding_box'] else None,
+                        'max_lat':float(eval(obs['bounding_box'].encode())[0][0]) if obs['bounding_box'] else None,
+                        'min_lon':float(eval(obs['bounding_box'].encode())[2][1]) if obs['bounding_box'] else None,
+                        'max_lon':float(eval(obs['bounding_box'].encode())[0][1]) if obs['bounding_box'] else None,
+                        'lat_res':float(obs['lat_res'].encode()),
+                        'lon_res':float(obs['lon_res'].encode()),
+                        'unit':obs['units']
+                        })
+                   note = "Observation sucessfully selected."
+                   break
+              else:
+                   note = "WARNING: Observation cannot be selected. There is no observation with given info."
+    except:
+         note = "WARNING: Observation cannot be selected, dataset or parameter id is wrong."
+
+    return  note
+
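The corner indexing on bounding_box above is easier to see with a concrete value; the exact RCMED encoding is an assumption here (a list of four [lat, lon] corners, with corner 0 the north-east and corner 2 the south-west):

    bounding_box = "[[42.24, 60.0], [42.24, -24.0], [-45.0, -24.0], [-45.0, 60.0]]"
    corners = eval(bounding_box)
    max_lat, max_lon = float(corners[0][0]), float(corners[0][1])   # 42.24, 60.0
    min_lat, min_lon = float(corners[2][0]), float(corners[2][1])   # -45.0, -24.0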
+
+def load_esgf_obs_screen(header):
+    '''Generates screen to be able to load ESGF observation file.
+
+    :param header: Header of page
+    :type header: string
+
+    :returns: Notification
+    :rtype: string
+    '''
+
+    ready_screen("load_esgf_obs_screen")
+    screen.addstr(1, 1, header + " > Download ESGF Dataset ")
+    screen.addstr(6, 1, "Enter Dataset ID:")
+    esgf_dataset_id = screen.getstr()
+    screen.addstr(7, 1, "Enter Variable:")
+    esgf_variable = screen.getstr()
+    screen.addstr(8, 1, "Enter Username:")
+    esgf_username = screen.getstr()
+    screen.addstr(9, 1, "Enter Password:")
+    esgf_password = screen.getstr()
+    try:
+        solr_url = "http://esg-datanode.jpl.nasa.gov/esg-search/search?id={0}&variable={1}&format=application%2Fsolr%2Bjson".format(esgf_dataset_id, esgf_variable)
+        metadata_json = json.load(urllib2.urlopen(solr_url))
+        all_variables = metadata_json['response']['docs'][0]['variable']
+        variable_index = all_variables.index(esgf_variable)
+        if metadata_json['response']['docs'][0]["product"][0] == "observations":
+            screen.addstr(11, 4, "Variable Long Name: {0}".format(metadata_json['response']['docs'][0]['variable_long_name'][variable_index]))
+            screen.addstr(12, 4, "Start Date: {0}".format(metadata_json['response']['docs'][0]['datetime_start']))
+            screen.addstr(13, 4, "End Stop: {0}".format(metadata_json['response']['docs'][0]['datetime_stop']))
+            screen.addstr(14, 4, "Time Frequency: {0}".format(metadata_json['response']['docs'][0]['time_frequency']))
+            screen.addstr(15, 4, "Variable Units: {0}".format(metadata_json['response']['docs'][0]['variable_units'][variable_index]))
+            screen.addstr(16, 4, "East Degrees: {0}".format(metadata_json['response']['docs'][0]['east_degrees']))
+            screen.addstr(17, 4, "North Degrees: {0}".format(metadata_json['response']['docs'][0]['north_degrees']))
+            screen.addstr(18, 4, "South Degrees: {0}".format(metadata_json['response']['docs'][0]['south_degrees']))
+            screen.addstr(19, 4, "West Degrees: {0}".format(metadata_json['response']['docs'][0]['west_degrees']))
+            screen.addstr(22, 2, "Confirm:")
+            screen.addstr(23, 4, "0- No")
+            screen.addstr(24, 4, "1- Yes")
+            screen.addstr(25, 3, "Would you take this dataset:")
+            answer = screen.getstr()
+            if answer == "0":
+                note = "WARNING: ESGF observation file cannot be added."
+            elif answer == "1":
+                try:
+                    screen.addstr(27, 4, "Downloading dataset.....")
+                    screen.refresh()
+                    datasets = esgf.load_dataset(esgf_dataset_id,
+                                                esgf_variable,
+                                                esgf_username,
+                                                esgf_password)
+                    netCDF_name = get_esgf_netCDF_file_name(esgf_dataset_id, esgf_variable)
+                    netCDF_path = "/tmp/{0}".format(netCDF_name)
+                    obs_dataset = load_file(netCDF_path, esgf_variable)
+                    observations_info.append({
+                     'database':"{0}".format(netCDF_path),
+                     'dataset_id':"esgf".format(esgf_variable),
+                     'parameter_id':"{0}".format(esgf_variable),
+                     'start_date': obs_dataset.time_range()[0].strftime("%Y-%m-%d"),
+                     'end_date':obs_dataset.time_range()[1].strftime("%Y-%m-%d"),
+                     #'bounding_box':obs['bounding_box'],
+                     'timestep':"monthly",
+                     'min_lat':obs_dataset.spatial_boundaries()[0],
+                     'max_lat':obs_dataset.spatial_boundaries()[1],
+                     'min_lon':obs_dataset.spatial_boundaries()[2],
+                     'max_lon':obs_dataset.spatial_boundaries()[3],
+                     'lat_res':obs_dataset.spatial_resolution()[0],
+                     'lon_res':obs_dataset.spatial_resolution()[1],
+                     'unit':"{0}".format(metadata_json['response']['docs'][0]['variable_units'][variable_index])
+                     })
+                    note = "Dataset successfully downloaded."
+                except:
+                    note = "WARNING: Dataset has not been downloaded."
+        else:
+            note = "The selected dataset is not Observation, please enter observation dataset."
+    except:
+        note = "WARNING: Something went wrong in downloading observation dataset from ESGF."
+
+    return  note
+
+
+def unselect_obs_screen(header):
+    '''Generates screen to be able to unselect observations.
+    Observations can be unselected by entering index allocated to them.
+
+    :param header: Header of page
+    :type header: string
+
+    :returns: Notification
+    :rtype: string
+    '''
+
+    ready_screen("unselect_obs_screen")
+    screen.addstr(1, 1, header + " > Unselect Observation ")
+    screen.addstr(6, 1, "List Observation(s):")
+    for i, obs_info in enumerate(observations_info):
+         screen.addstr(8 + i, 10, " [" + str(i) + "] : " + " Dataset ID: " + obs_info['dataset_id'] + " - Parameter ID: "+ obs_info['parameter_id'] + " - Database: "+ obs_info['database'])
+    screen.addstr(3, 2, "Select the observation to remove (press enter to go back): ")
+    try:
+         obs_remove_index = screen.getstr()
+         observations_info.pop(int(obs_remove_index))
+         note = "Observation sucessfully unselected."
+    except:
+         note = "WARNING: Unselecting model was not successful."
+
+    return note
+
+
+def list_obs_screen(header):
+    '''Generates screen to list observations.
+
+    :param header: Header of page
+    :type header: string
+    '''
+
+    ready_screen("list_obs_screen")
+    screen.addstr(1, 1, header + " > List Observation ")
+    screen.addstr(6, 6, "List of observation(s): ")
+    for i, obs_info in enumerate(observations_info):
+         screen.addstr(8 + i, 10, " [" + str(i) + "] : " + " Dataset ID: " + obs_info['dataset_id'] + " - Parameter ID: "+ obs_info['parameter_id'] + " - Database: "+ obs_info['database'])
+    screen.addstr(4, 4, "Return to Manage Observation (press Enter) :")
+    screen.getstr()
+
+
+def manage_obs_screen(header, note=""):
+    '''Generates Manage Observation screen.
+
+    :param header: Header of page
+    :type header: string
+    :param note: Notification, defaults to an empty string.
+    :type note: string
+    '''
+
+    option = ''
+    while option != '0':
+         ready_screen("manage_obs_screen", note)
+         screen.addstr(1, 1, header)
+         screen.addstr(4, 4, "1 - Select Observation")
+         screen.addstr(6, 4, "2 - Load ESGF Observation")
+         screen.addstr(8, 4, "3 - Unselect Observation")
+         screen.addstr(10, 4, "4 - List Observation")
+         screen.addstr(12, 4, "0 - Return to Main Menu")
+         screen.addstr(14, 2, "Select an option: ")
+         screen.refresh()
+
+         option = screen.getstr()
+         if option == '1':
+              note = select_obs_screen(header)
+         if option == '2':
+              note = load_esgf_obs_screen(header)
+         if option == '3':
+              note = unselect_obs_screen(header)
+         if option == '4':
+              list_obs_screen(header)
+              note = " "
+
+
+##############################################################
+#     Run Evaluation Screen
+##############################################################
+
+def run_screen(model_datasets, models_info, observations_info,
+               overlap_start_time, overlap_end_time, overlap_min_lat,
+               overlap_max_lat, overlap_min_lon, overlap_max_lon,
+               temp_grid_setting, spatial_grid_setting, reference_dataset, target_datasets, metric, working_directory, plot_title):
+    '''Generates screen to show running evaluation process.
+
+    :param model_datasets: list of model dataset objects
+    :type model_datasets: list
+    :param models_info: list of dictionaries that contain information for each model
+    :type models_info: list
+    :param observations_info: list of dictionaries that contain information for each observation
+    :type observations_info: list
+    :param overlap_start_time: overlap start time between model and obs start time
+    :type overlap_start_time: datetime
+    :param overlap_end_time: overlap end time between model and obs end time
+    :type overlap_end_time: datetime
+    :param overlap_min_lat: overlap minimum lat between model and obs minimum lat
+    :type overlap_min_lat: float
+    :param overlap_max_lat: overlap maximum lat between model and obs maximum lat
+    :type overlap_max_lat: float
+    :param overlap_min_lon: overlap minimum lon between model and obs minimum lon
+    :type overlap_min_lon: float
+    :param overlap_max_lon: overlap maximum lon between model and obs maximum lon
+    :type overlap_max_lon: float
+    :param temp_grid_setting: temporal grid option such as hourly, daily, monthly and annually
+    :type temp_grid_setting: string
+    :param spatial_grid_setting: spatial grid resolution (in degrees) used for regridding
+    :type spatial_grid_setting: string
+    :param reference_dataset: dictionary of reference dataset
+    :type reference_dataset: dictionary
+    :param target_datasets: dictionary of all target datasets
+    :type target_datasets: dictionary
+    :param metric: name of selected metric
+    :type metric: string
+    :param working_directory: path to a directory for storing outputs
+    :type working_directory: string
+    :param plot_title: Title for plot
+    :type plot_title: string
+    '''
+    try:
+        target_datasets_ensemble = []
+        new_model_datasets = model_datasets[:]
+
+        option = None
+        if option != "0":
+             ready_screen("run_evaluation_screen")
+             y = screen.getmaxyx()[0]
+             screen.addstr(2, 2, "Evaluation started....")
+             screen.refresh()
+
+             screen.addstr(4, 4, "Retrieving data...")
+             screen.refresh()
+             obs_dataset = []
+             for i in range(len(observations_info)):
+                  if observations_info[i]['dataset_id'] == "esgf":
+                      obs_dataset.append(load_file(observations_info[i]['database'], observations_info[i]['parameter_id']))
+                  else:
+                      dataset_id = int(observations_info[i]['dataset_id'])
+                      parameter_id = int(observations_info[i]['parameter_id'])
+                      obs_dataset.append(rcmed.parameter_dataset(
+                          dataset_id,
+                          parameter_id,
+                          overlap_min_lat,
+                          overlap_max_lat,
+                          overlap_min_lon,
+                          overlap_max_lon,
+                          overlap_start_time,
+                          overlap_end_time))
+
+             screen.addstr(4, 4, "--> Data retrieved.")
+             screen.refresh()
+
+             EVAL_BOUNDS = Bounds(overlap_min_lat, overlap_max_lat, overlap_min_lon, overlap_max_lon, overlap_start_time, overlap_end_time)
+
+             screen.addstr(5, 4, "Temporally regridding...")
+             screen.refresh()
+             if temp_grid_setting.lower() == 'hourly':
+                  days = 0.5
+             elif temp_grid_setting.lower() == 'daily':
+                  days = 1
+             elif temp_grid_setting.lower() == 'monthly':
+                  days = 31
+             else:
+                  days = 365
+             for i in range(len(obs_dataset)):
+                  obs_dataset[i] = dsp.temporal_rebin(obs_dataset[i], timedelta(days))
+
+             for member, each_target_dataset in enumerate(new_model_datasets):
+                  new_model_datasets[member] = dsp.temporal_rebin(new_model_datasets[member], timedelta(days))
+                  new_model_datasets[member] = dsp.subset(EVAL_BOUNDS, new_model_datasets[member])
+             screen.addstr(5, 4, "--> Temporally regridded.")
+             screen.refresh()
+
+             screen.addstr(6, 4, "Spatially regridding...")
+             screen.refresh()
+             new_lats = np.arange(overlap_min_lat, overlap_max_lat, spatial_grid_setting)
+             new_lons = np.arange(overlap_min_lon, overlap_max_lon, spatial_grid_setting)
+             for i in range(len(obs_dataset)):
+                  obs_dataset[i] = dsp.spatial_regrid(obs_dataset[i], new_lats, new_lons)
+
+             for member, each_target_dataset in enumerate(new_model_datasets):
+                  new_model_datasets[member] = dsp.spatial_regrid(new_model_datasets[member], new_lats, new_lons)
+             screen.addstr(6, 4, "--> Spatially regridded.")
+             screen.refresh()
+
+             if metric == 'bias':
+                  for i in range(len(obs_dataset)):
+                       _, obs_dataset[i].values = utils.calc_climatology_year(obs_dataset[i])
+                       obs_dataset[i].values = np.expand_dims(obs_dataset[i].values, axis=0)
+
+                  for member, each_target_dataset in enumerate(new_model_datasets):
+                          _, new_model_datasets[member].values = utils.calc_climatology_year(new_model_datasets[member])
+                          new_model_datasets[member].values = np.expand_dims(new_model_datasets[member].values, axis=0)
+
+                  allNames = []
+
+                  for model in new_model_datasets:
+                          allNames.append(model.name)
+
+                  screen.addstr(7, 4, "Setting up metrics...")
+                  screen.refresh()
+                  mean_bias = metrics.Bias()
+                  pattern_correlation = metrics.PatternCorrelation()
+                  spatial_std_dev_ratio = metrics.StdDevRatio()
+                  screen.addstr(7, 4, "--> Metrics setting done.")
+                  screen.refresh()
+
+                  screen.addstr(8, 4, "Running evaluation.....")
+                  screen.refresh()
+                  if reference_dataset[:3] == 'obs':
+                       reference = obs_dataset[int(reference_dataset[-1])]
+                  if reference_dataset[:3] == 'mod':
+                       reference = new_model_datasets[int(reference_dataset[-1])]
+
+                  targets = []
+                  for target in target_datasets:
+                       if target[:3] == 'obs':
+                            targets.append(obs_dataset[int(target[-1])])
+                       if target[:3] == 'mod':
+                            targets.append(new_model_datasets[int(target[-1])])
+
+                  evaluation_result = evaluation.Evaluation(reference, targets, [mean_bias])
+                  export_evaluation_to_config(evaluation_result)
+                  evaluation_result.run()
+                  screen.addstr(8, 4, "--> Evaluation Finished.")
+                  screen.refresh()
+
+                  screen.addstr(9, 4, "Generating plots....")
+                  screen.refresh()
+                  rcm_bias = evaluation_result.results[:][0]
+                  new_rcm_bias = np.squeeze(np.array(evaluation_result.results))
+
+                  if not os.path.exists(working_directory):
+                       os.makedirs(working_directory)
+
+                  fname = working_directory + 'Bias_contour'
+                  plotter.draw_contour_map(new_rcm_bias, new_lats, new_lons, gridshape=(2, 5), fname=fname, subtitles=allNames, cmap='coolwarm_r')
+                  screen.addstr(9, 4, "--> Plots generated.")
+                  screen.refresh()
+                  screen.addstr(y-2, 1, "Press 'enter' to Exit: ")
+                  option = screen.getstr()
+
+             if metric == 'std':
+                  for i in range(len(obs_dataset)):
+                       _, obs_dataset[i].values = utils.calc_climatology_year(obs_dataset[i])
+                       obs_dataset[i].values = np.expand_dims(obs_dataset[i].values, axis=0)
+
+                  target_datasets_ensemble = dsp.ensemble(new_model_datasets)
+                  target_datasets_ensemble.name = "ENS"
+                  new_model_datasets.append(target_datasets_ensemble)
+
+                  for member, each_target_dataset in enumerate(new_model_datasets):
+                          _, new_model_datasets[member].values = utils.calc_climatology_year(new_model_datasets[member])
+                          new_model_datasets[member].values = np.expand_dims(new_model_datasets[member].values, axis=0)
+
+                  allNames = []
+
+                  for model in new_model_datasets:
+                          allNames.append(model.name)
+                  pattern_correlation = metrics.PatternCorrelation()
+                  spatial_std_dev = metrics.StdDevRatio()
+
+                  if reference_dataset[:3] == 'obs':
+                       reference = obs_dataset[int(reference_dataset[-1])]
+                  if reference_dataset[:3] == 'mod':
+                       reference = new_model_datasets[int(reference_dataset[-1])]
+
+                  targets = []
+                  for target in target_datasets:
+                       if target[:3] == 'obs':
+                            targets.append(obs_dataset[int(target[-1])])
+                       if target[:3] == 'mod':
+                            targets.append(new_model_datasets[int(target[-1])])
+
+                  evaluation_result = evaluation.Evaluation(reference, targets, [spatial_std_dev])
+                  export_evaluation_to_config(evaluation_result)
+                  evaluation_result.run()
+
+                  rcm_std_dev = evaluation_result.results
+                  evaluation_result = evaluation.Evaluation(reference, targets, [pattern_correlation])
+                  evaluation_result.run()
+
+                  rcm_pat_cor = evaluation_result.results
+                  taylor_data = np.array([rcm_std_dev, rcm_pat_cor]).transpose()
+                  new_taylor_data = np.squeeze(np.array(taylor_data))
+
+                  if not os.path.exists(working_directory):
+                       os.makedirs(working_directory)
+
+                  fname = working_directory + 'taylor_plot'
+
+                  plotter.draw_taylor_diagram(new_taylor_data, allNames, "CRU31", fname=fname, fmt='png', frameon=False)
+        del new_model_datasets
+        del obs_dataset
+        return "No error"
+    except Exception, error:
+         return "Error: {0}".format(error[0][:200])
+
+
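One detail from run_screen worth pulling out: the temporal regridding step maps the grid setting to a coarse day count before calling dsp.temporal_rebin. A standalone sketch of that mapping, with the values copied from the code above:

    from datetime import timedelta

    days_for = {'hourly': 0.5, 'daily': 1, 'monthly': 31}
    temp_grid_setting = 'Monthly'
    bin_width = timedelta(days_for.get(temp_grid_setting.lower(), 365))   # anything else falls back to annual
    print(bin_width)   # 31 days, 0:00:00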
+##############################################################
+#     Settings Screen
+##############################################################
+
+def get_models_temp_bound():
+    '''Get models temporal bound.
+
+    :returns: model start and end time
+    :rtype: (datetime, datetime)
+    '''
+
+    models_start_time = []
+    models_end_time = []
+    for model in model_datasets:
+         models_start_time.append(model.time_range()[0])
+         models_end_time.append(model.time_range()[1])
+
+    return models_start_time, models_end_time
+
+
+def get_obs_temp_bound():
+    '''Get observation temporal bound.
+
+    :returns: observation start and end time
+    :rtype: (datetime, datetime)
+    '''
+
+    observations_start_time = []
+    observations_end_time = []
+    for obs in observations_info:
+         obs_start_time = datetime.strptime(obs['start_date'], "%Y-%m-%d")
+         observations_start_time.append(obs_start_time)
+         obs_end_time = datetime.strptime(obs['end_date'], "%Y-%m-%d")
+         observations_end_time.append(obs_end_time)
+
+    return observations_start_time, observations_end_time
+
+
+def get_models_temp_overlap(models_start_time, models_end_time):
+    '''Calculate temporal overlap between all the models
+
+    :param models_start_time: models start time
+    :type models_start_time: list of datetimes
+    :param models_end_time: models end time
+    :type models_end_time: list of datetime
+
+    :returns: overlap start and end time between all the models
+    :rtype: (datetime, datetime)
+    '''
+
+    models_overlap_start_time = max(models_start_time)
+    models_overlap_end_time = min(models_end_time)
+
+    #Need to check if all models have temporal overlap, otherwise return
+    # to main menu and print a warning as notification.
+    if models_overlap_end_time <= models_overlap_start_time:
+         main_menu(model_datasets, models_info, observation_datasets, observations_info, note="WARNING: One or more models do not have temporal overlap with the others.")
+
+    return models_overlap_start_time, models_overlap_end_time
+
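The overlap rule above is simply the latest start against the earliest end; a small worked example with made-up dates:

    from datetime import datetime

    starts = [datetime(1989, 1, 1), datetime(2000, 1, 1)]
    ends = [datetime(2008, 12, 31), datetime(2007, 12, 31)]
    overlap_start, overlap_end = max(starts), min(ends)
    print(overlap_start, overlap_end)   # 2000-01-01 .. 2007-12-31
    # if overlap_end <= overlap_start, the CLI instead warns and returns to the main menu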
+
+def get_obs_temp_overlap(observations_start_time, observations_end_time):
+    '''Calculate temporal overlap between all the observations
+
+    :param observations_start_time: observations start time
+    :type observations_start_time: list of datetimes
+    :param observations_end_time: observations end time
+    :type observations_end_time: list of datetime
+
+    :returns: overlap start and end time between all the observations
+    :rtype: (datetime, datetime)
+    '''
+
+    obs_overlap_start_time = max(observations_start_time)
+    obs_overlap_end_time = min(observations_end_time)
+
+    #Need to check if all observations have temporal overlap, otherwise return
+    # to main menu and print a warning as notification.
+    if obs_overlap_end_time <= obs_overlap_start_time:
+         main_menu(model_datasets, models_info, observation_datasets, observations_info, note="WARNING: One or more observations do not have temporal overlap with the others.")
+
+    return obs_overlap_start_time, obs_overlap_end_time
+
+
+def get_all_temp_overlap(models_overlap_start_time, models_overlap_end_time, obs_overlap_start_time, obs_overlap_end_time):
+    '''Calculate temporal overlap between given datasets.
+
+    :param models_overlap_start_time: models overlap start time
+    :type models_overlap_start_time: list of datetimes
+    :param models_overlap_end_time: models overlap end time
+    :type models_overlap_end_time: list of datetime
+    :param obs_overlap_start_time: obs overlap start time
+    :type obs_overlap_start_time: list of datetimes
+    :param obs_overlap_end_time: obs overlap end time
+    :type obs_overlap_end_time: list of datetimes
+
+    :returns: overlap start and end time between models and observations
+    :rtype: (datetime, datetime)
+    '''
+
+    all_overlap_start_time = max([models_overlap_start_time, obs_overlap_start_time])
+    all_overlap_end_time = min([models_overlap_end_time, obs_overlap_end_time])
+
+    #Need to check if all datasets have temporal overlap, otherwise return
+    # to main menu and print a warning as notification.
+    if all_overlap_end_time <= all_overlap_start_time:
+         main_menu(model_datasets, models_info, observation_datasets, observations_info, note="WARNING: One or more datasets do not have temporal overlap with the others.")
+
+    return all_overlap_start_time, all_overlap_end_time
+
+
+def get_models_spatial_bound():               #TODO: convert longitudes to -180, 180 to match with observation data
+    '''Get all models spatial bound.
+
+    :returns: all models spatial boundaries
+    :rtype: list
+    '''
+
+    models_bound = []
+    for model in model_datasets:
+         models_bound.append(model.spatial_boundaries())
+
+    return models_bound
+
+
+def get_models_spatial_overlap(models_bound):
+    '''Calculate spatial overlap between all models.
+
+    :param models_bound: all models spatial boundaries information
+    :type models_bound: list
+
+    :returns: spatial boundaries overlap between all models
+    :rtype: (float, float, float, float)
+    '''
+
+    models_overlap_min_lat = max(each[0] for each in models_bound)
+    models_overlap_max_lat = min(each[1] for each in models_bound)
+    models_overlap_min_lon = max(each[2] for each in models_bound)
+    models_overlap_max_lon = min(each[3] for each in models_bound)
+
+    #Need to check if all models have spatial overlap, otherwise return
+    # to main menu and print a warning as notification.
+    if models_overlap_max_lat <= models_overlap_min_lat or models_overlap_max_lon <= models_overlap_min_lon:
+         main_menu(model_datasets, models_info, observation_datasets, observations_info, note="WARNING: One or more models do not have spatial overlap with the others.")
+
+    return models_overlap_min_lat, models_overlap_max_lat, models_overlap_min_lon, models_overlap_max_lon
+
+
+def get_obs_spatial_bound():
+    '''Get all observations spatial bound.
+
+    :returns: all observations spatial boundaries
+    :rtype: list
+    '''
+
+    observations_bound = []
+    for obs in observations_info:
+         observations_bound.append([obs['min_lat'], obs['max_lat'], obs['min_lon'], obs['max_lon']])
+
+    return observations_bound
+
+
+def get_obs_spatial_overlap(observations_bound):
+    '''Calculate spatial overlap between all observations.
+
+    :param observations_bound: all observations spatial boundaries information
+    :type observations_bound: list
+
+    :returns: spatial boundaries overlap between all observations
+    :rtype: (float, float, float, float)
+    '''
+
+    obs_overlap_min_lat = max(each[0] for each in observations_bound)
+    obs_overlap_max_lat = min(each[1] for each in observations_bound)
+    obs_overlap_min_lon = max(each[2] for each in observations_bound)
+    obs_overlap_max_lon = min(each[3] for each in observations_bound)
+
+    #Need to check if all observations have spatial overlap, otherwise return
+    # to main menu and print a warning as notification.
+    if obs_overlap_max_lat <= obs_overlap_min_lat or obs_overlap_max_lon <= obs_overlap_min_lon:
+         main_menu(model_datasets, models_info, observation_datasets, observations_info, note="WARNING: One or more observations do not have spatial overlap with the others.")
+
+    return obs_overlap_min_lat, obs_overlap_max_lat, obs_overlap_min_lon, obs_overlap_max_lon
+
+
+def get_all_spatial_overlap(models_overlap_min_lat, models_overlap_max_lat, models_overlap_min_lon, models_overlap_max_lon, obs_overlap_min_lat, obs_overlap_max_lat, obs_overlap_min_lon, obs_overlap_max_lon):
+    '''Calculate spatial overlap between all models and observations
+
+    :param models_overlap_min_lat: min latitude between all models
+    :type models_overlap_min_lat: float
+    :param models_overlap_max_lat: max latitude between all models
+    :type models_overlap_max_lat: float
+    :param models_overlap_min_lon: min longitude between all models
+    :type models_overlap_min_lon: float
+    :param models_overlap_max_lon: max longitude between all models
+    :type models_overlap_max_lon: float
+    :param obs_overlap_min_lat: min latitude between all observations
+    :type obs_overlap_min_lat: float
+    :param obs_overlap_max_lat: max latitude between all observations
+    :type obs_overlap_max_lat: float
+    :param obs_overlap_min_lon: min longitude between all observations
+    :type obs_overlap_min_lon: float
+    :param obs_overlap_max_lon: max longitude between all observations
+    :type obs_overlap_max_lon: float
+
+    :returns: spatial boundaries overlap between all models and observations
+    :rtype: (float, float, float, float)
+    '''
+
+    all_overlap_min_lat = max([models_overlap_min_lat, obs_overlap_min_lat])
+    all_overlap_max_lat = min([models_overlap_max_lat, obs_overlap_max_lat])
+    all_overlap_min_lon = max([models_overlap_min_lon, obs_overlap_min_lon])
+    all_overlap_max_lon = min([models_overlap_max_lon, obs_overlap_max_lon])
+
+    #Need to check if all datasets have spatial overlap, otherwise return
+    # to main menu and print a warning as notification.
+    if all_overlap_max_lat <= all_overlap_min_lat or all_overlap_max_lon <= all_overlap_min_lon:
+         main_menu(model_datasets, models_info, observation_datasets, observations_info, note="WARNING: One or more datasets do not have spatial overlap with the others.")
+
+    return all_overlap_min_lat, all_overlap_max_lat, all_overlap_min_lon, all_overlap_max_lon
+
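The spatial case applies the same intersection idea on each edge; a numeric example with the model bounds borrowed from the example scripts and an invented observation box:

    model_bounds = (-45.0, 42.24, -24.0, 60.0)   # min_lat, max_lat, min_lon, max_lon
    obs_bounds = (-40.0, 40.0, -20.0, 55.0)      # illustrative
    overlap = (max(model_bounds[0], obs_bounds[0]), min(model_bounds[1], obs_bounds[1]),
               max(model_bounds[2], obs_bounds[2]), min(model_bounds[3], obs_bounds[3]))
    print(overlap)   # (-40.0, 40.0, -20.0, 55.0); an empty box triggers the warning path instead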
+
+def get_models_temp_res():
+    '''Get models temporal resolution.
+
+    :returns: models resolution
+    :rtype: string
+    '''
+
+    models_resolution = []
+    for model in model_datasets:
+         models_resolution.append(model.temporal_resolution())
+    dic = {0:"hourly", 1:"daily", 2:"monthly", 3:"yearly"}
+    models_resolution_key = []
+    for res in models_resolution:
+         for key, value in dic.items():
+              if value == res:
+                   models_resolution_key.append(key)
+
+    return dic[max(models_resolution_key)]
+
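The resolution helpers pick the coarsest label present by mapping labels to ordered keys and taking the maximum; for example:

    dic = {0: "hourly", 1: "daily", 2: "monthly", 3: "yearly"}
    inverse = {value: key for key, value in dic.items()}
    resolutions = ["daily", "monthly", "daily"]
    print(dic[max(inverse[res] for res in resolutions)])   # -> "monthly"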
+
+def get_obs_temp_res():
+    '''Get observations temporal resolution.
+
+    :returns: observations resolution
+    :rtype: string
+    '''
+
+    obs_resolution = []
+    for obs in observations_info:
+         obs_resolution.append(obs['timestep'])
+    dic = {0:"hourly", 1:"daily", 2:"monthly", 3:"yearly"}
+    obs_resolution_key = []
+    for res in obs_resolution:
+         for key, value in dic.items():
+              if value == res:
+                   obs_resolution_key.append(key)
+
+    return dic[max(obs_resolution_key)]
+
+
+def get_models_spatial_res():
+    '''Get models spatial resolution
+
+    :returns: maximum models latitude and longitude resolution
+    :rtype: float, float
+    '''
+
+    models_lat_res = []
+    models_lon_res = []
+    for model in model_datasets:
+         models_lat_res.append(model.spatial_resolution()[0])
+         models_lon_res.append(model.spatial_resolution()[1])
+
+    return max(models_lat_res), max(models_lon_res)
+
+
+def get_obs_spatial_res():
+    '''Get observations spatial resolution
+
+    :returns: maximum observations latitude and longitude resolution
+    :rtype: float, float
+    '''
+
+    obs_lat_res = []
+    obs_lon_res = []
+    for obs in observations_info:
+         obs_lat_res.append(obs['lat_res'])
+         obs_lon_res.append(obs['lon_res'])
+
+    return max(obs_lat_res), max(obs_lon_res)
+
+
+def settings_screen(header):
+    '''Generates screen for settings before running evaluation.
+
+    :param header: Header of page
+    :type header: string
+    '''
+
+    note = " "
+    models_start_time, models_end_time = get_models_temp_bound()
+    models_overlap_start_time, models_overlap_end_time = get_models_temp_overlap(models_start_time, models_end_time)
+    observations_start_time, observations_end_time = get_obs_temp_bound()
+    obs_overlap_start_time, obs_overlap_end_time = get_obs_temp_overlap(observations_start_time, observations_end_time)
+    all_overlap_start_time, all_overlap_end_time = get_all_temp_overlap(models_overlap_start_time, models_overlap_end_time, obs_overlap_start_time, obs_overlap_end_time)
+    models_bound = get_models_spatial_bound()
+    models_overlap_min_lat, models_overlap_max_lat, models_overlap_min_lon, models_overlap_max_lon = get_models_spatial_overlap(models_bound)
+    observations_bound = get_obs_spatial_bound()
+    obs_overlap_min_lat, obs_overlap_max_lat, obs_overlap_min_lon, obs_overlap_max_lon = get_obs_spatial_overlap(observations_bound)
+    all_overlap_min_lat, all_overlap_max_lat, all_overlap_min_lon, all_overlap_max_lon = get_all_spatial_overlap(models_overlap_min_lat,
+                                                                                                                 models_overlap_max_lat,
+                                                                                                                 models_overlap_min_lon,
+                                                                                                                 models_overlap_max_lon,
+                                                                                                                 obs_overlap_min_lat,
+                                                                                                                 obs_overlap_max_lat,
+                                                                                                                 obs_overlap_min_lon,
+                                                                                                                 obs_overlap_max_lon)
+    model_temp_res = get_models_temp_res()
+    obs_temp_res = get_obs_temp_res()
+    model_lat_res, model_lon_res = get_models_spatial_res()
+    obs_lat_res, obs_lon_res = get_obs_spatial_res()
+
+    temp_grid_option = "Observation"
+    temp_grid_setting = obs_temp_res
+    spatial_grid_option = "Observation"
+    spatial_grid_setting = obs_lat_res
+    models_dict = {}
+
+    for i, model_info in enumerate(models_info):
+         models_dict['mod{0}'.format(i)] = model_info
+    obs_dict = {}
+    for i, obs_info in enumerate(observations_info):
+         obs_dict['obs{0}'.format(i)] = obs_info
+
+    reference_dataset = 'obs0'
+    target_datasets = []
+    for i in range(len(model_datasets)):
+         target_datasets.append('mod{0}'.format(i))
+    subregion_path = None
+    metrics_dict = {'1':'bias', '2':'std'}
+    metric = 'bias'
+    plots = {'bias':"contour map", 'std':"taylor diagram, bar chart(coming soon)"}
+    working_directory = os.getcwd() + "/plots/"  #Default value of working directory set to "plots" folder in current directory
+    plot_title = '' #TODO: ask user about plot title or figure out automatically
+
+    fix_min_time = all_overlap_start_time
+    fix_max_time = all_overlap_end_time
+    fix_min_lat = all_overlap_min_lat
+    fix_max_lat = all_overlap_max_lat
+    fix_min_lon = all_overlap_min_lon
+    fix_max_lon = all_overlap_max_lon
+
+    option = ''
+    while option != '0':
+         y, x = ready_screen("settings_screen", note)
+         screen.addstr(1, 1, header)
+         screen.addstr(3, 1, "INFORMATION")
+         screen.addstr(4, 1, "===========")
+         screen.addstr(6, 2, "Number of model file:   {0}".format(str(len(model_datasets))))
+         screen.addstr(7, 2, "Number of observation:  {0}".format(str(len(observations_info))))
+         screen.addstr(8, 2, "Temporal Boundaries:")
+         screen.addstr(9, 5, "Start time = {0}".format(all_overlap_start_time))
+         screen.addstr(10, 5, "End time = {0}".format(all_overlap_end_time))
+         screen.addstr(11, 2, "Spatial Boundaries:")
+         screen.addstr(12, 5, "min-lat = {0}".format(all_overlap_min_lat))
+         screen.addstr(13, 5, "max-lat = {0}".format(all_overlap_max_lat))
+         screen.addstr(14, 5, "min-lon = {0}".format(all_overlap_min_lon))
+         screen.addstr(15, 5, "max-lon = {0}".format(all_overlap_max_lon))
+         screen.addstr(16, 2, "Temporal Resolution:")
+         screen.addstr(17, 5, "Model = {0}".format(model_temp_res))
+         screen.addstr(18, 5, "Observation = {0}".format(obs_temp_res))
+         screen.addstr(19, 2, "Spatial Resolution:")
+         screen.addstr(20, 5, "Model:")
+         screen.addstr(21, 10, "lat = {0}".format(model_lat_res))
+         screen.addstr(22, 10, "lon = {0}".format(model_lon_res))
+         screen.addstr(23, 5, "Observation:")
+         screen.addstr(24, 10, "lat = {0}".format(obs_lat_res))
+         screen.addstr(25, 10, "lon = {0}".format(obs_lon_res))
+         screen.addstr(26, 2, "Temporal Grid Option:  {0}".format(temp_grid_option))
+         screen.addstr(27, 2, "Spatial Grid Option:   {0}".format(spatial_grid_option))
+         screen.addstr(28, 2, "Reference Dataset: {0}".format(reference_dataset))
+         screen.addstr(29, 2, "Target Dataset/s: {0}".format([mod for mod in target_datasets]))
+         screen.addstr(30, 2, "Working Directory:")
+         screen.addstr(31, 5, "{0}".format(working_directory))
+         screen.addstr(32, 2, "Metric: {0}".format(metric))
+         screen.addstr(33, 2, "Plot: {0}".format(plots[metric]))
+
+         screen.addstr(3, x/2, "MODIFICATION and RUN")
+         screen.addstr(4, x/2, "====================")
+         screen.addstr(6, x/2, "1 - Change Temporal Boundaries")
+         screen.addstr(7, x/2, "2 - Change Spatial Boundaries")
+         screen.addstr(8, x/2, "3 - Change Temporal Gridding")
+         screen.addstr(9, x/2, "4 - Change Spatial Gridding")
+         screen.addstr(10, x/2, "5 - Change Reference dataset")
+         screen.addstr(11, x/2, "6 - Change Target dataset/s")
+         screen.addstr(12, x/2, "7 - Change Metric")
+         screen.addstr(13, x/2, "8 - Change Working Directory")
+         screen.addstr(14, x/2, "9 - Change Plot Title [Coming Soon....]")
+         screen.addstr(15, x/2, "10 - Save the processed data [Coming Soon....]")
+         screen.addstr(16, x/2, "11 - Show Temporal Boundaries")
+         screen.addstr(17, x/2, "12 - Show Spatial Boundaries")
+         screen.addstr(18, x/2, "0 - Return to Main Menu")
+         screen.addstr(20, x/2, "r - Run Evaluation")
+         screen.addstr(22, x/2, "Select an option: ")
+
+         screen.refresh()
+         option = screen.getstr()
+
+         if option == '1':
+              screen.addstr(25, x/2, "Enter Start Time [min time: {0}] (Format YYYY-MM-DD):".format(fix_min_time))
+              new_start_time = screen.getstr()
+              try:
+                   new_start_time = datetime.strptime(new_start_time, '%Y-%m-%d')
+                   new_start_time_int = int("{0}{1:02d}".format(new_start_time.year, new_start_time.month))
+                   fix_min_time_int = int("{0}{1:02d}".format(fix_min_time.year, fix_min_time.month))
+                   fix_max_time_int = int("{0}{1:02d}".format(fix_max_time.year, fix_max_time.month))
+                   all_overlap_end_time_int = int("{0}{1:02d}".format(all_overlap_end_time.year, all_overlap_end_time.month))
+                   if new_start_time_int < fix_min_time_int \
+                   or new_start_time_int > fix_max_time_int \
+                   or new_start_time_int > all_overlap_end_time_int:
+                        note = "Start time has not changed. "
+                   else:
+                        all_overlap_start_time = new_start_time
+                        note = "Start time has changed successfully. "
+              except:
+                   note = "Start time has not changed. "
+              screen.addstr(26, x/2, "Enter End Time [max time:{0}] (Format YYYY-MM-DD):".format(fix_max_time))
+              new_end_time = screen.getstr()
+              try:
+                   new_end_time = datetime.strptime(new_end_time, '%Y-%m-%d')
+                   # Zero-pad the month so the YYYYMM values compare correctly as integers.
+                   new_end_time_int = int("{0}{1:02d}".format(new_end_time.year, new_end_time.month))
+                   fix_min_time_int = int("{0}{1:02d}".format(fix_min_time.year, fix_min_time.month))
+                   fix_max_time_int = int("{0}{1:02d}".format(fix_max_time.year, fix_max_time.month))
+                   all_overlap_start_time_int = int("{0}{1:02d}".format(all_overlap_start_time.year, all_overlap_start_time.month))
+                   if new_end_time_int > fix_max_time_int \
+                   or new_end_time_int < fix_min_time_int \
+                   or new_end_time_int < all_overlap_start_time_int:
+                        note = note + " End time has not changed. "
+                   else:
+                        all_overlap_end_time = new_end_time
+                        note = note + " End time has changed successfully. "
+              except:
+                   note = note + " End time has not changed. "
+
+         if option == '2':
+              screen.addstr(25, x/2, "Enter Minimum Latitude [{0}]:".format(fix_min_lat))
+              new_min_lat = screen.getstr()
+              try:
+                   new_min_lat = float(new_min_lat)
+                   if new_min_lat < fix_min_lat or new_min_lat > fix_max_lat or new_min_lat > all_overlap_max_lat:
+                        note = "Minimum latitude has not changed. "
+                   else:
+                        all_overlap_min_lat = new_min_lat
+                        note = "Minimum latitude has changed successfully. "
+              except:
+                   note = "Minimum latitude has not changed. "
+              screen.addstr(26, x/2, "Enter Maximum Latitude [{0}]:".format(fix_max_lat))
+              new_max_lat = screen.getstr()
+              try:
+                   new_max_lat = float(new_max_lat)
+                   if new_max_lat > fix_max_lat or new_max_lat < fix_min_lat or new_max_lat < all_overlap_min_lat:
+                        note = note + " Maximum latitude has not changed. "
+                   else:
+                        all_overlap_max_lat = new_max_lat
+                        note = note + "Maximum latitude has changed successfully. "
+              except:
+                   note = note + " Maximum latitude has not changed. "
+              screen.addstr(27, x/2, "Enter Minimum Longitude [{0}]:".format(fix_min_lon))
+              new_min_lon = screen.getstr()
+              try:
+                   new_min_lon = float(new_min_lon)
+                   if new_min_lon < fix_min_lon or new_min_lon > fix_max_lon or new_min_lon > all_overlap_max_lon:
+                        note = note + " Minimum longitude has not changed. "
+                   else:
+                        all_overlap_min_lon = new_min_lon
+                        note = note + "Minimum longitude has changed successfully. "
+              except:
+                   note = note + " Minimum longitude has not changed. "
+              screen.addstr(28, x/2, "Enter Maximum Longitude [{0}]:".format(fix_max_lon))
+              new_max_lon = screen.getstr()
+              try:
+                   new_max_lon = float(new_max_lon)
+                   if new_max_lon > fix_max_lon or new_max_lon < fix_min_lon or new_max_lon < all_overlap_min_lon:
+                        note = note + " Maximum longitude has not changed. "
+                   else:
+                        all_overlap_max_lon = new_max_lon
+                        note = note + "Maximum longitude has changed successfully. "
+              except:
+                   note = note + " Maximum longitude has not changed. "
+
+         if option == '3':
+              screen.addstr(25, x/2, "Enter Temporal Gridding Option [Model or Observation]:")
+              new_temp_grid_option = screen.getstr()
+              if new_temp_grid_option.lower() == 'model':
+                   temp_grid_option = 'Model'
+                   temp_grid_setting = model_temp_res
+                   note = "Temporal gridding option has changed successfully to {0}".format(temp_grid_option)
+              elif new_temp_grid_option.lower() == 'observation':
+                   temp_grid_option = 'Observation'
+                   temp_grid_setting = obs_temp_res
+                   note = "Temporal gridding option has changed successfully to {0}".format(temp_grid_option)
+              else:
+                   note = "Temporal gridding option has not changed."
+
+         if option == '4':
+              screen.addstr(25, x/2, "Enter Spatial Gridding Option [Model, Observation or User]:")
+              new_spatial_grid_option = screen.getstr()
+              if new_spatial_grid_option.lower() == 'model':
+                   spatial_grid_option = 'Model'
+                   spatial_grid_setting = model_lat_res
+                   note = "Spatial gridding option has changed successfully to {0}".format(spatial_grid_option)
+              elif new_spatial_grid_option.lower() == 'observation':
+                   spatial_grid_option = 'Observation'
+                   spatial_grid_setting = obs_lat_res
+                   note = "Spatial gridding option has changed successfully to {0}".format(spatial_grid_option)
+              elif new_spatial_grid_option.lower() == 'user':
+                   screen.addstr(26, x/2, "Please enter spatial resolution: ")
+                   user_res = screen.getstr()
+                   try:
+                        user_res = float(user_res)
+                        spatial_grid_option = 'User: resolution {0}'.format(str(user_res))
+                        spatial_grid_setting = user_res
+                        note = "Spatial gridding option has changed successfully to {0}".format(spatial_grid_option)
+                   except:
+                        note = "Spatial gridding option has not changed."
+              else:
+                   note = "Spatial gridding option has not changed."
+
+         if option == '5':
+              screen.addstr(25, x/2, "Model/s:")
+              for i, model_key in enumerate(models_dict):
+                   screen.addstr(26 + i, x/2 + 2, "{0}: {1}".format(model_key, models_dict[model_key]['directory'].split("/")[-1]))
+              screen.addstr(26 + len(models_dict), x/2, "Observation/s:")
+              for i, obs_key in enumerate(obs_dict):
+                   screen.addstr(27 + len(models_dict) + i, x/2 + 2, "{0}: {1} - ({2})".format(obs_key, obs_dict[obs_key]['database'], obs_dict[obs_key]['unit']))
+              screen.addstr(27 + len(obs_dict) + len(models_dict), x/2, "Please select reference dataset:")
+              selected_reference = screen.getstr()
+              if selected_reference in models_dict:
+                   reference_dataset = selected_reference
+                   note = "Reference dataset successfully changed."
+              elif selected_reference in obs_dict:
+                   reference_dataset = selected_reference
+                   note = "Reference dataset successfully changed."
+              else:
+                   note = "Reference dataset did not change."
+
+         if option == '6':
+              screen.addstr(25, x/2, "Model/s:")
+              for i, model_key in enumerate(models_dict):
+                   screen.addstr(26 + i, x/2 + 2, "{0}: {1}".format(model_key, models_dict[model_key]['directory'].split("/")[-1]))
+              screen.addstr(26 + len(models_dict), x/2, "Observation/s:")
+              for i, obs_key in enumerate(obs_dict):
+                   screen.addstr(27 + len(models_dict) + i, x/2 + 2, "{0}: {1} - ({2})".format(obs_key, obs_dict[obs_key]['database'], obs_dict[obs_key]['unit']))
+              screen.addstr(27 + len(obs_dict) + len(models_dict), x/2, "Please enter target dataset/s (comma separated for multi target):")
+              selected_target = screen.getstr()
+              selected_target = selected_target.split(",")
+              if selected_target != ['']:
+                   target_datasets = []
+                   for target in selected_target:
+                        if target in models_dict:
+                             target_datasets.append(target)
+                             note = "Target dataset successfully changed."
+                        elif target in obs_dict:
+                             target_datasets.append(target)
+                             note = "Target dataset successfully changed."
+                        else:
+                             note = "Target dataset did not change."
+
+         if option == '7':
+              screen.addstr(25, x/2, "Available metrics:")
+              for i, metric_key in enumerate(sorted(metrics_dict, key=metrics_dict.get)):
+                   screen.addstr(26 + i, x/2 + 2, "[{0}] - {1}".format(metric_key, metrics_dict[metric_key]))
+              screen.addstr(26 + len(metrics_dict), x/2, "Please select a metric:")
+              metric_id = screen.getstr()
+              if metric_id in metrics_dict:
+                   metric = metrics_dict[metric_id]
+                   note = "Metric successfully changed to {0}".format(metric)
+              else:
+                   note = "Metric has not changed."
+
+         if option == '8':
+              screen.addstr(25, x/2, "Please enter working directory path:")
+              new_working_directory = screen.getstr()
+              if new_working_directory:
+                   if new_working_directory[-1] != '/':
+                        new_working_directory = new_working_directory + "/"
+                   working_directory = new_working_directory
+                   note = "Working directory has changed successfully."
+              else:
+                   note = "Working directory has not changed."
+
+         if option == '9':
+              screen.addstr(25, x/2, "Please enter plot title:")
+              plot_title = screen.getstr()
+
+         #if option == '10':
+         #     screen.addstr(25, x/2, "Please enter plot title:")
+         #     plot_title = screen.getstr()
+
+         if option == '11':
+              models_start_time, models_end_time = get_models_temp_bound()
+              line = 25
+              for i, model in enumerate(model_datasets):
+                   model_name = models_info[i]['directory'].split("/")[-1]
+                   line += 1
+                   screen.addstr(line, x/2, "{0}".format(model_name))
+                   line += 1
+                   screen.addstr(line, x/2 + 3, "Start:{0} - End:{1}".format(models_start_time[i], models_end_time[i]))
+
+              observations_start_time, observations_end_time = get_obs_temp_bound()
+              for i, obs in enumerate(observations_info):
+                   line += 1
+                   screen.addstr(line, x/2, "{0}".format(observations_info[i]['database']))
+                   line += 1
+                   screen.addstr(line, x/2 + 3, "Start:{0} - End:{1}".format(observations_start_time[i], observations_end_time[i]))
+              screen.getstr()
+
+         if option == '12':
+              models_bound = get_models_spatial_bound()
+              line = 25
+              for i, model in enumerate(model_datasets):
+                   model_name = models_info[i]['directory'].split("/")[-1]
+                   line += 1
+                   screen.addstr(line, x/2, "{0}".format(model_name))
+                   line += 1
+                   screen.addstr(line, x/2 + 3, "{0}".format(models_bound[i]))
+
+              observations_bound = get_obs_spatial_bound()
+              for i, obs in enumerate(observations_info):
+                   line += 1
+                   screen.addstr(line, x/2, "{0}".format(observations_info[i]['database']))
+                   line += 1
+                   screen.addstr(line, x/2 + 3, "{0}".format(observations_bound[i]))
+              screen.getstr()
+
+         if option.lower() == 'r':
+              note = run_screen(model_datasets, models_info, observations_info, all_overlap_start_time, all_overlap_end_time, \
+                         all_overlap_min_lat, all_overlap_max_lat, all_overlap_min_lon, all_overlap_max_lon, \
+                         temp_grid_setting, spatial_grid_setting, reference_dataset, target_datasets, metric, working_directory, plot_title)
+
+
+##############################################################
+#     Main Menu Screen
+##############################################################
+
+def main_menu(model_datasets, models_info, observation_datasets, observations_info, note=""):
+    '''Generates the main menu page.
+
+    :param model_datasets: list of model dataset objects
+    :type model_datasets: list
+    :param models_info: list of dictionaries that contain information for each model
+    :type models_info: list
+    :param observation_datasets: list of observation dataset objects
+    :type observation_datasets: list
+    :param observations_info: list of dictionaries that contain information for each observation
+    :type observations_info: list
+    :param note: notification message shown at the bottom of the page, defaults to an empty string
+    :type note: string
+    '''
+
+    option = ''
+    while option != '0':
+         ready_screen("main_menu", note)
+         model_status = "NC" if len(model_datasets) == 0 else "C"     #NC (Not Complete) if no model has been added, C (Complete) otherwise
+         obs_status = "NC" if len(observations_info) == 0 else "C"    #NC (Not Complete) if no observation has been added, C (Complete) otherwise
+         screen.addstr(1, 1, "Main Menu:")
+         screen.addstr(4, 4, "1 - Manage Model ({0})".format(model_status))
+         screen.addstr(6, 4, "2 - Manage Observation ({0})".format(obs_status))
+         screen.addstr(8, 4, "3 - Run")
+         screen.addstr(10, 4, "0 - EXIT")
+         screen.addstr(16, 2, "Select an option: ")
+         screen.refresh()
+         option = screen.getstr()
+
+         if option == '1':
+              header = "Main Menu > Manage Model"
+              manage_model_screen(header)
+         if option == '2':
+              header = "Main Menu > Manage Observation"
+              manage_obs_screen(header)
+         if option == '3':
+              if model_status == 'NC' or obs_status == 'NC':
+                   main_menu(model_datasets, models_info, observation_datasets, observations_info, note="WARNING: Please complete step 1 and 2 before 3.")
+              else:
+                   header = "Main Menu > Run"
+                   settings_screen(header)
+    curses.endwin()
+    sys.exit()
+
+
+if __name__ == '__main__':
+     TITLE = "Project Name"
+     ORGANIZATION = "Organization Name"
+     screen = curses.initscr()
+     model_datasets = []           #list of model dataset objects
+     models_info = []              #list of dictionaries that contain information for each model
+     observation_datasets = []     #list of observation dataset objects
+     observations_info = []        #list of dictionaries that contain information for each observation
+     main_menu(model_datasets, models_info, observation_datasets, observations_info)
diff --git a/ocw-cli/ocw_cli.py b/ocw-cli/ocw_cli.py
deleted file mode 100644
index 85fe2ab..0000000
--- a/ocw-cli/ocw_cli.py
+++ /dev/null
@@ -1,848 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-
-import curses
-import sys
-import os
-import numpy as np
-
-from netCDF4 import Dataset
-from datetime import datetime, timedelta
-
-import ocw.metrics as metrics
-import ocw.plotter as plotter
-import ocw.dataset_processor as dsp
-import ocw.evaluation as evaluation
-import ocw.data_source.rcmed as rcmed
-from ocw.dataset import Bounds
-from ocw.data_source.local import load_file
-
-
-def ready_screen(page, note=""):
-     ''' Generates page borders, header, footer and notification center.
-
-     :param note: Notification that system returns and will be shown
-          at the bottom of page
-     :type note: string
-     :param page: Name of current page
-     :type page: string
-
-     :returns: y and x as location of text on screen
-     :rtype: integer
-     '''
-
-     screen.clear()
-     y, x = screen.getmaxyx()
-     screen.border(0)
-     screen.addstr(0, x/2-len(TITLE)/2, TITLE)
-     screen.addstr(y-1, x/2-len(ORGANIZATION)/2, ORGANIZATION)
-     screen.addstr(y-3, 1, "Notification:")
-     for each in range(1, x-1):
-          screen.addstr(y-4, each, "-")
-     if page == "main_menu":
-          screen.addstr(y-3, x-21, "(NC) = Not complete")
-          screen.addstr(y-2, x-21, "(C)  = Complete")
-     screen.addstr(y-2, 1, note)
-
-     return y, x
-
-
-##############################################################
-#         Manage Model Screen
-##############################################################
-
-def load_model_screen(header):
-     '''Generates screen to be able to load model file.
-     Path to model file (netCDF) and variable name is required.
-
-     :param header: Header of page
-     :type header: string
-
-     :returns: Notification
-     :rtype: string
-     '''
-
-     ready_screen("load_model_screen")
-     screen.addstr(1, 1, header + " > Load Model File ")
-     screen.addstr(4, 2, "Enter model path: ")
-     model_path = screen.getstr()
-     try:
-          netCDF_file = Dataset(model_path, 'r')
-          all_netcdf_variables = [variable.encode() for variable in netCDF_file.variables.keys()]
-          netCDF_file.close()
-          try:
-               screen.addstr(6, 2, "Enter model variable name {0}: ".format(all_netcdf_variables))
-               variable_name = screen.getstr()
-               model_dataset = load_file(model_path, variable_name)
-               model_datasets.append(model_dataset)
-               models_info.append({ 'directory': model_path,
-                                 'variable_name': variable_name
-                              })
-               note = "Model file successfully added."
-          except:
-               note = "WARNING: Model file cannot be added. The variable [{0}] is not accepted. Please try again.".format(variable_name)
-     except:
-          note = "WARNING: Model file cannot be read. Please check the file directory or format. Only netCDF format is accepted."
-
-     return note
-
-
-
-def unload_model_screen(header):
-     '''Generates screen to be able to unload model file.
-     It lists all loaded model with index for each.
-     Selection of model with index will remove model from list of models.
-
-     :param header: Header of page
-     :type header: string
-
-     :returns: Notification
-     :rtype: string
-     '''
-
-     ready_screen("unload_model_screen")
-     screen.addstr(1, 1, header + " > Unload Model File")
-     screen.addstr(6, 1, "List of Model:")
-     for i, model in enumerate(models_info):
-          screen.addstr(8 + i, 10,  "Model Number:[{0}] - Model path:[{1}] - Variables:[{2}]".format(str(i), model['directory'], model['variable_name']))
-     screen.addstr(3, 2, "Select the model number to remove (press enter to go back): ")
-     try:
-          model_remove_index = screen.getstr()
-          models_info.pop(int(model_remove_index))
-          model_datasets.pop(int(model_remove_index))
-          note = "Model file unloaded successfully"
-     except:
-          note = "WARNING: Model file was not unloaded successfully."
-
-     return note
-
-
-def list_model_screen(header):
-     '''Generates screen to list all model files.
-
-     :param header: Header of page
-     :type header: string
-     '''
-
-     ready_screen("list_model_screen")
-     screen.addstr(1, 1, header + " > List Model File ")
-     screen.addstr(6, 6, "List of model(s): ")
-     for i, model in enumerate(models_info):
-          screen.addstr(8 + i, 10,  "Model Number:[{0}] - Model path:[{1}] - Variables:[{2}]".format(str(i), model['directory'], model['variable_name']))
-     screen.addstr(4, 4, "Return to Manage Model (press Enter) :")
-     screen.getstr()
-
-
-def manage_model_screen(header, note=""):
-     '''Generates Manage Model screen.
-
-     :param header: Header of page
-     :type header: string
-     :param note: Notification, defult to empty string.
-     :type note: string
-     '''
-
-     option = ''
-     while option != '0':
-          ready_screen("manage_model_screen", note)
-          screen.addstr(1, 1, header)
-          screen.addstr(4, 4, "1 - Load Model File     [Number of loaded model: {0}]".format(len(model_datasets)))
-          screen.addstr(6, 4, "2 - Unload Model File")
-          screen.addstr(8, 4, "3 - List Model File")
-          screen.addstr(10, 4, "0 - Return to Main Menu")
-          screen.addstr(12, 2, "Select an option: ")
-          screen.refresh()
-          option = screen.getstr()
-
-          if option == '1':
-               note = load_model_screen(header)
-          if option == '2':
-               note = unload_model_screen(header)
-          if option == '3':
-               note = list_model_screen(header)
-               note = " "
-
-
-##############################################################
-#     Manage Observation Screen
-##############################################################
-
-def select_obs_screen(header):   #TODO: if the observation is already selected, don't select again.
-     '''Generates screen to select observation.
-     It reterives list of observations from database and make a table from that.
-     User has to select observation with dataset_id, parameter_id, start_date, end_date, minimum and maximum of lat and lon.
-     If the size of terminal screen is small to show whole table, a notification with link to parameter table on website will show up instead.
-
-     :param header: Header of page
-     :type header: string
-
-     :returns: Notification
-     :rtype: string
-     '''
-
-     ready_screen("select_obs_screen")
-     screen.addstr(1, 1, header + " > Select Observation ")
-     screen.addstr(8, 1, "Observations Table: ")
-     screen.addstr(9, 2, "|Dataset ID| - |Parameter ID| - |Time Step| - |Start Date| - | End Date | - | Min Lat | - | Max Lat | - | Min Lon | - | Max Lat | - |Database name")
-     screen.addstr(10, 2, "|----------| - |------------| - |---------| - |----------| - |----------| - |---------| - |---------| - |---------| - |---------| - |-------------")
-     all_obs_info = rcmed.get_parameters_metadata()
-     try:
-          for position, obs_info in enumerate(all_obs_info):
-               dataset_id = obs_info['dataset_id']
-               parameter_id = obs_info['parameter_id']
-               timestep = obs_info['timestep']
-               start_date = obs_info['start_date']
-               end_date = obs_info['end_date']
-               min_lat = eval(obs_info['bounding_box'].encode())[2][0] if obs_info['bounding_box'] else None
-               max_lat = eval(obs_info['bounding_box'].encode())[0][0] if obs_info['bounding_box'] else None
-               min_lon = eval(obs_info['bounding_box'].encode())[2][1] if obs_info['bounding_box'] else None
-               max_lon = eval(obs_info['bounding_box'].encode())[0][1] if obs_info['bounding_box'] else None
-               database = obs_info['database']
-               line = "|{0:>10}| - |{1:>12}| - |{2:>9}| - |{3}| - |{4}| - |{5:>9}| - |{6:>9}| - |{7:>9}| - |{8:>9}| - |{9}".format(
-                    dataset_id, parameter_id, timestep, start_date, end_date,
-                    str(min_lat), str(max_lat), str(min_lon), str(max_lon), database)
-               screen.addstr(11 + position, 2, line)
-     except:
-          ready_screen("select_obs_screen")
-          screen.addstr(1, 1, header + " > Select Observation ")
-          screen.addstr(10, 1, "Observation table cannot be shown due to small screen size. ")
-          screen.addstr(11, 1, "Please enlarge your screen and try again or refer to 'http://rcmes.jpl.nasa.gov/rcmed/parameters'. ")
-     try:
-          screen.addstr(4, 2, "Enter Dataset ID: ")
-          dataset_id = screen.getstr()
-          screen.addstr(5, 2, "Enter Parameter ID: ")
-          parameter_id = screen.getstr()
-
-          for obs in all_obs_info:
-               if obs['dataset_id'] == dataset_id and obs['parameter_id'] == parameter_id:
-                    observations_info.append({
-                                        'database':obs['database'],
-                                        'dataset_id':dataset_id,
-                                        'parameter_id':parameter_id,
-                                        'start_date':obs['start_date'],
-                                        'end_date':obs['end_date'],
-                                        'bounding_box':obs['bounding_box'],
-                                        'timestep':obs['timestep'],
-                                        'min_lat':float(eval(obs['bounding_box'].encode())[2][0]) if obs['bounding_box'] else None,
-                                        'max_lat':float(eval(obs['bounding_box'].encode())[0][0]) if obs['bounding_box'] else None,
-                                        'min_lon':float(eval(obs['bounding_box'].encode())[2][1]) if obs['bounding_box'] else None,
-                                        'max_lon':float(eval(obs['bounding_box'].encode())[0][1]) if obs['bounding_box'] else None,
-                                        'timestep':obs['timestep'],
-                                        'timestep':obs['timestep'],
-                                        'timestep':obs['timestep'],
-                                        'lat_res':float(obs['lat_res'].encode()),
-                                        'lon_res':float(obs['lon_res'].encode())
-                                        })
-                    note = "Observation sucessfully selected."
-                    break
-               else:
-                    note = "WARNING: Observation cannot be selected. There is no observation with given info."
-     except:
-          note = "WARNING: Observation cannot be selected, dataset or parameter id is wrong."
-
-     return  note
-
-
-def unselect_obs_screen(header):
-     '''Generates screen to be able to unselect observations.
-     Observations can be unselected by entering index allocated to them.
-
-     :param header: Header of page
-     :type header: string
-
-     :returns: Notification
-     :rtype: string
-     '''
-
-     ready_screen("unselect_obs_screen")
-     screen.addstr(1, 1, header + " > Unselect Observation ")
-     screen.addstr(6, 1, "List Observation(s):")
-     for i, obs_info in enumerate(observations_info):
-          screen.addstr(8 + i, 10, " [" + str(i) + "] : " + " Dataset ID: " + obs_info['dataset_id'] + " - Parameter ID: "+ obs_info['parameter_id'] + " - Database: "+ obs_info['database'])
-     screen.addstr(3, 2, "Select the observation to remove (press enter to go back): ")
-     try:
-          obs_remove_index = screen.getstr()
-          observations_info.pop(int(obs_remove_index))
-          note = "Observation sucessfully unselected."
-     except:
-          note = "WARNING: Unselecting model was not successful."
-
-     return note
-
-
-def list_obs_screen(header):
-     '''Generates screen to list observations.
-
-     :param header: Header of page
-     :type header: string
-     '''
-
-     ready_screen("list_obs_screen")
-     screen.addstr(1, 1, header + " > List Observation ")
-     screen.addstr(6, 6, "List of observation(s): ")
-     for i, obs_info in enumerate(observations_info):
-          screen.addstr(8 + i, 10, " [" + str(i) + "] : " + " Dataset ID: " + obs_info['dataset_id'] + " - Parameter ID: "+ obs_info['parameter_id'] + " - Database: "+ obs_info['database'])
-     screen.addstr(4, 4, "Return to Manage Observation (press Enter) :")
-     screen.getstr()
-
-
-def manage_obs_screen(header, note=""):
-     '''Generates Manage Observation screen.
-
-     :param header: Header of page
-     :type header: string
-     :param note: Notification, defult to empty string.
-     :type note: string
-     '''
-
-     option = ''
-     while option != '0':
-          ready_screen("manage_obs_screen", note)
-          screen.addstr(1, 1, header)
-          screen.addstr(4, 4, "1 - Select Observation     [Number of selected observation: {0}]".format(len(observations_info)))
-          screen.addstr(6, 4, "2 - Unselect Observation")
-          screen.addstr(8, 4, "3 - List Observation")
-          screen.addstr(10, 4, "0 - Return to Main Menu")
-          screen.addstr(12, 2, "Select an option: ")
-          screen.refresh()
-
-          option = screen.getstr()
-          if option == '1':
-               note = select_obs_screen(header)
-          if option == '2':
-               note = unselect_obs_screen(header)
-          if option == '3':
-               list_obs_screen(header)
-               note = " "
-
-
-##############################################################
-#     Run Evaluation Screen
-##############################################################
-
-def run_screen(model_datasets, models_info, observations_info,
-               overlap_start_time, overlap_end_time, overlap_min_lat,
-               overlap_max_lat, overlap_min_lon, overlap_max_lon,
-               temp_grid_setting, spatial_grid_setting, working_directory, plot_title):
-     '''Generates screen to show running evaluation process.
-
-     :param model_datasets: list of model dataset objects
-     :type model_datasets: list
-     :param models_info: list of dictionaries that contain information for each model
-     :type models_info: list
-     :param observations_info: list of dictionaries that contain information for each observation
-     :type observations_info: list
-     :param overlap_start_time: overlap start time between model and obs start time
-     :type overlap_start_time: datetime
-     :param overlap_end_time: overlap end time between model and obs end time
-     :type overlap_end_time: float
-     :param overlap_min_lat: overlap minimum lat between model and obs minimum lat
-     :type overlap_min_lat: float
-     :param overlap_max_lat: overlap maximum lat between model and obs maximum lat
-     :type overlap_max_lat: float
-     :param overlap_min_lon: overlap minimum lon between model and obs minimum lon
-     :type overlap_min_lon: float
-     :param overlap_max_lon: overlap maximum lon between model and obs maximum lon
-     :type overlap_max_lon: float
-     :param temp_grid_setting: temporal grid option such as hourly, daily, monthly and annually
-     :type temp_grid_setting: string
-     :param spatial_grid_setting:
-     :type spatial_grid_setting: string
-     :param working_directory: path to a directory for storring outputs
-     :type working_directory: string
-     :param plot_title: Title for plot
-     :type plot_title: string
-     '''
-
-     option = None
-     if option != "0":
-          ready_screen("manage_obs_screen")
-          y = screen.getmaxyx()[0]
-          screen.addstr(2, 2, "Evaluation started....")
-          screen.refresh()
-
-          OUTPUT_PLOT = "plot"
-
-          dataset_id = int(observations_info[0]['dataset_id'])       #just accepts one dataset at this time
-          parameter_id = int(observations_info[0]['parameter_id'])  #just accepts one dataset at this time
-
-          new_bounds = Bounds(overlap_min_lat, overlap_max_lat, overlap_min_lon, overlap_max_lon, overlap_start_time, overlap_end_time)
-          model_dataset = dsp.subset(new_bounds, model_datasets[0])   #just accepts one model at this time
-
-          #Getting bound info of subseted model file to retrive obs data with same bound as subseted model
-          new_model_spatial_bounds = model_dataset.spatial_boundaries()
-          new_model_temp_bounds = model_dataset.time_range()
-          new_min_lat = new_model_spatial_bounds[0]
-          new_max_lat = new_model_spatial_bounds[1]
-          new_min_lon = new_model_spatial_bounds[2]
-          new_max_lon = new_model_spatial_bounds[3]
-          new_start_time = new_model_temp_bounds[0]
-          new_end_time = new_model_temp_bounds[1]
-
-          screen.addstr(4, 4, "Retrieving data...")
-          screen.refresh()
-
-          #Retrieve obs data
-          obs_dataset = rcmed.parameter_dataset(
-                                        dataset_id,
-                                        parameter_id,
-                                        new_min_lat,
-                                        new_max_lat,
-                                        new_min_lon,
-                                        new_max_lon,
-                                        new_start_time,
-                                        new_end_time)
-          screen.addstr(4, 4, "--> Data retrieved.")
-          screen.refresh()
-
-          screen.addstr(5, 4, "Temporally regridding...")
-          screen.refresh()
-          if temp_grid_setting.lower() == 'hourly':
-               days = 0.5
-          elif temp_grid_setting.lower() == 'daily':
-               days = 1
-          elif temp_grid_setting.lower() == 'monthly':
-               days = 31
-          else:
-               days = 365
-          model_dataset = dsp.temporal_rebin(model_dataset, timedelta(days))
-          obs_dataset = dsp.temporal_rebin(obs_dataset, timedelta(days))
-          screen.addstr(5, 4, "--> Temporally regridded.")
-          screen.refresh()
-
-          new_lats = np.arange(new_min_lat, new_max_lat, spatial_grid_setting)
-          new_lons = np.arange(new_min_lon, new_max_lon, spatial_grid_setting)
-
-          screen.addstr(6, 4, "Spatially regridding...")
-          screen.refresh()
-          spatial_gridded_model = dsp.spatial_regrid(model_dataset, new_lats, new_lons)
-          spatial_gridded_obs = dsp.spatial_regrid(obs_dataset, new_lats, new_lons)
-          screen.addstr(6, 4, "--> Spatially regridded.")
-          screen.refresh()
-
-          screen.addstr(7, 4, "Setting up metrics...")
-          screen.refresh()
-          bias = metrics.Bias()
-          bias_evaluation = evaluation.Evaluation(spatial_gridded_model, [spatial_gridded_obs], [bias])
-          screen.addstr(7, 4, "--> Metrics setting done.")
-          screen.refresh()
-
-          screen.addstr(8, 4, "Running evaluation.....")
-          screen.refresh()
-          bias_evaluation.run()
-          results = bias_evaluation.results[0][0]
-          screen.addstr(8, 4, "--> Evaluation Finished.")
-          screen.refresh()
-
-          screen.addstr(9, 4, "Generating plots....")
-          screen.refresh()
-          lats = new_lats
-          lons = new_lons
-
-          gridshape = (1, 1)
-          sub_titles = [""]   #No subtitle set for now
-
-          if not os.path.exists(working_directory):
-               os.makedirs(working_directory)
-
-          for i in range(len(results)):
-               fname = working_directory + OUTPUT_PLOT + str(i)
-               plotter.draw_contour_map(results[i], lats, lons, fname,
-                               gridshape=gridshape, ptitle=plot_title,
-                               subtitles=sub_titles)
-          screen.addstr(9, 4, "--> Plots generated.")
-          screen.refresh()
-          screen.addstr(y-2, 1, "Press 'enter' to Exit: ")
-          option = screen.getstr()
-
-
-##############################################################
-#     Settings Screen
-##############################################################
-
-def get_model_temp_bound():
-     '''Get model temporal bound.
-
-     :returns: model start and end time
-     :rtypes: (datatime, datetime)
-     '''
-
-     models_start_time = []
-     models_end_time = []
-     for model in model_datasets:
-          models_start_time.append(model.time_range()[0])
-          models_end_time.append(model.time_range()[1])
-
-     return models_start_time, models_end_time
-
-
-def get_obs_temp_bound():
-     '''Get observation temporal bound.
-
-     :returns: observation start and end time
-     :rtype: (datetime, datetime)
-     '''
-
-     observations_start_time = []
-     observations_end_time = []
-     for obs in observations_info:
-          obs_start_time = datetime.strptime(obs['start_date'], "%Y-%m-%d")
-          observations_start_time.append(obs_start_time)
-          obs_end_time = datetime.strptime(obs['end_date'], "%Y-%m-%d")
-          observations_end_time.append(obs_end_time)
-
-     return observations_start_time, observations_end_time
-
-
-def get_temp_overlap(models_start_time, models_end_time, observations_start_time, observations_end_time):
-     '''Calculate temporal overlap between given datasets.
-
-     :param models_start_time: models start time
-     :type models_start_time: list of datetimes
-     :param models_end_time: models end time
-     :type models_end_time: list of datetime
-     :param observations_start_time: obs start time
-     :type observations_start_time: list of datetimes
-     :param observations_end_time: obs end time
-     :type observations_end_time: list of datetimes
-
-     :returns: overlap start and end time between model and observation
-     :rtype: (datetime, datetime)
-     '''
-
-     overlap_start_time = max(models_start_time + observations_start_time)
-     overlap_end_time = min(models_end_time + observations_end_time)
-
-     #Need to check if all datasets have temporal overlap, otherwise return
-     # to main menu and print a warning as notification.
-     if overlap_end_time <= overlap_start_time:
-          main_menu(model_datasets, models_info, observation_datasets, observations_info, note="WARNING: One or more dataset does not have temporal overlap with others.")
-
-     return overlap_start_time, overlap_end_time
-
-
-def get_model_spatial_bound():               #TODO: convert longitudes to -180, 180 to match with observation data
-     '''Get model spatial bound.
-
-     :returns: all models spatial boundaries
-     :rtype: list
-     '''
-
-     models_bound = []
-     for model in model_datasets:
-          models_bound.append(model.spatial_boundaries())
-
-     return models_bound
-
-
-def get_obs_spatial_bound():
-     '''Get observation spatial bound.
-
-     :returns: all observations spatial boundaries
-     :rtype: list
-     '''
-
-     observations_bound = []
-     for obs in observations_info:
-          observations_bound.append([obs['min_lat'], obs['max_lat'], obs['min_lon'], obs['max_lon']])
-
-     return observations_bound
-
-
-def get_spatial_overlap(models_bound, observations_bound):
-     '''Calculate spatial overlap between given datasets.
-
-     :param models_bound: all models spatial boundaries information
-     :type models_bound: list
-     :param observations_bound: all observations spatial boundaries information
-     :type observations_bound: list
-
-     :returns: spatial boundaries overlap between model and observation
-     :rtype: (float, float, float, float)
-     '''
-
-     datasets_bound = models_bound + observations_bound
-     overlap_min_lat = max(each[0] for each in datasets_bound)
-     overlap_max_lat = min(each[1] for each in datasets_bound)
-     overlap_min_lon = max(each[2] for each in datasets_bound)
-     overlap_max_lon = min(each[3] for each in datasets_bound)
-
-     #Need to check if all datasets have spatial overlap, otherwise return
-     # to main menu and print a warning as notification.
-     if overlap_max_lat <= overlap_min_lat or overlap_max_lon <= overlap_min_lon:
-          main_menu(model_datasets, models_info, observation_datasets, observations_info, note="WARNING: One or more dataset does not have spatial overlap with others.")
-
-     return overlap_min_lat, overlap_max_lat, overlap_min_lon, overlap_max_lon
-
-
-def settings_screen(header):
-     '''Generates screen for settings before running evaluation.
-
-     :param header: Header of page
-     :type header: string
-     '''
-
-     note = ""
-     models_start_time, models_end_time = get_model_temp_bound()
-     observations_start_time, observations_end_time = get_obs_temp_bound()
-     overlap_start_time, overlap_end_time = get_temp_overlap(models_start_time, models_end_time, observations_start_time, observations_end_time)
-     models_bound = get_model_spatial_bound()
-     observations_bound = get_obs_spatial_bound()
-     overlap_min_lat, overlap_max_lat, overlap_min_lon, overlap_max_lon = get_spatial_overlap(models_bound, observations_bound)
-     model_temp_res = model_datasets[0].temporal_resolution()       #just accepts one model at this time
-     obs_temp_res = observations_info[0]['timestep']        #just accepts one obs at this time
-     model_lat_res = model_datasets[0].spatial_resolution()[0] #just accepts one model at this time
-     model_lon_res = model_datasets[0].spatial_resolution()[1]  #just accepts one model at this time
-     obs_lat_res = observations_info[0]['lat_res']     #just accepts one obs at this time
-     obs_lon_res = observations_info[0]['lon_res']    #just accepts one obs at this time
-
-     temp_grid_option = "Observation"
-     temp_grid_setting = obs_temp_res
-     spatial_grid_option = "Observation"
-     spatial_grid_setting = obs_lat_res
-     subregion_path = None
-     metrics = 'BIAS'
-     working_directory = os.getcwd() + "/plots/"  #Default value of working directory set to "plots" folder in current directory
-     plot_title = '' #TODO: ask user about plot title or figure out automatically
-
-     fix_min_time = overlap_start_time
-     fix_max_time = overlap_end_time
-     fix_min_lat = overlap_min_lat
-     fix_max_lat = overlap_max_lat
-     fix_min_lon = overlap_min_lon
-     fix_max_lon = overlap_max_lon
-
-     option = ''
-     while option != '0':
-          ready_screen("settings_screen", note)
-          screen.addstr(1, 1, header)
-          screen.addstr(4, 4, "Number of model file:   {0}".format(str(len(model_datasets))))
-          screen.addstr(5, 4, "Number of observation:  {0}".format(str(len(observations_info))))
-          screen.addstr(6, 4, "Temporal Boundaries:    [start time = {0} - end time = {1}]".format(overlap_start_time, overlap_end_time))
-          screen.addstr(7, 4, "Spatial Boundaries:     [min-lat={0}  max-lat={1} min-lon={2} max-lon={3}]".format(overlap_min_lat, overlap_max_lat, overlap_min_lon, overlap_max_lon))
-          screen.addstr(8, 4, "Temporal Resolution:    [Model={0} - Observation={1}]".format(model_temp_res, obs_temp_res))
-          screen.addstr(9, 4, "Spatial Resolution:     [Model: lat={0} lon={1} - Observation: lat={2} lon={3}]".format(model_lat_res, model_lon_res, obs_lat_res, obs_lon_res))
-          screen.addstr(10, 4, "Temporal Grid Option:   [{0}]".format(temp_grid_option))
-          screen.addstr(11, 4, "Spatial Grid Option:    [{0}]".format(spatial_grid_option))
-          screen.addstr(12, 4, "Working Directory:      {0}".format(working_directory))
-          screen.addstr(13, 4, "Metrics:                {0}".format(metrics))
-
-          screen.addstr(15, 5, "1 - Change Temporal Boundaries")
-          screen.addstr(16, 5, "2 - Change Spatial Boundaries")
-          screen.addstr(17, 5, "3 - Change Temporal Gridding")
-          screen.addstr(18, 5, "4 - Change Spatial Gridding")
-          screen.addstr(19, 5, "5 - Add Subregion file (txt file) [Coming Soon....]")
-          screen.addstr(20, 5, "6 - Modify Metric (add/remove) [Coming Soon....]")
-          screen.addstr(21, 5, "7 - Change Working Directory")
-          screen.addstr(22, 5, "8 - Change Plot Title [Coming Soon....]")
-          screen.addstr(23, 5, "0 - Return to Main Menu")
-          screen.addstr(26, 5, "r - Run Evaluation")
-          screen.addstr(28, 2, "Select an option: ")
-
-          screen.refresh()
-          option = screen.getstr()
-          ### TODO: It breaks when you want to pick start time after end time and same issue with lat, lon.
-
-          if option == '1':
-               screen.addstr(33, 4, "Enter Start Time [min time: {0}] (Format YYYY-MM-DD):".format(fix_min_time))
-               new_start_time = screen.getstr()
-               try:
-                    new_start_time = datetime.strptime(new_start_time, '%Y-%m-%d')
-                    if new_start_time < fix_min_time or new_start_time > fix_max_time or new_start_time > overlap_end_time:
-                         note = "Start time has not changed."
-                    else:
-                         overlap_start_time = new_start_time
-                         note = "Start time has changed successfully."
-               except:
-                    note = "Start time has not changed."
-               screen.addstr(34, 4, "Enter End Time [max time:{0}] (Format YYYY-MM-DD):".format(fix_max_time))
-               new_max_time = screen.getstr()
-               try:
-                    new_max_time = datetime.strptime(new_max_time, '%Y-%m-%d')
-                    if new_max_time > fix_max_time or new_max_time < fix_min_time or new_max_time < overlap_start_time:
-                         note = note + " End time has not changed."
-                    else:
-                         overlap_end_time = new_max_time
-                         note = note + "End time has changed successfully."
-               except:
-                    note = note + " End time has not changed."
-
-          if option == '2':
-               screen.addstr(33, 4, "Enter Minimum Latitude [{0}]:".format(fix_min_lat))
-               new_min_lat = screen.getstr()
-               try:
-                    new_min_lat = float(new_min_lat)
-                    if new_min_lat < fix_min_lat or new_min_lat > fix_max_lat or new_min_lat > overlap_max_lat:
-                         note = "Minimum latitude has not changed."
-                    else:
-                         overlap_min_lat = new_min_lat
-                         note = "Minimum latitude has changed successfully."
-               except:
-                    note = "Minimum latitude has not changed."
-               screen.addstr(34, 4, "Enter Maximum Latitude [{0}]:".format(fix_max_lat))
-               new_max_lat = screen.getstr()
-               try:
-                    new_max_lat = float(new_max_lat)
-                    if new_max_lat > fix_max_lat or new_max_lat < fix_min_lat or new_max_lat < overlap_min_lat:
-                         note = note + " Maximum latitude has not changed."
-                    else:
-                         overlap_max_lat = new_max_lat
-                         note = note + "Maximum latitude has changed successfully."
-               except:
-                    note = note + " Maximum latitude has not changed."
-               screen.addstr(35, 4, "Enter Minimum Longitude [{0}]:".format(fix_min_lon))
-               new_min_lon = screen.getstr()
-               try:
-                    new_min_lon = float(new_min_lon)
-                    if new_min_lon < fix_min_lon or new_min_lon > fix_max_lon or new_min_lon > overlap_max_lon:
-                         note = note + " Minimum longitude has not changed."
-                    else:
-                         overlap_min_lon = new_min_lon
-                         note = note + "Minimum longitude has changed successfully."
-               except:
-                    note = note + " Minimum longitude has not changed."
-               screen.addstr(36, 4, "Enter Maximum Longitude [{0}]:".format(fix_max_lon))
-               new_max_lon = screen.getstr()
-               try:
-                    new_max_lon = float(new_max_lon)
-                    if new_max_lon > fix_max_lon or new_max_lon < fix_min_lon or new_max_lon < overlap_min_lon:
-                         note = note + " Maximum longitude has not changed."
-                    else:
-                         overlap_max_lon = new_max_lon
-                         note = note + "Maximum longitude has changed successfully."
-               except:
-                    note = note + " Maximum longitude has not changed."
-
-          if option == '3':
-               screen.addstr(33, 4, "Enter Temporal Gridding Option [Model or Observation]:")
-               new_temp_grid_option = screen.getstr()
-               if new_temp_grid_option.lower() == 'model':
-                    temp_grid_option = 'Model'
-                    temp_grid_setting = model_temp_res
-                    note = "Temporal gridding option has changed successfully to {0}".format(temp_grid_option)
-               elif new_temp_grid_option.lower() == 'observation':
-                    temp_grid_option = 'Observation'
-                    temp_grid_setting = obs_temp_res
-                    note = "Temporal gridding option has changed successfully to {0}".format(temp_grid_option)
-               else:
-                    note = "Temporal gridding option has not be changed."
-
-          if option == '4':
-               screen.addstr(33, 4, "Enter Spatial Gridding Option [Model, Observation or User]:")
-               new_spatial_grid_option = screen.getstr()
-               if new_spatial_grid_option.lower() == 'model':
-                    spatial_grid_option = 'Model'
-                    spatial_grid_setting = model_lat_res
-                    note = "Spatial gridding option has changed successfully to {0}".format(spatial_grid_option)
-               elif new_spatial_grid_option.lower() == 'observation':
-                    spatial_grid_option = 'Observation'
-                    spatial_grid_setting = obs_lat_res
-                    note = "Spatial gridding option has changed successfully to {0}".format(spatial_grid_option)
-               elif new_spatial_grid_option.lower() == 'user':
-                    screen.addstr(34, 4, "Please enter spatial resolution: ")
-                    user_res = screen.getstr()
-                    try:
-                         user_res = float(user_res)
-                         spatial_grid_option = 'User: resolution {0}'.format(str(user_res))
-                         spatial_grid_setting = user_res
-                         note = "Spatial gridding option has changed successfully to {0}".format(spatial_grid_option)
-                    except:
-                         note = "Spatial gridding option has not be changed."
-               else:
-                    note = "Spatial gridding option has not be changed."
-
-          '''
-          if option == '5':
-               screen.addstr(33, 4, "Please enter one Subregion path:")
-               subregion_path = screen.getstr()
-          '''
-          if option == '7':
-               screen.addstr(33, 4, "Please enter working directory path:")
-               working_directory = screen.getstr()
-               if working_directory[-1] != '/':
-                    working_directory = working_directory + "/"
-
-          if option == '8':
-               screen.addstr(33, 4, "Please enter plot title:")
-               plot_title = screen.getstr()
-
-          if option.lower() == 'r':
-               run_screen(model_datasets, models_info, observations_info, overlap_start_time, overlap_end_time, \
-                          overlap_min_lat, overlap_max_lat, overlap_min_lon, overlap_max_lon, \
-                          temp_grid_setting, spatial_grid_setting, working_directory, plot_title)
-
-
-##############################################################
-#     Main Menu Screen
-##############################################################
-
-def main_menu(model_datasets, models_info, observation_datasets, observations_info, note=""):
-     '''This function Generates main menu page.
-
-     :param model_datasets: list of model dataset objects
-     :type model_datasets: list
-     :param models_info: list of dictionaries that contain information for each model
-     :type models_info: list
-     :param observation_datasets: list of observation dataset objects
-     :type observation_datasets: list
-     :param observations_info: list of dictionaries that contain information for each observation
-     :type observations_info: list
-     '''
-
-     option = ''
-     while option != '0':
-          ready_screen("main_menu", note)
-          model_status = "NC" if len(model_datasets) == 0 else "C"     #NC (Not Complete), if there is no model added, C (Complete) if model is added
-          obs_status = "NC" if len(observations_info) == 0 else "C"    #NC (Not Complete), if there is no observation added, C (Complete) if observation is added
-          screen.addstr(1, 1, "Main Menu:")
-          screen.addstr(4, 4, "1 - Manage Model ({0})".format(model_status))
-          screen.addstr(6, 4, "2 - Manage Observation ({0})".format(obs_status))
-          screen.addstr(8, 4, "3 - Run(Config File) [coming soon....]")
-          screen.addstr(10, 4, "4 - Run(Settings)")
-          screen.addstr(12, 4, "0 - EXIT")
-          screen.addstr(18, 2, "Select an option: ")
-          screen.refresh()
-          option = screen.getstr()
-
-          if option == '1':
-               header = "Main Menu > Manage Model"
-               manage_model_screen(header)
-          if option == '2':
-               header = "Main Menu > Manage Observation"
-               manage_obs_screen(header)
-          if option == '3':
-               header = "Main Menu > Run(Config File)"
-               #TODO: function to read config file and run evaluation
-          if option == '4':
-               if model_status =='NC' or obs_status == 'NC':
-                    main_menu(model_datasets, models_info, observation_datasets, observations_info, note="WARNING: Please complete step 1 and 2 before 4.")
-               else:
-                    header = "Main Menu > Run(Settings)"
-                    settings_screen(header)
-     curses.endwin()
-     sys.exit()
-
-
-if __name__ == '__main__':
-     TITLE = "Open Climate Workbench Evaluation System"
-     ORGANIZATION = "Apache Software Foundation"
-     screen = curses.initscr()
-     model_datasets = []           #list of model dataset objects
-     models_info = []              #list of dictionaries that contain information for each model
-     observation_datasets = []     #list of observation dataset objects
-     observations_info = []        #list of dictionaries that contain information for each observation
-     main_menu(model_datasets, models_info, observation_datasets, observations_info)
diff --git a/ocw-config-runner/configuration_parsing.py b/ocw-config-runner/configuration_parsing.py
new file mode 100644
index 0000000..5c28249
--- /dev/null
+++ b/ocw-config-runner/configuration_parsing.py
@@ -0,0 +1,285 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+import logging
+import re
+import sys
+
+import ocw.metrics as metrics
+
+import yaml
+
+logging.basicConfig()
+logger = logging.getLogger(__name__)
+
+def is_config_valid(config_data):
+    """ Validate supplied evaluation configuration data.
+
+    :param config_data: Dictionary of the data parsed from the supplied YAML
+        configuration file.
+    :type config_data: :func:`dict`
+
+    :returns: True if the configuration data is sufficient for an evaluation and
+        seems to be well formed, False otherwise.
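+
+    A minimal sketch of configuration data that passes this check
+    (illustrative only; the path is a placeholder)::
+
+        config_data = {
+            'datasets': {
+                'reference': {
+                    'data_source': 'local',
+                    'file_count': 1,
+                    'path': '/tmp/some_reference_file.nc',
+                    'variable': 'tasmax'
+                }
+            },
+            'metrics': ['TemporalStdDev']
+        }
+
+        if is_config_valid(config_data):
+            # Safe to hand the config off to the evaluation builder.
+            pass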
+    """
+    if not _valid_minimal_config(config_data):
+        logger.error('Insufficient configuration file data for an evaluation')
+        return False
+
+    if not _config_is_well_formed(config_data):
+        logger.error('Configuration data is not well formed')
+        return False
+
+    return True
+
+def _valid_minimal_config(config_data):
+    """"""
+    if not 'datasets' in config_data.keys():
+        logger.error('No datasets specified in configuration data.')
+        return False
+
+    if 'metrics' not in config_data:
+        logger.error('No metrics specified in configuration data.')
+        return False
+
+    if _contains_unary_metrics(config_data['metrics']):
+        if ('reference' not in config_data['datasets'] and
+            'targets' not in config_data['datasets']):
+            err = (
+                'Unary metric in configuration data requires either a reference '
+                'or target dataset to be present for evaluation. Please ensure '
+                'that your config is well formed.'
+            )
+            logger.error(err)
+            return False
+
+    if _contains_binary_metrics(config_data['metrics']):
+        if ('reference' not in config_data['datasets'] or
+            'targets' not in config_data['datasets']):
+            logger.error(
+                'Binary metric in configuration requires both a reference '
+                'and target dataset to be present for evaluation. Please ensure '
+                'that your config is well formed.'
+            )
+            return False
+
+    return True
+
+def _config_is_well_formed(config_data):
+    """"""
+    is_well_formed = True
+
+    if 'reference' in config_data['datasets']:
+        if not _valid_dataset_config_data(config_data['datasets']['reference']):
+            is_well_formed = False
+
+    if 'targets' in config_data['datasets']:
+        targets = config_data['datasets']['targets']
+        if not isinstance(targets, list):
+            err = (
+                'Expected to find list of target datasets but instead found '
+                'object of type {}'
+            ).format(type(targets))
+            logger.error(err)
+            is_well_formed = False
+        else:
+            for t in targets:
+                if not _valid_dataset_config_data(t):
+                    is_well_formed = False
+
+    available_metrics = _fetch_built_in_metrics()
+    for metric in config_data['metrics']:
+        if metric not in available_metrics:
+            warning = (
+                'Unable to locate metric name {} in built-in metrics. If this '
+                'is not a user defined metric then please check for potential '
+                'misspellings.'
+            ).format(metric)
+            logger.warn(warning)
+            is_well_formed = False
+
+    if 'subregions' in config_data:
+        for subregion in config_data['subregions']:
+            if not _valid_subregion_config_data(subregion):
+                is_well_formed = False
+
+    if 'plots' in config_data:
+        for plot in config_data['plots']:
+            if not _valid_plot_config_data(plot):
+                is_well_formed = False
+            # Ensure that if we're trying to make a plot that requires
+            # subregion info, the config actually provides it.
+            elif plot['type'] in ['subregion', 'portrait']:
+                if ('subregions' not in config_data or
+                    len(config_data['subregions']) < 1):
+                    logger.error(
+                        'Plot config that requires subregion information is present '
+                        'in a config file without adequate subregion information '
+                        'provided. Please ensure that you have properly supplied 1 or '
+                        'more subregion config values.'
+                    )
+                    is_well_formed = False
+
+
+    return is_well_formed
+
+def _contains_unary_metrics(config_metric_data):
+    """"""
+    unarys = [cls.__name__ for cls in metrics.UnaryMetric.__subclasses__()]
+    return any(metric in unarys for metric in config_metric_data)
+
+def _contains_binary_metrics(config_metric_data):
+    """"""
+    binarys = [cls.__name__ for cls in metrics.BinaryMetric.__subclasses__()]
+    return any(metric in binarys for metric in config_metric_data)
+
+def _fetch_built_in_metrics():
+    """"""
+    unarys = [cls.__name__ for cls in metrics.UnaryMetric.__subclasses__()]
+    binarys = [cls.__name__ for cls in metrics.BinaryMetric.__subclasses__()]
+    return unarys + binarys
+
+def _valid_dataset_config_data(dataset_config_data):
+    """"""
+    try:
+        data_source = dataset_config_data['data_source']
+    except KeyError:
+        logger.error('Dataset does not contain a data_source attribute.')
+        return False
+
+    if data_source == 'local':
+        required_keys = set(['data_source', 'file_count', 'path', 'variable'])
+    elif data_source == 'rcmed':
+        required_keys = set([
+            'dataset_id',
+            'parameter_id',
+            'min_lat',
+            'max_lat',
+            'min_lon',
+            'max_lon',
+            'start_time',
+            'end_time',
+        ])
+    elif data_source == 'esgf':
+        required_keys = set([
+            'data_source',
+            'dataset_id',
+            'variable',
+            'esgf_username',
+            'esgf_password'
+        ])
+    elif data_source == 'dap':
+        required_keys = set(['url', 'variable'])
+    else:
+        logger.error('Dataset does not contain a valid data_source location.')
+        return False
+
+    present_keys = set(dataset_config_data.keys())
+    missing_keys = required_keys - present_keys
+    contains_required = len(missing_keys) == 0
+
+    if contains_required:
+        if data_source == 'local' and dataset_config_data['file_count'] > 1:
+            # If the dataset is a multi-file dataset then we need to make sure
+            # that the file glob pattern is included.
+            if not 'file_glob_pattern' in dataset_config_data:
+                logger.error(
+                    'Multi-file local dataset is missing key: file_glob_pattern'
+                )
+                return False
+        return True
+    else:
+        missing = sorted(list(missing_keys))
+        logger.error(
+            'Dataset does not contain required keys. '
+            'The following keys are missing: {}'.format(', '.join(missing))
+        )
+        return False
+
+def _valid_plot_config_data(plot_config_data):
+    """"""
+    try:
+        plot_type = plot_config_data['type']
+    except KeyError:
+        logger.error('Plot config does not include a type attribute.')
+        return False
+
+    if plot_type == 'contour':
+        required_keys = set([
+            'results_indices',
+            'lats',
+            'lons',
+            'output_name'
+        ])
+    elif plot_type == 'taylor':
+        required_keys = set([
+            'stddev_results_indices',
+            'pattern_corr_results_indices',
+            'output_name'
+        ])
+    elif plot_type == 'subregion':
+        required_keys = set([
+            'lats',
+            'lons',
+            'output_name'
+        ])
+    elif plot_type == 'time_series':
+        required_keys = set([
+            'time_range'
+        ])
+    elif plot_type == 'portrait':
+        required_keys = set([
+            'metric_index',
+            'output_name'
+        ])
+    else:
+        logger.error('Invalid plot type specified.')
+        return False
+
+    present_keys = set(plot_config_data.keys())
+    missing_keys = required_keys - present_keys
+    contains_required = len(missing_keys) == 0
+
+    if not contains_required:
+        missing = sorted(list(missing_keys))
+        logger.error(
+            'Plot config does not contain required keys. '
+            'The following keys are missing: {}'.format(', '.join(missing))
+        )
+        return False
+
+    return True
+
+def _valid_subregion_config_data(subregion_config_data):
+    """"""
+    if type(subregion_config_data) != type([]):
+        logger.error(
+            'Subregions should be passed as a list of lists where '
+            'each sub-list contains a bounding box of the form: '
+            '[lat_min, lat_max, lon_min, lon_max].'
+        )
+        return False
+
+    if len(subregion_config_data) != 4:
+        logger.error(
+            'Subregions should be passed as a list of lists where '
+            'each sub-list contains a bounding box of the form: '
+            '[lat_min, lat_max, lon_min, lon_max].'
+        )
+        return False
+
+    return True
diff --git a/ocw-config-runner/configuration_writer.py b/ocw-config-runner/configuration_writer.py
new file mode 100644
index 0000000..8fc9242
--- /dev/null
+++ b/ocw-config-runner/configuration_writer.py
@@ -0,0 +1,302 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+import datetime as dt
+import logging
+
+import yaml
+
+logging.basicConfig()
+logger = logging.getLogger(__name__)
+
+def export_evaluation_to_config(evaluation, file_path='./exported_eval.yaml'):
+    ''' Export an evaluation to a config file
+
+    :param evaluation: The evaluation object to export.
+    :type evaluation: :class:`evaluation.Evaluation`
+
+    :param file_path: Optional file path where the config file should be saved.
+    :type file_path: :mod:`string`
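+
+    A short usage sketch (``my_evaluation`` stands for a fully configured
+    :class:`evaluation.Evaluation` object and the output path is a
+    placeholder)::
+
+        export_evaluation_to_config(my_evaluation,
+                                    file_path='/tmp/exported_eval.yaml')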
+    '''
+    config = {}
+
+    config['evaluation'] = generate_evaluation_information(evaluation)
+    config['datasets'] = generate_dataset_information(evaluation)
+    config['metrics'] = generate_metric_information(evaluation)
+    config['subregions'] = generate_subregion_information(evaluation)
+
+    with open(file_path, 'w') as out_file:
+        yaml.dump(config, out_file)
+
+def generate_dataset_information(evaluation):
+    ''' Generate dataset config file output for a given Evaluation object.
+
+    :param evaluation: The evaluation object from which to extract metrics.
+    :type evaluation: :class:`evaluation.Evaluation`
+
+    :returns: A :func:`dict` of dataset configuration information for export
+        to a configuration file.
+    :rtype: :func:`dict`
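+
+    The returned :func:`dict` contains at most two keys, sketched below
+    (dataset contents elided)::
+
+        {
+            'reference': {...},    # only if the evaluation has a reference
+            'targets': [{...}]     # only if the evaluation has target datasets
+        }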
+    '''
+    datasets = {}
+
+    if evaluation.ref_dataset:
+        datasets['reference'] = generate_dataset_config(evaluation.ref_dataset)
+
+    if len(evaluation.target_datasets) > 0:
+        datasets['targets'] = [
+            generate_dataset_config(target)
+            for target in evaluation.target_datasets
+        ]
+
+    return datasets
+
+def generate_dataset_config(dataset):
+    ''' Generate dataset config file output for a given Dataset object.
+
+    :param dataset: The dataset from which to extract configuration
+        information.
+    :type dataset: :class:`dataset.Dataset`
+
+    :returns: :func:`dict` containing necessary information for
+        dataset to be saved into a configuration object.
+
+    :raises AttributeError: If dataset does not contain expected source data.
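+
+    For a local dataset the returned :func:`dict` looks roughly like the
+    following (values are placeholders)::
+
+        {
+            'data_source': 'local',
+            'file_count': 1,
+            'path': '/tmp/some_file.nc',
+            'variable': 'tasmax',
+            'optional_args': {
+                'lat_name': 'lat',
+                'lon_name': 'lon',
+                'time_name': 'time',
+                'name': 'dataset name',
+                'units': 'K'
+            }
+        }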
+    '''
+    dataset_source = dataset.origin['source']
+
+    if dataset_source == 'local':
+        info = _extract_local_dataset_info(dataset)
+    elif dataset_source == 'rcmed':
+        info = _extract_rcmed_dataset_info(dataset)
+    elif dataset_source == 'esgf':
+        info = _extract_esgf_dataset_info(dataset)
+    elif dataset_source == 'dap':
+        info = _extract_dap_dataset_info(dataset)
+    else:
+        err = (
+            "Unexpected source in dataset origin information."
+            "Found {}."
+        ).format(dataset_source)
+        logger.error(err)
+        raise AttributeError(err)
+
+    info['optional_args']['name'] = dataset.name
+    info['optional_args']['units'] = dataset.units
+
+    return info
+
+def generate_metric_information(evaluation):
+    ''' Generate metric config file output from a given Evaluation object.
+
+    :param evaluation: The evaluation object from which to extract metrics.
+    :type evaluation: :class:`evaluation.Evaluation`
+
+    :returns: A :func:`list` of :mod:`metrics` object names for output into
+        a configuration file.
+    :rtype: :func:`list` of :mod:`metrics`
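+
+    For example, an evaluation holding a TemporalStdDev metric and a Bias
+    metric yields::
+
+        ['TemporalStdDev', 'Bias']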
+    '''
+    unary_metrics = [x.__class__.__name__ for x in evaluation.unary_metrics]
+    binary_metrics = [x.__class__.__name__ for x in evaluation.metrics]
+
+    return unary_metrics + binary_metrics
+
+def generate_evaluation_information(evaluation):
+    ''' Generate evaluation-related config file output.
+
+    Attempts to parse out temporal and spatial rebinning/regridding information
+    from the supplied evaluation object. If no datasets can be found, values
+    fall back to sane defaults or are (potentially) excluded entirely.
+
+    It's important to note that this function does its best to extrapolate the
+    configuration information. It's possible that you will encounter a scenario
+    where the guessed values are not what you want/expect. Please double-check
+    the output before blindly trusting what this generates.
+
+    :param evaluation: The evaluation object from which to extract metrics.
+    :type evaluation: :class:`evaluation.Evaluation`
+
+    :returns: A dictionary of valid `evaluation` section settings for export
+        to a configuration file.
+    :rtype: :func:`dict`
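+
+    When the evaluation holds no datasets, the returned settings are just the
+    defaults used below, i.e.::
+
+        {
+            'temporal_time_delta': 999,
+            'spatial_regrid_lats': (-90, 90, 1),
+            'spatial_regrid_lons': (-180, 180, 1),
+            'subset': [-90, 90, -180, 180, "1500-01-01", "2500-01-01"]
+        }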
+    '''
+    eval_config = {
+        'temporal_time_delta': 999,
+        'spatial_regrid_lats': (-90, 90, 1),
+        'spatial_regrid_lons': (-180, 180, 1),
+        'subset': [-90, 90, -180, 180, "1500-01-01", "2500-01-01"],
+    }
+
+    datasets = []
+
+    if evaluation.ref_dataset:
+        datasets.append(evaluation.ref_dataset)
+
+    if evaluation.target_datasets:
+        datasets += evaluation.target_datasets
+
+    if len(datasets) > 0:
+        eval_config['temporal_time_delta'] = _calc_temporal_bin_size(datasets)
+
+        lats, lons = _calc_spatial_lat_lon_grid(datasets)
+        eval_config['spatial_regrid_lats'] = lats
+        eval_config['spatial_regrid_lons'] = lons
+
+        eval_config['subset'] = _calc_subset_config(datasets)
+
+    return eval_config
+
+def generate_subregion_information(evaluation):
+    ''' Generate subregion config file output from a given Evaluation object.
+
+    :param evaluation: The evaluation object from which to extract metrics.
+    :type evaluation: :class:`evaluation.Evaluation`
+
+    :returns: A :func:`list` of :func:`list` objects containing bounding
+        box info for export into a configuration file
+    :rtype: :func:`list` of :func:`list`
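+
+    For example, an evaluation with two subregions yields output of the form
+    (values are placeholders)::
+
+        [[-10.0, 0.0, 29.0, 36.5], [0.0, 10.0, 29.0, 37.5]]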
+    '''
+    subregions = []
+    for s in evaluation.subregions:
+        subregions.append([s.lat_min, s.lat_max, s.lon_min, s.lon_max])
+
+    return subregions
+
+def _extract_local_dataset_info(dataset):
+    '''Extract config information from a dataset loaded from a local file.'''
+    dataset_info = {'optional_args': {}}
+
+    dataset_info['data_source'] = 'local'
+    dataset_info['file_count'] = 1
+    dataset_info['path'] = dataset.origin['path']
+    dataset_info['variable'] = dataset.variable
+
+    dataset_info['optional_args']['lat_name'] = dataset.origin['lat_name']
+    dataset_info['optional_args']['lon_name'] = dataset.origin['lon_name']
+    dataset_info['optional_args']['time_name'] = dataset.origin['time_name']
+
+    if 'elevation_index' in dataset.origin:
+        elev = dataset.origin['elevation_index']
+        dataset_info['optional_args']['elevation_index'] = elev
+
+    return dataset_info
+
+def _extract_rcmed_dataset_info(dataset):
+    '''Extract config information from a dataset loaded from RCMED.'''
+    dataset_info = {'optional_args': {}}
+
+    min_lat, max_lat, min_lon, max_lon = dataset.spatial_boundaries()
+    start_time, end_time = dataset.time_range()
+
+    dataset_info['data_source'] = 'rcmed'
+    dataset_info['dataset_id'] = dataset.origin['dataset_id']
+    dataset_info['parameter_id'] = dataset.origin['parameter_id']
+    dataset_info['min_lat'] = min_lat
+    dataset_info['max_lat'] = max_lat
+    dataset_info['min_lon'] = min_lon
+    dataset_info['max_lon'] = max_lon
+    dataset_info['start_time'] = str(start_time)
+    dataset_info['end_time'] = str(end_time)
+
+    return dataset_info
+
+def _extract_esgf_dataset_info(dataset):
+    '''Extract config information from a dataset loaded from ESGF.'''
+    dataset_info = {'optional_args': {}}
+
+    dataset_info['data_source'] = 'esgf'
+    dataset_info['dataset_id'] = dataset.origin['dataset_id']
+    dataset_info['variable'] = dataset.origin['variable']
+    dataset_info['esgf_username'] = 'Put your ESGF Username here'
+    dataset_info['esgf_password'] = 'Put your ESGF Password here'
+
+    return dataset_info
+
+def _extract_dap_dataset_info(dataset):
+    '''Extract config information from a dataset loaded from a DAP url.'''
+    dataset_info = {'optional_args': {}}
+
+    dataset_info['data_source'] = 'dap'
+    dataset_info['url'] = dataset.origin['url']
+    dataset_info['variable'] = dataset.variable
+
+    return dataset_info
+
+def _calc_temporal_bin_size(datasets):
+    '''Guess a temporal bin size (in days) from the time spacing of the first dataset.'''
+    times = datasets[0].times
+    time_delta = times[1] - times[0]
+
+    if time_delta.days == 0:
+        return 1
+    elif time_delta.days <= 31:
+        return 31
+    elif time_delta.days <= 366:
+        return 366
+    else:
+        return 999
+
+def _calc_spatial_lat_lon_grid(datasets):
+    '''Build (min, max, step) lat and lon tuples from the first dataset's grid.'''
+    lat_min, lat_max, lon_min, lon_max = datasets[0].spatial_boundaries()
+
+    lats = datasets[0].lats
+    lons = datasets[0].lons
+    # These explicit float casts are needed to ensure that the type of the
+    # lat/lon steps are not numpy values. PyYAML will choke on export if it
+    # encounters a Numpy value.
+    lat_step = float(abs(lats[1] - lats[0]))
+    lon_step = float(abs(lons[1] - lons[0]))
+
+    # We need to add an extra step value onto the end so when we generate a
+    # range with these values we don't lose one that we're expecting.
+    if lat_max != 90: lat_max += lat_step
+    if lon_max != 180: lon_max += lon_step
+
+    return ((lat_min, lat_max, lat_step), (lon_min, lon_max, lon_step))
+
+def _calc_subset_config(datasets):
+    '''Calculate a subset bounding box and time range that covers all of the datasets.'''
+    lat_min = 90
+    lat_max = -90
+    lon_min = 180
+    lon_max = -180
+    start = dt.datetime(2500, 1, 1)
+    end = dt.datetime(1500, 1, 1)
+
+    for ds in datasets:
+        ds_lat_min, ds_lat_max, ds_lon_min, ds_lon_max = ds.spatial_boundaries()
+        ds_start, ds_end = ds.time_range()
+
+        if ds_lat_min < lat_min:
+            lat_min = ds_lat_min
+
+        if ds_lat_max > lat_max:
+            lat_max = ds_lat_max
+
+        if ds_lon_min < lon_min:
+            lon_min = ds_lon_min
+
+        if ds_lon_max > lon_max:
+            lon_max = ds_lon_max
+
+        if ds_start < start:
+            start = ds_start
+
+        if ds_end > end:
+            end = ds_end
+
+    return [lat_min, lat_max, lon_min, lon_max, str(start), str(end)]
diff --git a/ocw-config-runner/evaluation_creation.py b/ocw-config-runner/evaluation_creation.py
new file mode 100644
index 0000000..88394de
--- /dev/null
+++ b/ocw-config-runner/evaluation_creation.py
@@ -0,0 +1,178 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+import dateutil.parser
+from datetime import timedelta
+import logging
+
+from ocw.dataset import Bounds
+from ocw.evaluation import Evaluation
+import ocw.dataset_processor as dsp
+import ocw.data_source.local as local
+import ocw.data_source.rcmed as rcmed
+import ocw.data_source.esgf as esgf
+import ocw.data_source.dap as dap
+import ocw.metrics as metrics
+
+import numpy as np
+
+logging.basicConfig()
+logger = logging.getLogger(__name__)
+
+def generate_evaluation_from_config(config_data):
+    """ Generate an Evaluation object from configuration data.
+
+    :param config_data: Dictionary of the data parsed from the supplied YAML
+        configuration file.
+    :type config_data: :func:`dict`
+
+    :returns: An Evaluation object containing the data specified in the
+        supplied configuration data.
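+
+    A short usage sketch (the config path is a placeholder pointing at one of
+    the example files shipped with this runner)::
+
+        import yaml
+
+        with open('example/simple_model_to_model_bias.yaml', 'r') as f:
+            config_data = yaml.load(f)
+
+        evaluation = generate_evaluation_from_config(config_data)
+        evaluation.run()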
+    """
+    # Load datasets
+    reference = None
+    targets = []
+    if config_data['datasets']:
+        if 'reference' in config_data['datasets']:
+            reference = _load_dataset(config_data['datasets']['reference'])
+
+        if 'targets' in config_data['datasets']:
+            targets = [_load_dataset(t) for t in config_data['datasets']['targets']]
+
+        reference, targets = _prepare_datasets_for_evaluation(reference,
+                                                              targets,
+                                                              config_data)
+    # Load metrics
+    eval_metrics = []
+    if config_data['metrics']:
+        eval_metrics = [_load_metric(m)() for m in config_data['metrics']]
+
+    # Load Subregions (if present)
+    subregions = None
+    if 'subregions' in config_data:
+        subregions = [_load_subregion(s) for s in config_data['subregions']]
+
+    return Evaluation(reference, targets, eval_metrics, subregions=subregions)
+
+def _load_dataset(dataset_config_data):
+    """"""
+    if dataset_config_data['data_source'] == 'local':
+        if dataset_config_data['file_count'] > 1:
+            logger.error(
+                'Multi-file datasets are currently not supported. Cancelling load '
+                'of the following dataset: {}'.format(dataset_config_data)
+            )
+            return None
+
+        return local.load_file(dataset_config_data['path'],
+                               dataset_config_data['variable'],
+                               **dataset_config_data.get('optional_args', {}))
+    elif dataset_config_data['data_source'] == 'rcmed':
+        return rcmed.parameter_dataset(dataset_config_data['dataset_id'],
+                                       dataset_config_data['parameter_id'],
+                                       dataset_config_data['min_lat'],
+                                       dataset_config_data['max_lat'],
+                                       dataset_config_data['min_lon'],
+                                       dataset_config_data['max_lon'],
+                                       dataset_config_data['start_time'],
+                                       dataset_config_data['end_time'],
+                                       **dataset_config_data.get('optional_args', {}))
+    elif dataset_config_data['data_source'] == 'esgf':
+        return esgf.load_dataset(dataset_config_data['dataset_id'],
+                                 dataset_config_data['variable'],
+                                 dataset_config_data['esgf_username'],
+                                 dataset_config_data['esgf_password'],
+                                 **dataset_config_data.get('optional_args', {}))
+    elif dataset_config_data['data_source'] == 'dap':
+        return dap.load(dataset_config_data['url'],
+                        dataset_config_data['variable'],
+                        **dataset_config_data.get('optional_args', {}))
+
+def _prepare_datasets_for_evaluation(reference, targets, config_data):
+    """"""
+    subset = config_data['evaluation'].get('subset', None)
+    temporal_time_delta = config_data['evaluation'].get('temporal_time_delta', None)
+    spatial_regrid_lats = config_data['evaluation'].get('spatial_regrid_lats', None)
+    spatial_regrid_lons = config_data['evaluation'].get('spatial_regrid_lons', None)
+
+    # If we have a temporal time delta and it's daily (i.e., 1) we will
+    # normalize the data as daily data (which means we adjust the start times
+    # for each bucket of data to be consistent). By default we will normalize
+    # the data as monthly. Note that this will not break yearly data so it's
+    # safer to do this no matter what. This keeps us from ending up with
+    # off-by-one errors in the resulting dataset shapes after the
+    # temporal/spatial adjustments, which would break evaluations.
+    string_time_delta = 'monthly'
+    if temporal_time_delta and temporal_time_delta == 1:
+        string_time_delta = 'daily'
+
+    reference = dsp.normalize_dataset_datetimes(reference, string_time_delta)
+    targets = [dsp.normalize_dataset_datetimes(t, string_time_delta) for t in targets]
+
+    if subset:
+        start = dateutil.parser.parse(subset[4])
+        end = dateutil.parser.parse(subset[5])
+        bounds = Bounds(subset[0], subset[1], subset[2], subset[3], start, end)
+
+        if reference:
+            reference = dsp.safe_subset(bounds, reference)
+
+        if targets:
+            targets = [dsp.safe_subset(bounds, t) for t in targets]
+
+    if temporal_time_delta:
+        resolution = timedelta(temporal_time_delta)
+
+        if reference:
+            reference = dsp.temporal_rebin(reference, resolution)
+
+        if targets:
+            targets = [dsp.temporal_rebin(t, resolution) for t in targets]
+
+    if spatial_regrid_lats and spatial_regrid_lons:
+        lats = np.arange(spatial_regrid_lats[0], spatial_regrid_lats[1], spatial_regrid_lats[2])
+        lons = np.arange(spatial_regrid_lons[0], spatial_regrid_lons[1], spatial_regrid_lons[2])
+
+        if reference:
+            reference = dsp.spatial_regrid(reference, lats, lons)
+
+        if targets:
+            targets = [dsp.spatial_regrid(t, lats, lons) for t in targets]
+
+    return reference, targets
+
+def _load_metric(metric_config_data):
+    """"""
+    # If the dataset is user defined outside of ocw.metrics we won't currently
+    # handle loading it.
+    if '.' in metric_config_data:
+        logger.error(
+            'User-defined metrics outside of the ocw.metrics module '
+            'cannot currently be loaded. If you just wanted a metric '
+            'found in ocw.metrics then do not specify the full '
+            'package and module names. See the documentation for examples.'
+        )
+        return None
+
+    return getattr(metrics, metric_config_data)
+
+def _load_subregion(subregion_config_data):
+    """"""
+    return Bounds(float(subregion_config_data[0]),
+                  float(subregion_config_data[1]),
+                  float(subregion_config_data[2]),
+                  float(subregion_config_data[3]))
diff --git a/ocw-config-runner/example/portrait_diagram.yaml b/ocw-config-runner/example/portrait_diagram.yaml
new file mode 100644
index 0000000..f3e45de
--- /dev/null
+++ b/ocw-config-runner/example/portrait_diagram.yaml
@@ -0,0 +1,30 @@
+evaluation:
+    temporal_time_delta: 365
+    spatial_regrid_lats: !!python/tuple [-40, 40, 1]
+    spatial_regrid_lons: !!python/tuple [-40, 40, 1]
+
+datasets:
+    reference:
+        data_source: local
+        file_count: 1
+        path: /tmp/AFRICA_KNMI-RACMO2.2b_CTL_ERAINT_MM_50km_1989-2008_tasmax.nc
+        variable: tasmax
+
+    targets:
+        - data_source: local
+          file_count: 1
+          path: /tmp/AFRICA_UC-WRF311_CTL_ERAINT_MM_50km-rg_1989-2008_tasmax.nc
+          variable: tasmax
+          optional_args:
+              name: WRF
+metrics:
+    - PatternCorrelation
+
+plots:
+    - type: portrait
+      metric_index: 0
+      output_name: portrait_test
+
+subregions:
+    - [-10.0, 0.0, 29.0, 36.5] 
+    - [0.0, 10.0,  29.0, 37.5] 
diff --git a/ocw-config-runner/example/simple_model_to_model_bias.yaml b/ocw-config-runner/example/simple_model_to_model_bias.yaml
new file mode 100644
index 0000000..bd59fbd
--- /dev/null
+++ b/ocw-config-runner/example/simple_model_to_model_bias.yaml
@@ -0,0 +1,35 @@
+evaluation:
+    temporal_time_delta: 365
+    spatial_regrid_lats: !!python/tuple [-20, 20, 1]
+    spatial_regrid_lons: !!python/tuple [-20, 20, 1]
+
+datasets:
+    reference:
+        data_source: local
+        file_count: 1
+        path: /tmp/AFRICA_KNMI-RACMO2.2b_CTL_ERAINT_MM_50km_1989-2008_tasmax.nc
+        variable: tasmax
+
+    targets:
+        - data_source: local
+          file_count: 1
+          path: /tmp/AFRICA_UC-WRF311_CTL_ERAINT_MM_50km-rg_1989-2008_tasmax.nc
+          variable: tasmax
+metrics:
+    - Bias
+
+plots:
+    - type: contour
+      results_indices:
+          - !!python/tuple [0, 0]
+      lats:
+          range_min: -20
+          range_max: 20
+          range_step: 1
+      lons:
+          range_min: -20
+          range_max: 20
+          range_step: 1
+      output_name: wrf_bias_compared_to_knmi
+      optional_args:
+          gridshape: !!python/tuple [6, 6]
diff --git a/ocw-config-runner/example/subregion.yaml b/ocw-config-runner/example/subregion.yaml
new file mode 100644
index 0000000..d39e39b
--- /dev/null
+++ b/ocw-config-runner/example/subregion.yaml
@@ -0,0 +1,30 @@
+subregions:
+    - [-10.0, 0.0, 29.0, 36.5]
+    - [0.0, 10.0,  29.0, 37.5]
+    - [10.0, 20.0, 25.0, 32.5]
+    - [20.0, 33.0, 25.0, 32.5]
+    - [-19.3,-10.2,12.0, 20.0]
+    - [15.0, 30.0, 15.0, 25.0]
+    - [-10.0, 10.0, 7.3, 15.0]
+    - [-10.9, 10.0, 5.0, 7.3]
+    - [33.9, 40.0,  6.9, 15.0]
+    - [10.0, 25.0,  0.0, 10.0]
+    - [10.0, 25.0,-10.0,  0.0]
+    - [30.0, 40.0,-15.0,  0.0]
+    - [33.0, 40.0, 25.0, 35.0]
+
+datasets:
+
+metrics:
+
+plots:
+    - type: subregion
+      output_name: subregion_plot
+      lats:
+          range_min: -45.0
+          range_max: 42.24
+          range_step: 0.5
+      lons:
+          range_min: -24.0
+          range_max: 60.0
+          range_step: 0.5
diff --git a/ocw-config-runner/example/taylor_diagram_example.yaml b/ocw-config-runner/example/taylor_diagram_example.yaml
new file mode 100644
index 0000000..66eab3b
--- /dev/null
+++ b/ocw-config-runner/example/taylor_diagram_example.yaml
@@ -0,0 +1,36 @@
+evaluation:
+    temporal_time_delta: 30
+    spatial_regrid_lats: !!python/tuple [-45, 42, 1]
+    spatial_regrid_lons: !!python/tuple [-24, 60, 1]
+    subset: [-45, 42, -24, 60, "1989-01-01", "1989-12-01"]
+
+datasets:
+    reference:
+        data_source: local
+        file_count: 1
+        path: /tmp/AFRICA_KNMI-RACMO2.2b_CTL_ERAINT_MM_50km_1989-2008_tasmax.nc
+        variable: tasmax
+        optional_args:
+            name: dataset1
+
+    targets:
+        - data_source: local
+          file_count: 1
+          path: /tmp/AFRICA_UC-WRF311_CTL_ERAINT_MM_50km-rg_1989-2008_tasmax.nc
+          variable: tasmax
+          optional_args:
+              name: dataset2
+metrics:
+    - StdDevRatio
+    - PatternCorrelation
+
+plots:
+    - type: taylor
+      stddev_results_indices:
+          - !!python/tuple [0, 0]
+      pattern_corr_results_indices:
+          - !!python/tuple [0, 1]
+      output_name: taylor_plot
+      optional_args:
+          fmt: png
+          frameon: False
diff --git a/ocw-config-runner/example/time_series_plot_example.yaml b/ocw-config-runner/example/time_series_plot_example.yaml
new file mode 100644
index 0000000..b5599cc
--- /dev/null
+++ b/ocw-config-runner/example/time_series_plot_example.yaml
@@ -0,0 +1,31 @@
+evaluation:
+    temporal_time_delta: 30
+    spatial_regrid_lats: !!python/tuple [-20, 20, 1]
+    spatial_regrid_lons: !!python/tuple [-20, 20, 1]
+    subset: [-180, 180, -90, 90, "1989-01-01", "1990-12-01"]
+
+datasets:
+    reference:
+        data_source: local
+        file_count: 1
+        path: /tmp/AFRICA_KNMI-RACMO2.2b_CTL_ERAINT_MM_50km_1989-2008_tasmax.nc
+        variable: tasmax
+        optional_args:
+            name: KNMI
+
+    targets:
+        - data_source: local
+          file_count: 1
+          path: /tmp/AFRICA_UC-WRF311_CTL_ERAINT_MM_50km-rg_1989-2008_tasmax.nc
+          variable: tasmax
+          optional_args:
+            name: WRF
+metrics:
+
+plots:
+    - type: time_series
+      time_range: monthly
+
+subregions:
+    - [-10.0, 0.0, -19.0, 19.0]
+    - [0.0, 10.0,  -10.0, 10.0]
diff --git a/ocw-config-runner/ocw_evaluation_from_config.py b/ocw-config-runner/ocw_evaluation_from_config.py
new file mode 100644
index 0000000..93b411a
--- /dev/null
+++ b/ocw-config-runner/ocw_evaluation_from_config.py
@@ -0,0 +1,72 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+import argparse
+import logging
+import sys
+
+from configuration_parsing import is_config_valid
+from evaluation_creation import generate_evaluation_from_config
+from plot_generation import plot_from_config
+
+import yaml
+
+logging.basicConfig()
+logger = logging.getLogger(__name__)
+
+def run_evaluation_from_config(config_file_path, ignore_config_errors=False):
+    """ Run an OCW evaluation specified by a config file.
+
+    :param config_file_path: The file path to an OCW-compliant YAML file
+        specifying how to run the evaluation. For additional information on
+        the valid options that you can set in the config, please check the
+        project wiki: https://cwiki.apache.org/confluence/display/climate/home#.
+    :type config_file_path: :mod:`string`
+
+    :param ignore_config_errors: When this is True, configuration parsing
+        errors will NOT interrupt the evaluation run. You are unlikely to want
+        this set; it exists for cases where you want to graph something that
+        does not require a full evaluation run.
+    :type ignore_config_errors: :func:`bool`
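+
+    A usage sketch (the config path is a placeholder pointing at one of the
+    bundled example files)::
+
+        run_evaluation_from_config('example/taylor_diagram_example.yaml')
+
+    or equivalently from the command line::
+
+        python ocw_evaluation_from_config.py example/taylor_diagram_example.yaml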
+    """
+    with open(config_file_path, 'r') as config_file:
+        config = yaml.load(config_file)
+
+    if not ignore_config_errors and not is_config_valid(config):
+        logger.warning(
+            'Unable to validate configuration file. Exiting evaluation. '
+            'Please check documentation for config information.'
+        )
+
+        sys.exit(1)
+
+    evaluation = generate_evaluation_from_config(config)
+
+    if evaluation._evaluation_is_valid():
+        evaluation.run()
+
+    plot_from_config(evaluation, config)
+
+if __name__ == '__main__':
+    description = 'OCW Config Based Evaluation'
+    epilog = 'Additional information at https://cwiki.apache.org/confluence/display/climate/home#'
+
+    parser = argparse.ArgumentParser(description=description, epilog=epilog)
+    parser.add_argument('config', help='Path to YAML config file for the evaluation')
+    parser.add_argument('ignore_config_errors', nargs='?', default=False, type=bool)
+    args = parser.parse_args()
+
+    run_evaluation_from_config(args.config, args.ignore_config_errors)
diff --git a/ocw-config-runner/plot_generation.py b/ocw-config-runner/plot_generation.py
new file mode 100644
index 0000000..392331d
--- /dev/null
+++ b/ocw-config-runner/plot_generation.py
@@ -0,0 +1,204 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+import logging
+
+import ocw.dataset_processor as dsp
+import ocw.plotter as plots
+import ocw.utils as utils
+
+import numpy as np
+
+logging.basicConfig()
+logger = logging.getLogger(__name__)
+
+def plot_from_config(evaluation, config_data):
+    """ Generate plots for an evaluation from configuration data.
+
+    :param evaluation: The Evaluation for which to generate plots.
+    :type evaluation: :class:`ocw.evaluation.Evaluation`
+    :param config_data: Dictionary of the data parsed from the supplied YAML
+        configuration file.
+    :type config_data: :func:`dict`
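+
+    A sketch of the plot configuration this expects, mirroring the bundled
+    example YAML files (``my_evaluation`` and the output name are
+    placeholders)::
+
+        config_data = {
+            'plots': [
+                {'type': 'time_series', 'time_range': 'monthly'},
+                {'type': 'portrait',
+                 'metric_index': 0,
+                 'output_name': 'portrait_test'}
+            ]
+        }
+
+        plot_from_config(my_evaluation, config_data)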
+    """
+    for plot in config_data['plots']:
+        if plot['type'] == 'contour':
+            _draw_contour_plot(evaluation, plot)
+        elif plot['type'] == 'subregion':
+            _draw_subregion_diagram(evaluation, plot)
+        elif plot['type'] == 'taylor':
+            _draw_taylor_diagram(evaluation, plot)
+        elif plot['type'] == 'time_series':
+            _draw_time_series_plot(evaluation, plot)
+        elif plot['type'] == 'portrait':
+            _draw_portrait_diagram(evaluation, plot)
+        else:
+            logger.error('Unrecognized plot type requested: {}'.format(plot['type']))
+
+def _draw_contour_plot(evaluation, plot_config):
+    """"""
+    lats = plot_config['lats']
+    if type(lats) != type(list):
+        lats = np.arange(lats['range_min'], lats['range_max'], lats['range_step'])
+
+    lons = plot_config['lons']
+    if not isinstance(lons, list):
+        lons = np.arange(lons['range_min'], lons['range_max'], lons['range_step'])
+
+    for i, index in enumerate(plot_config['results_indices']):
+        if len(index) == 2:
+            target, metric = index
+            vals = evaluation.results[target][metric]
+        elif len(index) == 3:
+            target, metric, subregion = index
+            vals = evaluation.results[target][metric][subregion]
+
+        plot_name = plot_config['output_name'] + '_{}'.format(i)
+        plots.draw_contour_map(vals,
+                               np.array(lats),
+                               np.array(lons),
+                               plot_name,
+                               **plot_config.get('optional_args', {}))
+
+def _draw_taylor_diagram(evaluation, plot_config):
+    """"""
+    plot_name = plot_config['output_name']
+    ref_dataset_name = evaluation.ref_dataset.name
+    target_dataset_names = [t.name for t in evaluation.target_datasets]
+
+    if len(plot_config['stddev_results_indices'][0]) == 2:
+        stddev_results = [
+            evaluation.results[tar][met]
+            for (tar, met) in plot_config['stddev_results_indices']
+        ]
+
+        pattern_corr_results = [
+            evaluation.results[tar][met]
+            for (tar, met) in plot_config['pattern_corr_results_indices']
+        ]
+    elif len(plot_config['stddev_results_indices'][0]) == 3:
+        stddev_results = [
+            evaluation.results[tar][met][sub]
+            for (tar, met, sub) in plot_config['stddev_results_indices']
+        ]
+
+        pattern_corr_results = [
+            evaluation.results[tar][met][sub]
+            for (tar, met, sub) in plot_config['pattern_corr_results_indices']
+        ]
+
+    plot_data = np.array([stddev_results, pattern_corr_results]).transpose()
+
+    plots.draw_taylor_diagram(plot_data,
+                              target_dataset_names,
+                              ref_dataset_name,
+                              fname=plot_name,
+                              **plot_config.get('optional_args', {}))
+
+def _draw_subregion_diagram(evaluation, plot_config):
+    """"""
+    lats = plot_config['lats']
+    if type(lats) != type(list):
+        lats = np.arange(lats['range_min'], lats['range_max'], lats['range_step'])
+
+    lons = plot_config['lons']
+    if not isinstance(lons, list):
+        lons = np.arange(lons['range_min'], lons['range_max'], lons['range_step'])
+
+    plots.draw_subregions(evaluation.subregions,
+                          lats,
+                          lons,
+                          plot_config['output_name'],
+                          **plot_config.get('optional_args', {}))
+
+def _draw_portrait_diagram(evaluation, plot_config):
+    """"""
+    metric_index = plot_config['metric_index']
+
+    diagram_data = np.array(evaluation.results[:][metric_index][:])
+    subregion_names = ["R{}".format(i) for i in range(len(evaluation.subregions))]
+    target_names = [t.name for t in evaluation.target_datasets]
+
+    plots.draw_portrait_diagram(diagram_data,
+                                target_names,
+                                subregion_names,
+                                fname=plot_config['output_name'],
+                                **plot_config.get('optional_args', {}))
+
+def _draw_time_series_plot(evaluation, plot_config):
+    """"""
+    time_range_info = plot_config['time_range']
+    ref_ds = evaluation.ref_dataset
+    target_ds = evaluation.target_datasets
+
+    if time_range_info == 'monthly':
+        ref_ds.values, ref_ds.times = utils.calc_climatology_monthly(ref_ds)
+
+        for t in target_ds:
+            t.values, t.times = utils.calc_climatology_monthly(t)
+    else:
+        logger.error(
+            'Invalid time range provided. Only monthly is supported '
+            'at the moment'
+        )
+        return
+
+    if evaluation.subregions:
+        for bound_count, bound in enumerate(evaluation.subregions):
+            results = []
+            labels = []
+
+            subset = dsp.subset(
+                bound,
+                ref_ds,
+                subregion_name="R{}_{}".format(bound_count, ref_ds.name)
+            )
+
+            results.append(utils.calc_time_series(subset))
+            labels.append(subset.name)
+
+            for t in target_ds:
+                subset = dsp.subset(
+                    bound,
+                    t,
+                    subregion_name="R{}_{}".format(bound_count, t.name)
+                )
+                results.append(utils.calc_time_series(subset))
+                labels.append(subset.name)
+
+            plots.draw_time_series(np.array(results),
+                                   ref_ds.times,
+                                   labels,
+                                   'R{}'.format(bound_count),
+                                   **plot_config.get('optional_args', {}))
+
+    else:
+        results = []
+        labels = []
+
+        results.append(utils.calc_time_series(ref_ds))
+        labels.append(ref_ds.name)
+
+        for t in target_ds:
+            results.append(utils.calc_time_series(t))
+            labels.append(t.name)
+
+        plots.draw_time_series(np.array(results),
+                               ref_ds.times,
+                               labels,
+                               'time_series',
+                               **plot_config.get('optional_args', {}))
diff --git a/ocw-config-runner/tests/test_config_parsing.py b/ocw-config-runner/tests/test_config_parsing.py
new file mode 100644
index 0000000..e918405
--- /dev/null
+++ b/ocw-config-runner/tests/test_config_parsing.py
@@ -0,0 +1,806 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+from mock import patch
+import unittest
+
+import configuration_parsing as parser
+import ocw.metrics as metrics
+
+import yaml
+
+
+class TestIsConfigValid(unittest.TestCase):
+    @classmethod
+    def setUpClass(self):
+        not_minimal_config = """
+            datasets:
+        """
+        self.not_minimal = yaml.load(not_minimal_config)
+
+        not_well_formed_config = """
+        datasets:
+            reference:
+                data_source: local
+                file_count: 1
+                path: /a/fake/path/file.py
+                variable: pr
+
+            targets:
+                - data_source: local
+                  file_count: 5
+                  file_glob_pattern: something for globbing files here
+                  variable: pr
+                  optional_args:
+                      name: Target1
+
+                - data_source: esgf
+                  dataset_id: fake dataset id
+                  variable: pr
+                  esgf_username: my esgf username
+                  esgf_password: my esgf password
+
+        metrics:
+            - Bias
+            - TemporalStdDev
+        """
+        self.not_well_formed = yaml.load(not_well_formed_config)
+
+    @patch('configuration_parsing.logger')
+    def test_not_minimal_config(self, mock_logger):
+        ret = parser.is_config_valid(self.not_minimal)
+        self.assertFalse(ret)
+
+        mock_logger.error.assert_called_with(
+            'Insufficient configuration file data for an evaluation'
+        )
+
+    @patch('configuration_parsing.logger')
+    def test_not_valid_config(self, mock_logger):
+        ret = parser.is_config_valid(self.not_well_formed)
+        self.assertFalse(ret)
+
+        mock_logger.error.assert_called_with(
+            'Configuration data is not well formed'
+        )
+
+
+class TestValidMinimalConfig(unittest.TestCase):
+    @classmethod
+    def setUpClass(self):
+        no_datasets_config = """
+        metrics:
+            - Bias
+        """
+        self.no_datasets = yaml.load(no_datasets_config)
+
+        no_metrics_config = """
+        datasets:
+            reference:
+                data_source: dap
+                url: afakeurl.com
+                variable: pr
+        """
+        self.no_metrics = yaml.load(no_metrics_config)
+
+        unary_with_reference_config = """
+        datasets:
+            reference:
+                data_source: dap
+                url: afakeurl.com
+                variable: pr
+
+        metrics:
+            - TemporalStdDev
+        """
+        self.unary_with_reference = yaml.load(unary_with_reference_config)
+
+        unary_with_target_config = """
+        datasets:
+            targets:
+                - data_source: dap
+                  url: afakeurl.com
+                  variable: pr
+
+        metrics:
+            - TemporalStdDev
+        """
+        self.unary_with_target = yaml.load(unary_with_target_config)
+
+        unary_no_reference_or_target = """
+        datasets:
+            not_ref_or_target:
+                - data_source: dap
+                  url: afakeurl.com
+                  variable: pr
+
+        metrics:
+            - TemporalStdDev
+        """
+        self.unary_no_ref_or_target = yaml.load(unary_no_reference_or_target)
+
+        binary_valid_config = """
+        datasets:
+            reference:
+                data_source: dap
+                url: afakeurl.com
+                variable: pr
+
+            targets:
+                - data_source: dap
+                  url: afakeurl.com
+                  variable: pr
+        metrics:
+            - Bias
+        """
+        self.binary_valid = yaml.load(binary_valid_config)
+
+        binary_no_reference_config = """
+        datasets:
+            targets:
+                - data_source: dap
+                  url: afakeurl.com
+                  variable: pr
+        metrics:
+            - Bias
+        """
+        self.binary_no_reference = yaml.load(binary_no_reference_config)
+
+        binary_no_target_config = """
+        datasets:
+            reference:
+                data_source: dap
+                url: afakeurl.com
+                variable: pr
+
+        metrics:
+            - Bias
+        """
+        self.binary_no_target = yaml.load(binary_no_target_config)
+
+    @patch('configuration_parsing.logger')
+    def test_no_datasets(self, mock_logger):
+        ret = parser._valid_minimal_config(self.no_datasets)
+        self.assertFalse(ret)
+
+        mock_logger.error.assert_called_with(
+            'No datasets specified in configuration data.'
+        )
+
+    @patch('configuration_parsing.logger')
+    def test_no_metrics(self, mock_logger):
+        ret = parser._valid_minimal_config(self.no_metrics)
+        self.assertFalse(ret)
+
+        mock_logger.error.assert_called_with(
+            'No metrics specified in configuration data.'
+        )
+
+    def test_unary_with_reference(self):
+        ret = parser._valid_minimal_config(self.unary_with_reference)
+        self.assertTrue(ret)
+
+    def test_unary_with_target(self):
+        ret = parser._valid_minimal_config(self.unary_with_target)
+        self.assertTrue(ret)
+
+    @patch('configuration_parsing.logger')
+    def test_unary_no_datasets(self, mock_logger):
+        ret = parser._valid_minimal_config(self.unary_no_ref_or_target)
+        self.assertFalse(ret)
+
+        mock_logger.error.assert_called_with(
+            'Unary metric in configuration data requires either a reference '
+            'or target dataset to be present for evaluation. Please ensure '
+            'that your config is well formed.'
+        )
+
+    def test_valid_binary(self):
+        ret = parser._valid_minimal_config(self.binary_valid)
+        self.assertTrue(ret)
+
+    @patch('configuration_parsing.logger')
+    def test_binary_no_reference(self, mock_logger):
+        ret = parser._valid_minimal_config(self.binary_no_reference)
+        self.assertFalse(ret)
+
+        mock_logger.error.assert_called_with(
+            'Binary metric in configuration requires both a reference '
+            'and target dataset to be present for evaluation. Please ensure '
+            'that your config is well formed.'
+        )
+        
+    @patch('configuration_parsing.logger')
+    def test_binary_no_target(self, mock_logger):
+        ret = parser._valid_minimal_config(self.binary_no_target)
+        self.assertFalse(ret)
+
+        mock_logger.error.assert_called_with(
+            'Binary metric in configuration requires both a reference '
+            'and target dataset to be present for evaluation. Please ensure '
+            'that your config is well formed.'
+        )
+
+
+class TestConfigIsWellFormed(unittest.TestCase):
+    @classmethod
+    def setUpClass(self):
+        malformed_reference_config = """
+            datasets:
+                reference:
+                    data_source: notavalidlocation
+
+            metrics:
+                - Bias
+        """
+        self.malformed_reference_conf = yaml.load(malformed_reference_config)
+
+        malformed_target_list_config = """
+            datasets:
+                targets:
+                    notalist: 
+                        a_key: a_value
+
+                    alsonotalist:
+                        a_key: a_value
+
+            metrics:
+                - Bias
+        """
+        self.malformed_target_list = yaml.load(malformed_target_list_config)
+
+        missing_metric_name_config = """
+            datasets:
+                reference:
+                    data_source: dap
+                    url: afakeurl.com
+                    variable: pr
+
+            metrics:
+                - NotABuiltInMetric
+        """
+        self.missing_metric_name = yaml.load(missing_metric_name_config)
+
+        bad_plot_config = """
+            datasets:
+                reference:
+                    data_source: dap
+                    url: afakeurl.com
+                    variable: pr
+
+            metrics:
+                - Bias
+
+            plots:
+                - type: NotARealPlotName
+        """
+        self.bad_plot = yaml.load(bad_plot_config)
+
+        bad_subregion_config_type = """
+            datasets:
+                reference:
+                    data_source: dap
+                    url: afakeurl.com
+                    variable: pr
+
+            metrics:
+                - Bias
+
+            subregions:
+                - this is a string instead of a list
+        """
+        self.bad_subregion_type = yaml.load(bad_subregion_config_type)
+
+        bad_subregion_config_length = """
+            datasets:
+                reference:
+                    data_source: dap
+                    url: afakeurl.com
+                    variable: pr
+
+            metrics:
+                - Bias
+
+            subregions:
+                - [1, 2, 3, 4, 5]
+        """
+        self.bad_subregion_length = yaml.load(bad_subregion_config_length)
+
+    def test_malformed_reference_config(self):
+        ret = parser._config_is_well_formed(self.malformed_reference_conf)
+        self.assertFalse(ret)
+
+    @patch('configuration_parsing.logger')
+    def test_malformed_target_dataset_list(self, mock_logger):
+        ret = parser._config_is_well_formed(self.malformed_target_list)
+        self.assertFalse(ret)
+
+        mock_logger.error.assert_called_with(
+            "Expected to find list of target datasets but instead found "
+            "object of type <type 'dict'>"
+        )
+
+    def test_not_builtin_metric(self):
+        ret = parser._config_is_well_formed(self.missing_metric_name)
+        self.assertFalse(ret)
+
+    @patch('configuration_parsing.logger')
+    def test_warns_regarding_not_builtin_metric(self, mock_logger):
+        ret = parser._config_is_well_formed(self.missing_metric_name)
+        mock_logger.warn.assert_called_with(
+            'Unable to locate metric name NotABuiltInMetric in built-in '
+            'metrics. If this is not a user defined metric then please check '
+            'for potential misspellings.'
+        )
+
+    def test_bad_plot_config(self):
+        ret = parser._config_is_well_formed(self.bad_plot)
+        self.assertFalse(ret)
+    
+    def test_bad_subregion_type(self):
+        ret = parser._config_is_well_formed(self.bad_subregion_type)
+        self.assertFalse(ret)
+
+    def test_bad_subregion_length(self):
+        ret = parser._config_is_well_formed(self.bad_subregion_length)
+        self.assertFalse(ret)
+
+
+class MetricFetchTest(unittest.TestCase):
+    @classmethod
+    def setUpClass(self):
+        binary_config = """
+            metrics:
+                - Bias
+                - StdDevRatio
+        """
+        unary_config = """
+            metrics:
+                - TemporalStdDev
+        """
+        self.unary_conf = yaml.load(unary_config)
+        self.binary_conf = yaml.load(binary_config)
+
+    def test_contains_binary_metric(self):
+        ret = parser._contains_binary_metrics(self.binary_conf['metrics'])
+        self.assertTrue(ret)
+
+    def test_does_not_contain_binary_metric(self):
+        ret = parser._contains_binary_metrics(self.unary_conf['metrics'])
+        self.assertFalse(ret)
+
+    def test_contains_unary_metric(self):
+        ret = parser._contains_unary_metrics(self.unary_conf['metrics'])
+        self.assertTrue(ret)
+
+    def test_does_not_contain_unary_metric(self):
+        ret = parser._contains_unary_metrics(self.binary_conf['metrics'])
+        self.assertFalse(ret)
+
+
+class InvalidDatasetConfig(unittest.TestCase):
+    @classmethod
+    def setUpClass(self):
+        example_config_yaml = """
+            - file_count: 1
+              path: /a/fake/path
+              variable: pr
+
+            - data_source: invalid_location_identifier
+        """
+        conf = yaml.load(example_config_yaml)
+        self.missing_data_source = conf[0]
+        self.invalid_data_source = conf[1]
+
+    @patch('configuration_parsing.logger')
+    def test_missing_data_source_config(self, mock_logger):
+        parser._valid_dataset_config_data(self.missing_data_source)
+        mock_logger.error.assert_called_with(
+            'Dataset does not contain a data_source attribute.'
+        )
+
+    @patch('configuration_parsing.logger')
+    def test_invalid_data_source(self, mock_logger):
+        parser._valid_dataset_config_data(self.invalid_data_source)
+        mock_logger.error.assert_called_with(
+            'Dataset does not contain a valid data_source location.'
+        )
+
+
+class TestLocalDatasetConfig(unittest.TestCase):
+    @classmethod
+    def setUpClass(self):
+        self.required_local_keys = set(['data_source', 'file_count', 'path', 'variable'])
+        example_config_yaml = """
+            - data_source: local
+              file_count: 1
+              path: /a/fake/path
+              variable: pr
+              optional_args:
+                  name: Target1
+
+            - data_source: local
+
+            - data_source: local
+              file_count: 5
+              file_glob_pattern: something for globbing files here
+              variable: pr
+              path: /a/fake/path
+              optional_args:
+                  name: Target1
+
+            - data_source: local
+              file_count: 5
+              variable: pr
+              path: /a/fake/path
+        """
+
+        conf = yaml.load(example_config_yaml)
+        self.valid_local_single = conf[0]
+        self.invalid_local_single = conf[1]
+        self.valid_local_multi = conf[2]
+        self.invalid_local_multi = conf[1]
+        self.invalid_local_multi_file_glob = conf[3]
+
+    def test_valid_local_config_single_file(self):
+        ret = parser._valid_dataset_config_data(self.valid_local_single)
+        self.assertTrue(ret)
+
+    def test_valid_local_config_multi_file(self):
+        ret = parser._valid_dataset_config_data(self.valid_local_multi)
+        self.assertTrue(ret)
+
+    @patch('configuration_parsing.logger')
+    def test_invalid_local_config(self, mock_logger):
+        parser._valid_dataset_config_data(self.invalid_local_single)
+
+        present_keys = set(self.invalid_local_single.keys())
+        missing_keys = self.required_local_keys - present_keys
+        missing = sorted(list(missing_keys))
+
+        error = (
+            'Dataset does not contain required keys. '
+            'The following keys are missing: {}'.format(', '.join(missing))
+        )
+        mock_logger.error.assert_called_with(error)
+
+    @patch('configuration_parsing.logger')
+    def test_invalid_local_config_multi_file(self, mock_logger):
+        # Multi-file config is handled slightly differently. We should see the
+        # same missing keys in this situation as we would on the single file
+        # local config. We will test for a missing file_glob_pattern in a
+        # different test.
+        parser._valid_dataset_config_data(self.invalid_local_multi)
+
+        present_keys = set(self.invalid_local_multi.keys())
+        missing_keys = self.required_local_keys - present_keys
+        missing = sorted(list(missing_keys))
+
+        error = (
+            'Dataset does not contain required keys. '
+            'The following keys are missing: {}'.format(', '.join(missing))
+        )
+        mock_logger.error.assert_called_with(error)
+
+    @patch('configuration_parsing.logger')
+    def test_invalid_local_config_multi_file_missing_file_glob(self, mock_logger):
+        # We can't check for the file_glob_pattern pattern until after we have
+        # verified that the single local file config has been met.
+        parser._valid_dataset_config_data(self.invalid_local_multi_file_glob)
+
+        mock_logger.error.assert_called_with(
+            'Multi-file local dataset is missing key: file_glob_pattern'
+        )
+
+
+class TestRCMEDDatasetConfig(unittest.TestCase):
+    @classmethod
+    def setUpClass(self):
+        self.required_rcmed_keys = set([
+            'dataset_id',
+            'parameter_id',
+            'min_lat',
+            'max_lat',
+            'min_lon',
+            'max_lon',
+            'start_time',
+            'end_time'
+        ])
+        example_config_yaml = """
+            - data_source: rcmed
+              dataset_id: 4
+              parameter_id: 4
+              min_lat: -40
+              max_lat: 40
+              min_lon: -50
+              max_lon: 50
+              start_time: YYYY-MM-DDThh:mm:ss
+              end_time: YYYY-MM-DDThh:mm:ss
+
+            - data_source: rcmed
+        """
+        conf = yaml.load(example_config_yaml)
+        self.valid_rcmed = conf[0]
+        self.invalid_rcmed = conf[1]
+
+    def test_valid_rcmed_config(self):
+        ret = parser._valid_dataset_config_data(self.valid_rcmed)
+        self.assertTrue(ret)
+
+    @patch('configuration_parsing.logger')
+    def test_invalid_rcmed_config(self, mock_logger):
+        parser._valid_dataset_config_data(self.invalid_rcmed)
+
+        present_keys = set(self.invalid_rcmed.keys())
+        missing_keys = self.required_rcmed_keys - present_keys
+        missing = sorted(list(missing_keys))
+
+        error = (
+            'Dataset does not contain required keys. '
+            'The following keys are missing: {}'.format(', '.join(missing))
+        )
+        mock_logger.error.assert_called_with(error)
+
+
+class TestESGFDatasetConfig(unittest.TestCase):
+    @classmethod
+    def setUpClass(self):
+        self.required_esgf_keys = set([
+            'data_source',
+            'dataset_id',
+            'variable',
+            'esgf_username',
+            'esgf_password'
+        ])
+        example_config_yaml = """
+           - data_source: esgf
+             dataset_id: fake dataset id
+             variable: pr
+             esgf_username: my esgf username
+             esgf_password: my esgf password
+
+           - data_source: esgf
+        """
+        conf = yaml.load(example_config_yaml)
+        self.valid_esgf = conf[0]
+        self.invalid_esgf = conf[1]
+
+    def test_valid_esgf_conf(self):
+        ret = parser._valid_dataset_config_data(self.valid_esgf)
+        self.assertTrue(ret)
+
+    @patch('configuration_parsing.logger')
+    def test_invalid_esgf_conf(self, mock_logger):
+        parser._valid_dataset_config_data(self.invalid_esgf)
+
+        present_keys = set(self.invalid_esgf.keys())
+        missing_keys = self.required_esgf_keys - present_keys
+        missing = sorted(list(missing_keys))
+
+        error = (
+            'Dataset does not contain required keys. '
+            'The following keys are missing: {}'.format(', '.join(missing))
+        )
+        mock_logger.error.assert_called_with(error)
+
+
+class TestDAPDatasetConfig(unittest.TestCase):
+    @classmethod
+    def setUpClass(self):
+        self.required_dap_keys = set(['url', 'variable'])
+        example_config_yaml = """
+           - data_source: dap
+             url: afakeurl.com
+             variable: pr
+
+           - data_source: dap
+        """
+        conf = yaml.load(example_config_yaml)
+        self.valid_dap = conf[0]
+        self.invalid_dap = conf[1]
+
+    def test_valid_dap_config(self):
+        ret = parser._valid_dataset_config_data(self.valid_dap)
+        self.assertTrue(ret)
+
+    @patch('configuration_parsing.logger')
+    def test_invalid_dap_config(self, mock_logger):
+        parser._valid_dataset_config_data(self.invalid_dap)
+
+        present_keys = set(self.invalid_dap.keys())
+        missing_keys = self.required_dap_keys - present_keys
+        missing = sorted(list(missing_keys))
+
+        error = (
+            'Dataset does not contain required keys. '
+            'The following keys are missing: {}'.format(', '.join(missing))
+        )
+        mock_logger.error.assert_called_with(error)
+
+
+class ContourMapConfig(unittest.TestCase):
+    @classmethod
+    def setUpClass(self):
+        valid_contour_config = """
+            type: contour
+            results_indices:
+                - !!python/tuple [0, 0]
+            lats:
+                range_min: -20
+                range_max: 20
+                range_step: 1
+            lons:
+                range_min: -20
+                range_max: 20
+                range_step: 1
+            output_name: wrf_bias_compared_to_knmi
+        """
+        self.valid_contour = yaml.load(valid_contour_config)
+
+        missing_keys_contour_config = """
+            type: contour
+        """
+        self.missing_keys_contour = yaml.load(missing_keys_contour_config)
+
+        self.required_contour_keys = set([
+            'results_indices',
+            'lats',
+            'lons',
+            'output_name'
+        ])
+
+    def test_valid_contour(self):
+        ret = parser._valid_plot_config_data(self.valid_contour)
+        self.assertTrue(ret)
+
+    @patch('configuration_parsing.logger')
+    def test_missing_keys_contour(self, mock_logger):
+        ret = parser._valid_plot_config_data(self.missing_keys_contour)
+
+        present_keys = set(self.missing_keys_contour.keys())
+        missing_keys = self.required_contour_keys - present_keys
+        missing = sorted(list(missing_keys))
+
+        err = (
+            'Plot config does not contain required keys. '
+            'The following keys are missing: {}'
+        ).format(', '.join(missing))
+        mock_logger.error.assert_called_with(err)
+
+
+class TestSubregionPlotConfig(unittest.TestCase):
+    @classmethod
+    def setUpClass(self):
+        valid_subregion_config = """
+            type: subregion
+            lats:
+                range_min: -20
+                range_max: 20
+                range_step: 1
+            lons:
+                range_min: -20
+                range_max: 20
+                range_step: 1
+            output_name: fake_plot_name
+        """
+        self.valid_subregion = yaml.load(valid_subregion_config)
+
+        missing_keys_subregion_config = """
+            type: subregion
+        """
+        self.missing_keys_subregion = yaml.load(missing_keys_subregion_config)
+
+        self.required_subregion_keys = set([
+            'lats',
+            'lons',
+            'output_name'
+        ])
+
+    def test_valid_subregion(self):
+        ret = parser._valid_plot_config_data(self.valid_subregion)
+        self.assertTrue(ret)
+
+    @patch('configuration_parsing.logger')
+    def test_missing_keys_subregion(self, mock_logger):
+        ret = parser._valid_plot_config_data(self.missing_keys_subregion)
+
+        present_keys = set(self.missing_keys_subregion.keys())
+        missing_keys = self.required_subregion_keys - present_keys
+        missing = sorted(list(missing_keys))
+
+        err = (
+            'Plot config does not contain required keys. '
+            'The following keys are missing: {}'
+        ).format(', '.join(missing))
+        mock_logger.error.assert_called_with(err)
+
+
+class TestInvalidPlotConfig(unittest.TestCase):
+    @classmethod
+    def setUpClass(self):
+        bad_plot_type_config = """
+            type: NotAPlotType
+        """
+        self.bad_plot_type = yaml.load(bad_plot_type_config)
+
+        missing_plot_type_config = """
+            results_indices:
+                - !!python/tuple [0, 0]
+            lats:
+                range_min: -20
+                range_max: 20
+                range_step: 1
+            lons:
+                range_min: -20
+                range_max: 20
+                range_step: 1
+            output_name: wrf_bias_compared_to_knmi
+        """
+        self.missing_plot_type = yaml.load(missing_plot_type_config)
+
+        missing_subregions_for_plot_type = """
+            datasets:
+                - blah
+
+            metrics:
+                - blah
+            
+            plots:
+                - type: subregion
+                  results_indices:
+                      - !!python/tuple [0, 0]
+                  lats:
+                      range_min: -20
+                      range_max: 20
+                      range_step: 1
+                  lons:
+                      range_min: -20
+                      range_max: 20
+                      range_step: 1
+                  output_name: wrf_bias_compared_to_knmi
+        """
+        self.missing_subregions = yaml.load(missing_subregions_for_plot_type)
+
+    @patch('configuration_parsing.logger')
+    def test_invalid_plot_type(self, mock_logger):
+        ret = parser._valid_plot_config_data(self.bad_plot_type)
+        self.assertFalse(ret)
+
+        mock_logger.error.assert_called_with(
+            'Invalid plot type specified.'
+        )
+
+    @patch('configuration_parsing.logger')
+    def test_missing_plot_type(self, mock_logger):
+        ret = parser._valid_plot_config_data(self.missing_plot_type)
+        self.assertFalse(ret)
+
+        mock_logger.error.assert_called_with(
+            'Plot config does not include a type attribute.'
+        )
+
+    @patch('configuration_parsing.logger')
+    def test_missing_subregion(self, mock_logger):
+        ret = parser._config_is_well_formed(self.missing_subregions)
+        self.assertFalse(ret)
+
+        mock_logger.error.assert_called_with(
+            'Plot config that requires subregion information is present '
+            'in a config file without adequate subregion information '
+            'provided. Please ensure that you have properly supplied 1 or '
+            'more subregion config values.'
+        )
diff --git a/ocw-config-runner/tests/test_config_writer.py b/ocw-config-runner/tests/test_config_writer.py
new file mode 100644
index 0000000..f163989
--- /dev/null
+++ b/ocw-config-runner/tests/test_config_writer.py
@@ -0,0 +1,768 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+from mock import patch
+import os
+import unittest
+
+from ocw.dataset import Dataset, Bounds
+from ocw.evaluation import Evaluation
+import ocw.metrics as metrics
+import configuration_writer as writer
+
+import datetime as dt
+import numpy as np
+import yaml
+
+
+class TestLocalDatasetExportGeneration(unittest.TestCase):
+    @classmethod
+    def setUpClass(self):
+        self.lats = np.array([10, 12, 14, 16, 18])
+        self.lons = np.array([100, 102, 104, 106, 108])
+        self.times = np.array([dt.datetime(2000, x, 1) for x in range(1, 13)])
+        flat_array = np.array(range(300))
+        self.values = flat_array.reshape(12, 5, 5)
+        self.variable = 'var'
+        self.units = 'units'
+        self.origin = {
+            'source': 'local',
+            'path': '/a/fake/path.nc',
+            'lat_name': 'a lat name',
+            'lon_name': 'a lon name',
+            'time_name': 'a time name',
+            'elevation_index': 2
+        }
+        self.name = 'name'
+
+        self.dataset = Dataset(
+            self.lats,
+            self.lons,
+            self.times,
+            self.values,
+            variable=self.variable,
+            units=self.units,
+            origin=self.origin,
+            name=self.name
+        )
+
+        self.exported_info = writer.generate_dataset_config(self.dataset)
+
+    def test_proper_data_source_export(self):
+        self.assertTrue('data_source' in self.exported_info)
+        self.assertEqual(self.exported_info['data_source'],
+                         self.origin['source'])
+
+    def test_proper_path_export(self):
+        self.assertEqual(self.exported_info['path'], self.origin['path'])
+
+    def test_proper_variable_name_export(self):
+        self.assertEqual(self.exported_info['variable'], self.variable)
+
+    def test_proper_units_name_export(self):
+        self.assertEqual(self.exported_info['optional_args']['units'],
+                         self.units)
+
+    def test_proper_lats_name_export(self):
+        self.assertEqual(self.exported_info['optional_args']['lat_name'],
+                         self.origin['lat_name'])
+
+    def test_proper_lons_name_export(self):
+        self.assertEqual(self.exported_info['optional_args']['lon_name'],
+                         self.origin['lon_name'])
+
+    def test_proper_times_name_export(self):
+        self.assertEqual(self.exported_info['optional_args']['time_name'],
+                         self.origin['time_name'])
+
+    def test_proper_dataset_name_export(self):
+        self.assertEqual(self.exported_info['optional_args']['name'],
+                         self.name)
+
+    def test_proper_elevation_index_export(self):
+        self.assertEqual(self.exported_info['optional_args']['elevation_index'],
+                         self.origin['elevation_index'])
+
+
+class TestRCMEDDatasetExportGeneration(unittest.TestCase):
+    @classmethod
+    def setUpClass(self):
+        self.lats = np.array([10, 12, 14, 16, 18])
+        self.lons = np.array([100, 102, 104, 106, 108])
+        self.times = np.array([dt.datetime(2000, x, 1) for x in range(1, 13)])
+        flat_array = np.array(range(300))
+        self.values = flat_array.reshape(12, 5, 5)
+        self.variable = 'var'
+        self.units = 'units'
+        self.origin = {
+            'source': 'rcmed',
+            'dataset_id': 4,
+            'parameter_id': 14
+        }
+        self.name = 'name'
+
+        self.dataset = Dataset(
+            self.lats,
+            self.lons,
+            self.times,
+            self.values,
+            variable=self.variable,
+            units=self.units,
+            origin=self.origin,
+            name=self.name
+        )
+
+        self.exported_info = writer.generate_dataset_config(self.dataset)
+
+    def test_proper_data_source_export(self):
+        self.assertTrue('data_source' in self.exported_info)
+        self.assertEqual(self.exported_info['data_source'],
+                         self.origin['source'])
+
+    def test_proper_dataset_id_export(self):
+        self.assertEqual(self.exported_info['dataset_id'],
+                         self.origin['dataset_id'])
+
+    def test_proper_parameter_id_export(self):
+        self.assertEqual(self.exported_info['parameter_id'],
+                         self.origin['parameter_id'])
+
+    def test_proper_min_lat_export(self):
+        self.assertEqual(self.exported_info['min_lat'], min(self.lats))
+
+    def test_proper_max_lat_export(self):
+        self.assertEqual(self.exported_info['max_lat'], max(self.lats))
+
+    def test_proper_min_lon_export(self):
+        self.assertEqual(self.exported_info['min_lon'], min(self.lons))
+
+    def test_proper_max_lon_export(self):
+        self.assertEqual(self.exported_info['max_lon'], max(self.lons))
+
+    def test_proper_min_time_export(self):
+        self.assertEqual(self.exported_info['start_time'], str(min(self.times)))
+
+    def test_proper_max_time_export(self):
+        self.assertEqual(self.exported_info['end_time'], str(max(self.times)))
+
+    def test_proper_dataset_name_export(self):
+        self.assertEqual(self.exported_info['optional_args']['name'],
+                         self.name)
+
+    def test_proper_units_name_export(self):
+        self.assertEqual(self.exported_info['optional_args']['units'],
+                         self.units)
+
+
+class TestESGFDatasetExportGeneration(unittest.TestCase):
+    @classmethod
+    def setUpClass(self):
+        self.lats = np.array([10, 12, 14, 16, 18])
+        self.lons = np.array([100, 102, 104, 106, 108])
+        self.times = np.array([dt.datetime(2000, x, 1) for x in range(1, 13)])
+        flat_array = np.array(range(300))
+        self.values = flat_array.reshape(12, 5, 5)
+        self.variable = 'var'
+        self.units = 'units'
+        self.origin = {
+            'source': 'esgf',
+            'dataset_id': 'esgf dataset id',
+            'variable': 'var'
+        }
+        self.name = 'name'
+
+        self.dataset = Dataset(
+            self.lats,
+            self.lons,
+            self.times,
+            self.values,
+            variable=self.variable,
+            units=self.units,
+            origin=self.origin,
+            name=self.name
+        )
+
+        self.exported_info = writer.generate_dataset_config(self.dataset)
+
+    def test_proper_data_source_export(self):
+        self.assertTrue('data_source' in self.exported_info)
+        self.assertEqual(self.exported_info['data_source'],
+                     self.origin['source'])
+
+    def test_proper_dataset_id_export(self):
+        self.assertEqual(self.exported_info['dataset_id'],
+                         self.origin['dataset_id'])
+
+    def test_proper_variable_export(self):
+        self.assertEqual(self.exported_info['variable'],
+                         self.origin['variable'])
+
+    def test_proper_dummy_username_export(self):
+        self.assertTrue('esgf_username' in self.exported_info)
+
+    def test_proper_dummy_password_export(self):
+        self.assertTrue('esgf_password' in self.exported_info)
+
+    def test_proper_dataset_name_export(self):
+        self.assertEqual(self.exported_info['optional_args']['name'],
+                         self.name)
+
+    def test_proper_units_name_export(self):
+        self.assertEqual(self.exported_info['optional_args']['units'],
+                         self.units)
+
+
+class TestDAPDatasetExportGeneration(unittest.TestCase):
+    @classmethod
+    def setUpClass(self):
+        self.lats = np.array([10, 12, 14, 16, 18])
+        self.lons = np.array([100, 102, 104, 106, 108])
+        self.times = np.array([dt.datetime(2000, x, 1) for x in range(1, 13)])
+        flat_array = np.array(range(300))
+        self.values = flat_array.reshape(12, 5, 5)
+        self.variable = 'var'
+        self.units = 'units'
+        self.origin = {
+            'source': 'dap',
+            'url': 'a fake url',
+        }
+        self.name = 'name'
+
+        self.dataset = Dataset(
+            self.lats,
+            self.lons,
+            self.times,
+            self.values,
+            variable=self.variable,
+            units=self.units,
+            origin=self.origin,
+            name=self.name
+        )
+
+        self.exported_info = writer.generate_dataset_config(self.dataset)
+
+    def test_proper_data_source_export(self):
+        self.assertTrue('data_source' in self.exported_info)
+        self.assertEqual(self.exported_info['data_source'],
+                     self.origin['source'])
+
+    def test_proper_url_export(self):
+        self.assertEqual(self.exported_info['url'],
+                         self.origin['url'])
+
+    def test_proper_dataset_name_export(self):
+        self.assertEqual(self.exported_info['optional_args']['name'],
+                         self.name)
+
+    def test_proper_units_name_export(self):
+        self.assertEqual(self.exported_info['optional_args']['units'],
+                         self.units)
+
+
+class TestDatasetExportFromEvaluation(unittest.TestCase):
+    @classmethod
+    def setUpClass(self):
+        self.lats = np.array([10, 12, 14, 16, 18])
+        self.lons = np.array([100, 102, 104, 106, 108])
+        self.times = np.array([dt.datetime(2000, x, 1) for x in range(1, 13)])
+        flat_array = np.array(range(300))
+        self.values = flat_array.reshape(12, 5, 5)
+        self.variable = 'var'
+        self.units = 'units'
+        self.name = 'name'
+
+        self.local_origin = {
+            'source': 'local',
+            'path': '/a/fake/path.nc',
+            'lat_name': 'a lat name',
+            'lon_name': 'a lon name',
+            'time_name': 'a time name',
+            'elevation_index': 2
+        }
+
+        self.rcmed_origin = {
+            'source': 'rcmed',
+            'dataset_id': 4,
+            'parameter_id': 14
+        }
+
+        self.esgf_origin = {
+            'source': 'esgf',
+            'dataset_id': 'esgf dataset id',
+            'variable': 'var'
+        }
+
+        self.dap_origin = {
+            'source': 'dap',
+            'url': 'a fake url',
+        }
+
+        self.local_ds = Dataset(
+            self.lats,
+            self.lons,
+            self.times,
+            self.values,
+            variable=self.variable,
+            units=self.units,
+            name=self.name,
+            origin=self.local_origin
+        )
+
+        self.rcmed_ds = Dataset(
+            self.lats,
+            self.lons,
+            self.times,
+            self.values,
+            variable=self.variable,
+            units=self.units,
+            name=self.name,
+            origin=self.rcmed_origin
+        )
+
+        self.esgf_ds = Dataset(
+            self.lats,
+            self.lons,
+            self.times,
+            self.values,
+            variable=self.variable,
+            units=self.units,
+            name=self.name,
+            origin=self.esgf_origin
+        )
+
+        self.dap_ds = Dataset(
+            self.lats,
+            self.lons,
+            self.times,
+            self.values,
+            variable=self.variable,
+            units=self.units,
+            name=self.name,
+            origin=self.dap_origin
+        )
+
+        self.evaluation = Evaluation(
+            self.local_ds,
+            [self.rcmed_ds, self.esgf_ds, self.dap_ds],
+            []
+        )
+
+    def test_contains_only_reference_dataset(self):
+        new_eval = Evaluation(self.local_ds, [], [])
+        out = writer.generate_dataset_information(new_eval)
+
+        self.assertTrue('reference' in out)
+        self.assertTrue('targets' not in out)
+
+    def test_contains_only_target_datasets(self):
+        new_eval = Evaluation(None, [self.local_ds], [])
+        out = writer.generate_dataset_information(new_eval)
+
+        self.assertTrue('reference' not in out)
+        self.assertTrue('targets' in out)
+
+    def test_proper_reference_dataset_export(self):
+        out = writer.generate_dataset_information(self.evaluation)
+
+        self.assertTrue('reference' in out)
+        self.assertTrue(out['reference']['data_source'] == 'local')
+
+    def test_proper_target_datasets_export(self):
+        out = writer.generate_dataset_information(self.evaluation)
+
+        self.assertTrue('targets' in out)
+        self.assertTrue(type(out['targets']) == type(list()))
+        self.assertTrue(len(out['targets']) == 3)
+
+
+class TestMetricExportGeneration(unittest.TestCase):
+    @classmethod
+    def setUpClass(self):
+        self.bias = metrics.Bias()
+        self.tmp_std_dev = metrics.TemporalStdDev()
+        loaded_metrics = [self.bias, self.tmp_std_dev]
+
+        self.evaluation = Evaluation(None, [], loaded_metrics)
+
+    def test_proper_export_format(self):
+        out = writer.generate_metric_information(self.evaluation)
+
+        self.assertTrue(type(out) == type(list()))
+
+        for name in out:
+            self.assertTrue(type(name) == type(str()))
+
+    def test_proper_metric_name_export(self):
+        out = writer.generate_metric_information(self.evaluation)
+
+        self.assertTrue(self.bias.__class__.__name__ in out)
+        self.assertTrue(self.tmp_std_dev.__class__.__name__ in out)
+
+    def test_empty_metrics_in_evaluation(self):
+        new_eval = Evaluation(None, [], [])
+        out = writer.generate_metric_information(new_eval)
+
+        self.assertTrue(type(out) == type(list()))
+        self.assertTrue(len(out) == 0)
+
+
+class TestEvaluationSettingsGeneration(unittest.TestCase):
+    @classmethod
+    def setUpClass(self):
+        self.lats = np.array(range(-10, 10, 1))
+        self.lons = np.array(range(-20, 20, 1))
+        self.times = np.array([dt.datetime(2000, x, 1) for x in range(1, 13)])
+        flat_array = np.array(range(9600))
+        self.values = flat_array.reshape(12, 20, 40)
+
+        self.dataset = Dataset(
+            self.lats,
+            self.lons,
+            self.times,
+            self.values,
+        )
+
+        self.evaluation = Evaluation(self.dataset, [], [])
+
+    def test_default_data_return(self):
+        new_eval = Evaluation(None, [], [])
+        default_output = {
+            'temporal_time_delta': 999,
+            'spatial_regrid_lats': (-90, 90, 1),
+            'spatial_regrid_lons': (-180, 180, 1),
+            'subset': [-90, 90, -180, 180, "1500-01-01", "2500-01-01"],
+        }
+
+        out = writer.generate_evaluation_information(new_eval)
+
+        self.assertEquals(default_output, out)
+
+    def test_handles_only_reference_dataset(self):
+        new_eval = Evaluation(self.dataset, [], [])
+
+        default_output = {
+            'temporal_time_delta': 999,
+            'spatial_regrid_lats': (-90, 90, 1),
+            'spatial_regrid_lons': (-180, 180, 1),
+            'subset': [-90, 90, -180, 180, "1500-01-01", "2500-01-01"],
+        }
+
+        out = writer.generate_evaluation_information(new_eval)
+
+        self.assertNotEquals(default_output, out)
+
+    def test_handles_only_target_dataset(self):
+        new_eval = Evaluation(None, [self.dataset], [])
+
+        default_output = {
+            'temporal_time_delta': 999,
+            'spatial_regrid_lats': (-90, 90, 1),
+            'spatial_regrid_lons': (-180, 180, 1),
+            'subset': [-90, 90, -180, 180, "1500-01-01", "2500-01-01"],
+        }
+
+        out = writer.generate_evaluation_information(new_eval)
+
+        self.assertNotEquals(default_output, out)
+
+    def test_daily_temporal_bin(self):
+        new_times = np.array([dt.datetime(2000, 1, 1, x) for x in range(1, 13)])
+
+        dataset = Dataset(
+            self.lats,
+            self.lons,
+            new_times,
+            self.values,
+        )
+        new_eval = Evaluation(dataset, [], [])
+
+        out = writer.generate_evaluation_information(new_eval)
+
+        self.assertEquals(out['temporal_time_delta'], 1)
+
+    def test_monthly_temporal_bin(self):
+        out = writer.generate_evaluation_information(self.evaluation)
+
+        self.assertEquals(out['temporal_time_delta'], 31)
+
+    def test_yearly_temporal_bin(self):
+        new_times = np.array([dt.datetime(2000 + x, 1, 1) for x in range(1, 13)])
+
+        dataset = Dataset(
+            self.lats,
+            self.lons,
+            new_times,
+            self.values,
+        )
+        new_eval = Evaluation(dataset, [], [])
+
+        out = writer.generate_evaluation_information(new_eval)
+
+        self.assertEquals(out['temporal_time_delta'], 366)
+
+    def test_spatial_regrid_lats(self):
+        out = writer.generate_evaluation_information(self.evaluation)
+
+        lats = out['spatial_regrid_lats']
+        lat_range = np.arange(lats[0], lats[1], lats[2])
+
+        self.assertTrue(np.array_equal(lat_range, self.lats))
+
+    def test_spatial_regrid_lons(self):
+        out = writer.generate_evaluation_information(self.evaluation)
+
+        lons = out['spatial_regrid_lons']
+        lat_range = np.arange(lons[0], lons[1], lons[2])
+
+        self.assertTrue(np.array_equal(lat_range, self.lons))
+
+    def test_subset_with_single_dataset(self):
+        out = writer.generate_evaluation_information(self.evaluation)
+        subset = out['subset']
+
+        ds_lat_min, ds_lat_max, ds_lon_min, ds_lon_max = self.dataset.spatial_boundaries()
+        start, end = self.dataset.time_range()
+
+        self.assertEqual(ds_lat_min, subset[0])
+        self.assertEqual(ds_lat_max, subset[1])
+        self.assertEqual(ds_lon_min, subset[2])
+        self.assertEqual(ds_lon_max, subset[3])
+        self.assertEquals(str(start), subset[4])
+        self.assertEquals(str(end), subset[5])
+
+    def test_subset_with_multiple_datasets(self):
+        new_ds = Dataset(
+            np.arange(0, 20, 1),
+            self.lons,
+            self.times,
+            self.values
+        )
+        new_eval = Evaluation(self.dataset, [new_ds], [])
+
+        out = writer.generate_evaluation_information(new_eval)
+        subset = out['subset']
+
+        ds_lat_min, ds_lat_max, ds_lon_min, ds_lon_max = self.dataset.spatial_boundaries()
+        start, end = self.dataset.time_range()
+
+        self.assertEqual(ds_lat_min, subset[0])
+        # Check that we actually used the different max lat value that we
+        # created by adding 'new_ds'.
+        self.assertEqual(max(new_ds.lats), subset[1])
+        self.assertEqual(ds_lon_min, subset[2])
+        self.assertEqual(ds_lon_max, subset[3])
+        self.assertEquals(str(start), subset[4])
+        self.assertEquals(str(end), subset[5])
+
+
+class FullExportTest(unittest.TestCase):
+    @classmethod
+    def setUpClass(self):
+        self.lats = np.array([10, 12, 14, 16, 18])
+        self.lons = np.array([100, 102, 104, 106, 108])
+        self.times = np.array([dt.datetime(2000, x, 1) for x in range(1, 13)])
+        flat_array = np.array(range(300))
+        self.values = flat_array.reshape(12, 5, 5)
+        self.variable = 'var'
+        self.units = 'units'
+        self.name = 'name'
+
+        self.local_origin = {
+            'source': 'local',
+            'path': '/a/fake/path.nc',
+            'lat_name': 'a lat name',
+            'lon_name': 'a lon name',
+            'time_name': 'a time name',
+            'elevation_index': 2
+        }
+
+        self.rcmed_origin = {
+            'source': 'rcmed',
+            'dataset_id': 4,
+            'parameter_id': 14
+        }
+
+        self.esgf_origin = {
+            'source': 'esgf',
+            'dataset_id': 'esgf dataset id',
+            'variable': 'var'
+        }
+
+        self.dap_origin = {
+            'source': 'dap',
+            'url': 'a fake url',
+        }
+
+        self.local_ds = Dataset(
+            self.lats,
+            self.lons,
+            self.times,
+            self.values,
+            variable=self.variable,
+            units=self.units,
+            name=self.name,
+            origin=self.local_origin
+        )
+
+        self.rcmed_ds = Dataset(
+            self.lats,
+            self.lons,
+            self.times,
+            self.values,
+            variable=self.variable,
+            units=self.units,
+            name=self.name,
+            origin=self.rcmed_origin
+        )
+
+        self.esgf_ds = Dataset(
+            self.lats,
+            self.lons,
+            self.times,
+            self.values,
+            variable=self.variable,
+            units=self.units,
+            name=self.name,
+            origin=self.esgf_origin
+        )
+
+        self.dap_ds = Dataset(
+            self.lats,
+            self.lons,
+            self.times,
+            self.values,
+            variable=self.variable,
+            units=self.units,
+            name=self.name,
+            origin=self.dap_origin
+        )
+
+        self.subregions = [
+            Bounds(-10, 10, -20, 20),
+            Bounds(-5, 5, -15, 15)
+        ]
+
+        self.evaluation = Evaluation(
+            self.local_ds,
+            [self.rcmed_ds, self.esgf_ds, self.dap_ds],
+            [metrics.Bias(), metrics.TemporalStdDev()],
+            subregions=self.subregions
+        )
+
+    @classmethod
+    def tearDownClass(self):
+        if os.path.isfile('/tmp/test_config.yaml'):
+            os.remove('/tmp/test_config.yaml')
+
+    def test_full_export(self):
+        file_path = '/tmp/test_config.yaml'
+        writer.export_evaluation_to_config(
+            self.evaluation,
+            file_path=file_path
+        )
+
+        self.assertTrue(os.path.isfile(file_path))
+
+    def test_proper_metric_export(self):
+        file_path = '/tmp/test_config.yaml'
+        writer.export_evaluation_to_config(
+            self.evaluation,
+            file_path=file_path
+        )
+
+        data = yaml.load(open(file_path, 'r'))
+
+        self.assertTrue('metrics' in data)
+        self.assertTrue(type(data['metrics']) == type(list()))
+
+        for metric in self.evaluation.metrics:
+            self.assertTrue(metric.__class__.__name__ in data['metrics'])
+
+        for metric in self.evaluation.unary_metrics:
+            self.assertTrue(metric.__class__.__name__ in data['metrics'])
+
+        total_eval_metrics = (
+            len(self.evaluation.metrics) +
+            len(self.evaluation.unary_metrics)
+        )
+
+        self.assertEqual(total_eval_metrics, len(data['metrics']))
+
+    def test_proper_dataset_export(self):
+        file_path = '/tmp/test_config.yaml'
+        writer.export_evaluation_to_config(
+            self.evaluation,
+            file_path=file_path
+        )
+
+        data = yaml.load(open(file_path, 'r'))
+
+        self.assertTrue('datasets' in data)
+        self.assertTrue('reference' in data['datasets'])
+        self.assertTrue('targets' in data['datasets'])
+
+        self.assertEqual(
+            writer.generate_dataset_information(self.evaluation),
+            data['datasets']
+        )
+
+    def test_proper_evaluation_setting_export(self):
+        file_path = '/tmp/test_config.yaml'
+        writer.export_evaluation_to_config(
+            self.evaluation,
+            file_path=file_path
+        )
+
+        data = yaml.load(open(file_path, 'r'))
+
+        self.assertTrue('evaluation' in data)
+        self.assertTrue('temporal_time_delta' in data['evaluation'])
+        self.assertTrue('spatial_regrid_lats' in data['evaluation'])
+        self.assertTrue('spatial_regrid_lons' in data['evaluation'])
+        self.assertTrue('subset' in data['evaluation'])
+
+        self.assertEqual(
+            writer.generate_evaluation_information(self.evaluation),
+            data['evaluation']
+        )
+
+    def test_proper_subregion_export(self):
+        file_path = '/tmp/test_config.yaml'
+        writer.export_evaluation_to_config(
+            self.evaluation,
+            file_path=file_path
+        )
+
+        data = yaml.load(open(file_path, 'r'))
+
+        self.assertTrue('subregions' in data)
+
+        first_bounds = [
+            self.subregions[0].lat_min,
+            self.subregions[0].lat_max,
+            self.subregions[0].lon_min,
+            self.subregions[0].lon_max,
+        ]
+        second_bounds = [
+            self.subregions[1].lat_min,
+            self.subregions[1].lat_max,
+            self.subregions[1].lon_min,
+            self.subregions[1].lon_max,
+        ]
+
+        self.assertEqual(first_bounds, data['subregions'][0])
+        self.assertEqual(second_bounds, data['subregions'][1])
diff --git a/ocw-config-runner/tests/test_evaluation_creation.py b/ocw-config-runner/tests/test_evaluation_creation.py
new file mode 100644
index 0000000..41f998e
--- /dev/null
+++ b/ocw-config-runner/tests/test_evaluation_creation.py
@@ -0,0 +1,49 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+from mock import patch
+import unittest
+
+import evaluation_creation as eval_create
+import ocw.metrics
+
+import yaml
+
+class TestMetricLoad(unittest.TestCase):
+    def test_valid_metric_load(self):
+        config = yaml.load("""
+            metrics:
+                - Bias
+        """)
+        loaded_metrics = [eval_create._load_metric(m)()
+                          for m in config['metrics']]
+        self.assertTrue(isinstance(loaded_metrics[0], ocw.metrics.Bias))
+
+    @patch('evaluation_creation.logger')
+    def test_invalid_metric_load(self, mock_logger):
+        config = yaml.load("""
+            metrics:
+                - ocw.metrics.Bias
+        """)
+        eval_create._load_metric(config['metrics'][0])
+        error = (
+            'User-defined metrics outside of the ocw.metrics module '
+            'cannot currently be loaded. If you just wanted a metric '
+            'found in ocw.metrics then do not specify the full '
+            'package and module names. See the documentation for examples.'
+        )
+        mock_logger.error.assert_called_with(error)
diff --git a/ocw-vm/init-ocw-vm.sh b/ocw-vm/init-ocw-vm.sh
index 7a71dee..ea89668 100755
--- a/ocw-vm/init-ocw-vm.sh
+++ b/ocw-vm/init-ocw-vm.sh
@@ -46,7 +46,7 @@
 git clone http://git-wip-us.apache.org/repos/asf/climate.git
 
 # Copy the Easy-OCW install script for Ubuntu
-cp climate/easy-ocw/install-ubuntu-12_04.sh .
+cp climate/easy-ocw/install-ubuntu.sh .
 # Copy the requirements files for conda and pip used by Easy-OCW
 cp climate/easy-ocw/*.txt .
 
diff --git a/ocw/data_source/dap.py b/ocw/data_source/dap.py
index 3694410..f5f9b40 100644
--- a/ocw/data_source/dap.py
+++ b/ocw/data_source/dap.py
@@ -59,7 +59,13 @@
     lons = np.array(dataset[lon][:])
     values = np.array(dataset[:])
 
-    return Dataset(lats, lons, times, values, variable, name=name)
+    origin = {
+        'source': 'dap',
+        'url': url
+    }
+
+    return Dataset(lats, lons, times, values, variable,
+                   name=name, origin=origin)
 
 def _convert_times_to_datetime(time):
     '''Convert the OpenDAP time object's values to datetime objects
diff --git a/ocw/data_source/esgf.py b/ocw/data_source/esgf.py
index c5ed03a..b73e2d8 100644
--- a/ocw/data_source/esgf.py
+++ b/ocw/data_source/esgf.py
@@ -17,6 +17,7 @@
 # under the License.
 #
 
+import os
 import urllib2
 
 from ocw.esgf.constants import DEFAULT_ESGF_SEARCH
@@ -35,6 +36,7 @@
                  search_url=DEFAULT_ESGF_SEARCH,
                  elevation_index=0,
                  name='',
+                 save_path='/tmp',
                  **additional_constraints):
     ''' Load an ESGF dataset.
 
@@ -61,6 +63,9 @@
     :param name: (Optional) A name for the loaded dataset.
     :type name: :mod:`string`
 
+    :param save_path: (Optional) Path to where downloaded files should be saved.
+    :type save_path: :mod:`string`
+
     :param additional_constraints: (Optional) Additional key,value pairs to
         pass as constraints to the search wrapper. These can be anything found
         on the ESGF metadata page for a dataset.
@@ -78,17 +83,30 @@
 
     datasets = []
     for url, var in download_data:
-        _download_files([url], esgf_username, esgf_password)
-        datasets.append(local.load_file('/tmp/' + url.split('/')[-1],
+        _download_files([url],
+                        esgf_username,
+                        esgf_password,
+                        download_directory=save_path)
+
+        file_save_path = os.path.join(save_path, url.split('/')[-1])
+        datasets.append(local.load_file(file_save_path,
                                         var,
                                         name=name,
                                         elevation_index=elevation_index))
 
+    origin = {
+        'source': 'esgf',
+        'dataset_id': dataset_id,
+        'variable': variable
+    }
+    for ds in datasets:
+        ds.origin = origin
+
     return datasets
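+
+# Illustrative usage sketch for the loader above (the dataset id, credentials,
+# and save_path below are placeholders, and the function is assumed to be
+# esgf.load_dataset):
+#
+#     datasets = load_dataset('some.esgf.dataset.id', 'tas',
+#                             'my-esgf-username', 'my-esgf-password',
+#                             save_path='/data/esgf_downloads')
+#     datasets[0].origin  # -> {'source': 'esgf', 'dataset_id': ..., 'variable': 'tas'}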
 
 def _get_file_download_data(dataset_id, variable, url=DEFAULT_ESGF_SEARCH):
     ''''''
-    url += '?distrib=false&type=File&dataset_id={}&variable={}'
+    url += '?type=File&dataset_id={}&variable={}'
     url = url.format(dataset_id, variable)
 
     r = requests.get(url)
diff --git a/ocw/data_source/local.py b/ocw/data_source/local.py
index c6405a9..c0d4b07 100644
--- a/ocw/data_source/local.py
+++ b/ocw/data_source/local.py
@@ -17,8 +17,11 @@
 
 import calendar
 from datetime import timedelta ,datetime
+from time import strptime
+from glob import glob
 import re
 import string
+import os
 
 from ocw.dataset import Dataset
 import ocw.utils as utils
@@ -109,8 +112,53 @@
     )
     raise ValueError(error)
 
+def load_WRF_2d_files(file_path,
+                      filename_pattern,
+                      variable_name,
+                      name=''):
+    ''' Load multiple WRF (or nuWRF) original output files containing 2D
+    fields, such as precipitation and surface variables, into a Dataset.
+
+    :param file_path: Directory containing the NetCDF files to load.
+    :type file_path: :mod:`string`
+
+    :param filename_pattern: List of filename patterns to glob within
+        file_path.
+    :type filename_pattern: :list:`string`
+
+    :param variable_name: The variable name to load from the NetCDF files.
+    :type variable_name: :mod:`string`
+
+    :param name: (Optional) A name for the loaded dataset.
+    :type name: :mod:`string`
+
+    :returns: An OCW Dataset object with the requested variable's data from
+        the NetCDF files.
+    :rtype: :class:`dataset.Dataset`
+
+    :raises ValueError:
+    '''
+
+    WRF_files = []
+    for pattern in filename_pattern:
+        WRF_files.extend(glob(file_path + pattern))
+    WRF_files.sort()
+
+    file_object_first = netCDF4.Dataset(WRF_files[0])
+    lats = file_object_first.variables['XLAT'][0,:]
+    lons = file_object_first.variables['XLONG'][0,:]
+
+    times = []
+    for ifile, wrf_file in enumerate(WRF_files):
+        file_object = netCDF4.Dataset(wrf_file)
+        # Filenames are expected to end with a WRF timestamp such as
+        # wrfout_d01_2000-01-01_00:00:00 so hourly times can be reconstructed.
+        time_struct_parsed = strptime(wrf_file[-19:], "%Y-%m-%d_%H:%M:%S")
+        for ihour in numpy.arange(24):
+            times.append(datetime(*time_struct_parsed[:6]) + timedelta(hours=ihour))
+        file_values = file_object.variables[variable_name][:]
+        if ifile == 0:
+            values = file_values
+        else:
+            values = numpy.concatenate((values, file_values))
+        file_object.close()
+    times = numpy.array(times)
+    return Dataset(lats, lons, times, values, variable_name, name=name)
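+
+# Illustrative usage sketch (directory, pattern, and variable below are
+# hypothetical). Filenames must end with a WRF timestamp such as
+# wrfout_d01_2000-01-01_00:00:00 so the date can be parsed from the name:
+#
+#     wrf_precip = load_WRF_2d_files('/data/wrf/',
+#                                    ['wrfout_d01_2000-01*'],
+#                                    'RAINC',
+#                                    name='WRF accumulated precipitation')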
+
 def load_file(file_path,
               variable_name,
+              variable_unit=None,
               elevation_index=0,
               name='',
               lat_name=None,
@@ -124,6 +172,9 @@
     :param variable_name: The variable name to load from the NetCDF file.
     :type variable_name: :mod:`string`
 
+    :param variable_unit: (Optional) The units of the variable. If not
+        supplied, the units are read from the variable's metadata in the file.
+    :type variable_unit: :mod:`string`
+
     :param elevation_index: (Optional) The elevation index for which data should
         be returned. Climate data is often times 4 dimensional data. Some
         datasets will have readins at different height/elevation levels. OCW
@@ -182,6 +233,7 @@
     times = utils.decode_time_values(netcdf, time_name)
     times = numpy.array(times)
     values = ma.array(netcdf.variables[variable_name][:])
+    # Respect a caller-supplied unit; otherwise fall back to the units stored
+    # with the variable in the file.
+    if variable_unit is None:
+        variable_unit = netcdf.variables[variable_name].units
 
     # If the values are 4D then we need to strip out the elevation index
     if len(values.shape) == 4:
@@ -206,4 +258,79 @@
         else:
             values = values [:,:,:,elevation_index]
 
-    return Dataset(lats, lons, times, values, variable_name, name=name)
+    origin = {
+        'source': 'local',
+        'path': file_path,
+        'lat_name': lat_name,
+        'lon_name': lon_name,
+        'time_name': time_name
+    }
+    if elevation_index != 0:
+        origin['elevation_index'] = elevation_index
+
+    return Dataset(lats, lons, times, values, variable=variable_name,
+                   units=variable_unit, name=name, origin=origin)
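+
+# Illustrative usage sketch (the path below is hypothetical). The returned
+# Dataset now carries units and provenance that the configuration writer can
+# re-export:
+#
+#     ds = load_file('/data/model/tas_sample.nc', 'tas')
+#     ds.units   # -> units read from the 'tas' variable's metadata
+#     ds.origin  # -> {'source': 'local', 'path': '/data/model/tas_sample.nc', ...}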
+
+def load_multiple_files(file_path,
+                        filename_pattern,
+                        variable_name,
+                        dataset_name='ref',
+                        variable_unit=None,
+                        lat_name=None,
+                        lon_name=None,
+                        time_name=None):
+    ''' Load multiple NetCDF files with a common filename pattern and return
+    a list of OCW datasets.
+
+    :param file_path: Directory where the NetCDF files to load are stored.
+    :type file_path: :mod:`string`
+
+    :param filename_pattern: List of common filename patterns to glob within
+        file_path.
+    :type filename_pattern: :list:`string`
+
+    :param variable_name: The variable name to load from the NetCDF files.
+    :type variable_name: :mod:`string`
+
+    :param dataset_name: (Optional) The dataset name to use when only a single
+        file matches. When multiple files match, each dataset is named after
+        the unique portion of its filename.
+    :type dataset_name: :mod:`string`
+
+    :param variable_unit: (Optional) The units of the variable. If not
+        supplied, the units are read from each file's variable metadata.
+    :type variable_unit: :mod:`string`
+
+    :param lat_name: (Optional) The latitude variable name to extract from the
+        dataset.
+    :type lat_name: :mod:`string`
+
+    :param lon_name: (Optional) The longitude variable name to extract from the
+        dataset.
+    :type lon_name: :mod:`string`
+
+    :param time_name: (Optional) The time variable name to extract from the
+        dataset.
+    :type time_name: :mod:`string`
+
+    :returns: A list of OCW Dataset objects.
+    :rtype: :class:`list`
+    '''
+
+    data_filenames = []
+    for pattern in filename_pattern:
+        data_filenames.extend(glob(file_path + pattern))
+    data_filenames.sort()
+
+    # number of files
+    ndata = len(data_filenames)
+    if ndata == 1:
+        data_name = [dataset_name]
+    else:
+        data_name = []
+        data_filenames_reversed = []
+        for element in data_filenames:
+            data_filenames_reversed.append(element[::-1])
+        prefix = os.path.commonprefix(data_filenames)
+        postfix = os.path.commonprefix(data_filenames_reversed)[::-1]
+        for element in data_filenames:
+            data_name.append(element.replace(prefix,'').replace(postfix,''))
+
+    datasets = []
+    for ifile, filename in enumerate(data_filenames):
+        datasets.append(load_file(filename, variable_name, variable_unit,
+                                  name=data_name[ifile], lat_name=lat_name,
+                                  lon_name=lon_name, time_name=time_name))
+
+    return datasets
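+
+# Illustrative usage sketch (directory and pattern are hypothetical). With
+# more than one matching file, each dataset is named from the unique portion
+# of its filename:
+#
+#     target_datasets = load_multiple_files('/data/models/',
+#                                           ['*_pr_monthly.nc'],
+#                                           'pr',
+#                                           variable_unit='mm/day')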
diff --git a/ocw/data_source/rcmed.py b/ocw/data_source/rcmed.py
index d184078..baeb326 100644
--- a/ocw/data_source/rcmed.py
+++ b/ocw/data_source/rcmed.py
@@ -347,7 +347,7 @@
     '''
     
     parameters_metadata = get_parameters_metadata()
-    parameter_name, time_step, _, _, _, _, _= _get_parameter_info(parameters_metadata, parameter_id)
+    parameter_name, time_step, _, _, _, _, parameter_units = _get_parameter_info(parameters_metadata, parameter_id)
     url = _generate_query_url(dataset_id, parameter_id, min_lat, max_lat, min_lon, max_lon, start_time, end_time, time_step)
     lats, lons, times, values = _get_data(url)
 
@@ -355,5 +355,18 @@
     unique_times = _calculate_time(unique_lats_lons_times[2], time_step)
     values = _reshape_values(values, unique_lats_lons_times)
     values = _make_mask_array(values, parameter_id, parameters_metadata)
+
+    origin = {
+        'source': 'rcmed',
+        'dataset_id': dataset_id,
+        'parameter_id': parameter_id
+    }
     
-    return Dataset(unique_lats_lons_times[0], unique_lats_lons_times[1], unique_times, values, parameter_name, name=name)
+    return Dataset(unique_lats_lons_times[0],
+                   unique_lats_lons_times[1],
+                   unique_times,
+                   values,
+                   variable=parameter_name,
+                   units=parameter_units,
+                   name=name,
+                   origin=origin)
diff --git a/ocw/dataset.py b/ocw/dataset.py
index 1d4b2d8..e432928 100644
--- a/ocw/dataset.py
+++ b/ocw/dataset.py
@@ -35,7 +35,8 @@
 class Dataset:
     '''Container for a dataset's attributes and data.'''
 
-    def __init__(self, lats, lons, times, values, variable=None, name=""):
+    def __init__(self, lats, lons, times, values, variable=None, units=None,
+                 origin=None, name=""):
         '''Default Dataset constructor
 
         :param lats: One dimensional numpy array of unique latitude values.
@@ -55,9 +56,16 @@
         :param variable: Name of the value variable.
         :type variable: :mod:`string`
 
+        :param units: Name of the value units
+        :type units: :mod:`string`
+
         :param name: An optional string name for the Dataset.
         :type name: :mod:`string`
 
+        :param origin: An optional object used to specify information on where
+            this dataset was loaded from.
+        :type origin: :class:`dict`
+
         :raises: ValueError
         '''
         self._validate_inputs(lats, lons, times, values)
@@ -68,7 +76,9 @@
         self.times = times
         self.values = values
         self.variable = variable
+        self.units = units
         self.name = name
+        self.origin = origin
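+        # Note: 'origin' is free-form provenance metadata. The configuration
+        # writer expects dicts such as {'source': 'local', 'path': ...} or
+        # {'source': 'dap', 'url': ...} when exporting an evaluation to a
+        # config file.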
 
     def spatial_boundaries(self):
         '''Calculate the spatial boundaries.
@@ -162,25 +172,30 @@
         value_dim = len(values.shape)
         lat_count = lats.shape[0]
         lon_count = lons.shape[0]
+
+        if lat_dim == 2 and lon_dim == 2:
+            lon_count = lons.shape[1]
         time_count = times.shape[0]
         
-        if lat_dim != 1:
-            err_msg = "Latitude Array should be 1 dimensional.  %s dimensions found." % lat_dim
-        elif lon_dim != 1:
-            err_msg = "Longitude Array should be 1 dimensional. %s dimensions found." % lon_dim
-        elif time_dim != 1:
+        if time_dim != 1:
             err_msg = "Time Array should be 1 dimensional.  %s dimensions found." % time_dim
-        elif value_dim != 3:
-            err_msg = "Value Array should be 3 dimensional.  %s dimensions found." % value_dim
+        elif value_dim < 2:
+            err_msg = "Value Array should be at least 2 dimensional.  %s dimensions found." % value_dim
         # Finally check that the Values array conforms to the proper shape
-        elif values.shape != (time_count, lat_count, lon_count):
+        if value_dim == 2 and values.shape != (lat_count, lon_count):
             err_msg = """Value Array must be of shape (times, lats, lons).
-Expected shape (%s, %s, %s) but received (%s, %s, %s)""" % (time_count,
-                                                            lat_count,
-                                                            lon_count,
-                                                            values.shape[0],
-                                                            values.shape[1],
-                                                            values.shape[2])
+    Expected shape (%s, %s) but received (%s, %s)""" % (lat_count,
+                                                                lon_count,
+                                                                values.shape[0],
+                                                                values.shape[1])
+        if value_dim == 3 and values.shape != (time_count, lat_count, lon_count):
+            err_msg = """Value Array must be of shape (times, lats, lons).
+    Expected shape (%s, %s, %s) but received (%s, %s, %s)""" % (time_count,
+                                                                lat_count,
+                                                                lon_count,
+                                                                values.shape[0],
+                                                                values.shape[1],
+                                                                values.shape[2])
         if err_msg:
             logger.error(err_msg)
             raise ValueError(err_msg)
@@ -197,7 +212,8 @@
             "lat-range: {}, "
             "lon-range: {}, "
             "time_range: {}, "
-            "var: {}>"
+            "var: {}, "
+            "units: {}>"
         )
 
         return formatted_repr.format(
@@ -205,7 +221,8 @@
             lat_range,
             lon_range,
             time_range,
-            self.variable
+            self.variable,
+            self.units
         )
 
 
@@ -224,7 +241,7 @@
     * Temporal bounds must a valid datetime object
     '''
 
-    def __init__(self, lat_min, lat_max, lon_min, lon_max, start, end):
+    def __init__(self, lat_min, lat_max, lon_min, lon_max, start=None, end=None):
         '''Default Bounds constructor
 
         :param lat_min: The minimum latitude bound.
@@ -239,10 +256,10 @@
         :param lon_max: The maximum longitude bound.
         :type lon_max: :class:`float`
 
-        :param start: The starting datetime bound.
+        :param start: An optional datetime object for the starting datetime bound.
         :type start: :class:`datetime.datetime`
 
-        :param end: The ending datetime bound.
+        :param end: An optional datetime object for the ending datetime bound.
         :type end: :class:`datetime.datetime`
 
         :raises: ValueError
@@ -251,9 +268,17 @@
         self._lat_max = float(lat_max)
         self._lon_min = float(lon_min)
         self._lon_max = float(lon_max)
-        self._start = start
-        self._end = end
 
+        if start:
+            self._start = start
+        else:
+            self._start = None
+
+        if end:
+            self._end = end
+        else:
+            self._end = None
+       
     @property
     def lat_min(self):
         return self._lat_min
@@ -312,10 +337,11 @@
 
     @start.setter
     def start(self, value):
-        if not (type(value) is dt.datetime and value < self._end):
-            error = "Attempted to set start to invalid value: %s" % (value)
-            logger.error(error)
-            raise ValueError(error)
+        if self._end:
+            if not (type(value) is dt.datetime and value < self._end):
+                error = "Attempted to set start to invalid value: %s" % (value)
+                logger.error(error)
+                raise ValueError(error)
 
         self._start = value
 
@@ -325,16 +351,17 @@
 
     @end.setter
     def end(self, value):
-        if not (type(value) is dt.datetime and value > self._start):
-            error = "Attempted to set end to invalid value: %s" % (value)
-            logger.error(error)
-            raise ValueError(error)
+        if self._start:
+            if not (type(value) is dt.datetime and value > self._start):
+                error = "Attempted to set end to invalid value: %s" % (value)
+                logger.error(error)
+                raise ValueError(error)
 
         self._end = value
 
     def __str__(self):
-        lat_range = "({}, {})".format(self._lat_min, self._lon_min)
-        lon_range = "({}, {})".format(self._lon_min, self._lon_min)
+        lat_range = "({}, {})".format(self._lat_min, self._lat_max)
+        lon_range = "({}, {})".format(self._lon_min, self._lon_max)
         time_range = "({}, {})".format(self._start, self._end)
 
         formatted_repr = (
diff --git a/ocw/dataset_processor.py b/ocw/dataset_processor.py
old mode 100644
new mode 100755
index 37296f2..d7b1a3f
--- a/ocw/dataset_processor.py
+++ b/ocw/dataset_processor.py
@@ -29,7 +29,80 @@
 
 logger = logging.getLogger(__name__)
 
-def temporal_rebin(target_dataset, temporal_resolution):
+def temporal_subset(month_start, month_end, target_dataset, average_each_year=False):
+    """ Temporally subset data given month_index.
+
+    :param month_start: An integer for beginning month (Jan=1)
+    :type month_start: :class:`int`
+
+    :param month_end: An integer for ending month (Jan=1)
+    :type month_end: :class:`int`
+
+    :param target_dataset: Dataset object that needs temporal subsetting
+    :type target_dataset: Open Climate Workbench Dataset Object
+
+    :param average_each_year: If True, the output dataset is averaged over the
+        selected months in each year
+    :type average_each_year: :class:`bool`
+
+    :returns: A temporally subset OCW Dataset
+    :rtype: Open Climate Workbench Dataset Object
+    """
+
+    if month_start > month_end:
+        month_index = range(month_start,13)
+        month_index.extend(range(1, month_end+1))
+    else:
+        month_index = range(month_start, month_end+1)
+
+    dates = target_dataset.times
+    months = np.array([d.month for d in dates])
+    time_index = []
+    for m_value in month_index:
+        time_index = np.append(time_index, np.where(months == m_value)[0])
+        if m_value == month_index[0]:
+            time_index_first = np.min(np.where(months == m_value)[0])
+        if m_value == month_index[-1]:
+            time_index_last = np.max(np.where(months == m_value)[0])
+
+    time_index = np.sort(time_index)
+
+    time_index = time_index[np.where((time_index >= time_index_first) & (time_index <= time_index_last))]
+
+    time_index = list(time_index)
+
+    new_dataset = ds.Dataset(target_dataset.lats,
+                             target_dataset.lons,
+                             target_dataset.times[time_index],
+                             target_dataset.values[time_index,:],
+                             variable=target_dataset.variable,
+                             units=target_dataset.units,
+                             name=target_dataset.name)
+
+    if average_each_year:
+        nmonth = len(month_index)
+        ntime  = new_dataset.times.size
+        nyear = ntime/nmonth
+        averaged_time = []              
+        ny, nx = target_dataset.values.shape[1:]
+        averaged_values = ma.zeros([nyear, ny, nx])
+        for iyear in np.arange(nyear):
+            # centered time index of the season between month_start and month_end in each year
+            center_index = int(nmonth/2)+iyear*nmonth   
+            if nmonth == 1:
+                center_index = iyear
+            averaged_time.append(new_dataset.times[center_index]) 
+            averaged_values[iyear,:] = ma.average(new_dataset.values[nmonth*iyear:nmonth*iyear+nmonth,:], axis=0)
+        new_dataset = ds.Dataset(target_dataset.lats,
+                                 target_dataset.lons,
+                                 np.array(averaged_time),
+                                 averaged_values,
+                                 variable=target_dataset.variable,
+                                 units=target_dataset.units,
+                                 name=target_dataset.name)
+    
+    return new_dataset
+
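+# Usage sketch (illustrative; ``target`` is a monthly OCW Dataset loaded elsewhere).
+# A wrap-around season such as DJF is requested with month_start > month_end:
+#     >>> djf = temporal_subset(12, 2, target, average_each_year=True)
+#     >>> djf.values.shape[0]   # one DJF mean per year of input data
+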
+def temporal_rebin(target_dataset, temporal_resolution):     
     """ Rebin a Dataset to a new temporal resolution
     
     :param target_dataset: Dataset object that needs temporal rebinned
@@ -61,8 +134,10 @@
                              target_dataset.lons, 
                              binned_dates, 
                              binned_values,
-                             target_dataset.variable,
-                             target_dataset.name)
+                             variable=target_dataset.variable,
+                             units=target_dataset.units,
+                             name=target_dataset.name,
+                             origin=target_dataset.origin)
     
     return new_dataset
 
@@ -116,13 +191,18 @@
                                    new_longitudes, 
                                    target_dataset.times, 
                                    new_values,
-                                   target_dataset.variable,
-                                   target_dataset.name)
+                                   variable=target_dataset.variable,
+                                   units=target_dataset.units,
+                                   name=target_dataset.name,
+                                   origin=target_dataset.origin)
     return regridded_dataset
 
 def ensemble(datasets):
     """
     Generate a single dataset which is the mean of the input datasets
+
+    An ensemble dataset combines the input datasets, assuming they all have
+    the same shape, dimensions, and units.
     
     :param datasets: Datasets to be used to compose the ensemble dataset from.
         All Datasets must be the same shape.
@@ -133,18 +213,19 @@
     """
     _check_dataset_shapes(datasets)
     dataset_values = [dataset.values for dataset in datasets]
-    ensemble_values = np.mean(dataset_values, axis=0)
+    ensemble_values = ma.mean(dataset_values, axis=0)
     
     # Build new dataset object from the input datasets and the ensemble values and return it
     ensemble_dataset = ds.Dataset(datasets[0].lats, 
                                   datasets[0].lons, 
                                   datasets[0].times,
                                   ensemble_values,
+                                  units=datasets[0].units,
                                   name="Dataset Ensemble")
     
     return ensemble_dataset
 
-def subset(subregion, target_dataset):
+def subset(subregion, target_dataset, subregion_name=None):
     '''Subset given dataset(s) with subregion information
 
     :param subregion: The Bounds with which to subset the target Dataset. 
@@ -153,18 +234,52 @@
     :param target_dataset: The Dataset object to subset.
     :type target_dataset: :class:`dataset.Dataset`
 
+    :param subregion_name: The subset-ed Dataset name
+    :type subregion_name: :mod:`string`
+
     :returns: The subset-ed Dataset object
     :rtype: :class:`dataset.Dataset`
 
     :raises: ValueError
     '''
 
+    if not subregion.start:
+        subregion.start = target_dataset.times[0] 
+        subregion.end = target_dataset.times[-1]
+        
     # Ensure that the subregion information is well formed
     _are_bounds_contained_by_dataset(subregion, target_dataset)
 
     # Get subregion indices into subregion data
     dataset_slices = _get_subregion_slice_indices(subregion, target_dataset)
 
+    if not subregion_name:
+        subregion_name = target_dataset.name
+
+    # Slice the values array with our calculated slice indices
+    if target_dataset.values.ndim == 2:
+        subset_values = target_dataset.values[
+            dataset_slices["lat_start"]:dataset_slices["lat_end"] + 1,
+            dataset_slices["lon_start"]:dataset_slices["lon_end"] + 1]
+
+    elif target_dataset.values.ndim == 3:
+        subset_values = target_dataset.values[
+            dataset_slices["time_start"]:dataset_slices["time_end"] + 1,
+            dataset_slices["lat_start"]:dataset_slices["lat_end"] + 1,
+            dataset_slices["lon_start"]:dataset_slices["lon_end"] + 1]
+
     # Build new dataset with subset information
     return ds.Dataset(
         # Slice the lats array with our calculated slice indices
@@ -177,15 +292,15 @@
         target_dataset.times[dataset_slices["time_start"]: 
                             dataset_slices["time_end"]+ 1],
         # Slice the values array with our calculated slice indices
-        target_dataset.values[
-            dataset_slices["time_start"]:dataset_slices["time_end"] + 1,
-            dataset_slices["lat_start"]:dataset_slices["lat_end"] + 1,
-            dataset_slices["lon_start"]:dataset_slices["lon_end"] + 1],
-        target_dataset.variable,
-        target_dataset.name
+        subset_values,
+        variable=target_dataset.variable,
+        units=target_dataset.units,
+        name=subregion_name,
+        origin=target_dataset.origin
     )
 
-def safe_subset(subregion, target_dataset):
+
+def safe_subset(subregion, target_dataset, subregion_name=None):
     '''Safely subset given dataset with subregion information
 
     A standard subset requires that the provided subregion be entirely contained
@@ -198,6 +313,9 @@
     :param target_dataset: The Dataset object to subset.
     :type target_dataset: :class:`dataset.Dataset`
 
+    :param subregion_name: The subset-ed Dataset name
+    :type subregion_name: :mod:`string`
+
     :returns: The subset-ed Dataset object
     :rtype: :class:`dataset.Dataset`
     '''
@@ -217,13 +335,15 @@
     if subregion.lon_max > lon_max:
         subregion.lon_max = lon_max
 
-    if subregion.start < start:
-        subregion.start = start
+    if subregion.start: 
+        if subregion.start < start:
+            subregion.start = start
 
-    if subregion.end > end:
-        subregion.end = end
+    if subregion.end:
+        if subregion.end > end:
+            subregion.end = end
 
-    return subset(subregion, target_dataset)
+    return subset(subregion, target_dataset, subregion_name)
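+
+# Usage sketch (illustrative; ``target`` is an OCW Dataset defined elsewhere).
+# With start/end omitted from Bounds, subset() falls back to the dataset's full
+# time range and safe_subset() clips only the spatial bounds:
+#     >>> from ocw.dataset import Bounds
+#     >>> tropics = Bounds(-15, 15, -30, 30)
+#     >>> tropics_ds = safe_subset(tropics, target, subregion_name='tropics')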
 
 def normalize_dataset_datetimes(dataset, timestep):
     ''' Normalize Dataset datetime values.
@@ -247,8 +367,10 @@
         dataset.lons,
         np.array(new_times),
         dataset.values,
-        dataset.variable,
-        dataset.name
+        variable=dataset.variable,
+        units=dataset.units,
+        name=dataset.name,
+        origin=dataset.origin
     )
 
 def write_netcdf(dataset, path, compress=True):
@@ -295,9 +417,146 @@
     lons[:] = dataset.lons
     times[:] = netCDF4.date2num(dataset.times, times.units)
     values[:] = dataset.values
+    values.units = dataset.units
 
     out_file.close()
 
+def write_netcdf_multiple_datasets_with_subregions(ref_dataset, ref_name, 
+                                                   model_dataset_array, model_names,
+                                                   path,
+                                                   subregions = None, subregion_array = None,
+                                                   ref_subregion_mean = None, ref_subregion_std = None, 
+                                                   model_subregion_mean = None, model_subregion_std = None):
+    # Write multiple reference and model datasets and their subregional means
+    # and standard deviations to a NetCDF file.
+
+    # :To be updated
+    #
+    out_file = netCDF4.Dataset(path, 'w', format='NETCDF4')
+
+    dataset = ref_dataset
+    # Set attribute lengths
+    nobs = 1
+    nmodel = len(model_dataset_array)
+    lat_len = len(dataset.lats)
+    lon_len = len(dataset.lons)
+    time_len = len(dataset.times)
+
+    if subregions is not None:
+        nsubregion = len(subregions)
+                 
+    # Create attribute dimensions
+    lat_dim = out_file.createDimension('y', lat_len)
+    lon_dim = out_file.createDimension('x', lon_len)
+    time_dim = out_file.createDimension('time', time_len)
+
+    # Create variables and store the values
+    lats = out_file.createVariable('lat', 'f8', ('y'))
+    lats[:] = dataset.lats
+    lons = out_file.createVariable('lon', 'f8', ('x'))
+    lons[:] = dataset.lons
+    times = out_file.createVariable('time', 'f8', ('time',))
+    times.units = "days since %s" % dataset.times[0]
+    times[:] = netCDF4.date2num(dataset.times, times.units)
+
+    #mask_array = np.zeros([time_len, lat_len, lon_len])
+    #for iobs in np.arange(nobs):
+    #    index = np.where(ref_dataset_array[iobs].values.mask[:] == True)
+    #    mask_array[index] = 1
+    out_file.createVariable(ref_name, 'f8', ('time','y','x'))
+    out_file.variables[ref_name][:] = ref_dataset.values
+    out_file.variables[ref_name].units = ref_dataset.units
+    for imodel in np.arange(nmodel):
+        out_file.createVariable(model_names[imodel], 'f8', ('time','y','x'))
+        #out_file.variables[model_names[imodel]][:] = ma.array(model_dataset_array[imodel].values, mask = mask_array)
+        out_file.variables[model_names[imodel]][:] = model_dataset_array[imodel].values
+        out_file.variables[model_names[imodel]].units = model_dataset_array[imodel].units
+
+    if subregions is not None:
+        out_file.createVariable('subregion_array', 'i4', ('y','x'))
+        out_file.variables['subregion_array'][:] = subregion_array[:]
+        nsubregion = len(subregions)
+        out_file.createDimension('nsubregion', nsubregion)
+        out_file.createDimension('nobs', nobs)
+        out_file.createDimension('nmodel', nmodel)
+        out_file.createVariable('obs_subregion_mean', 'f8', ('nobs','time','nsubregion'))
+        out_file.variables['obs_subregion_mean'][:] = ref_subregion_mean[:]
+        out_file.createVariable('obs_subregion_std', 'f8', ('nobs','time','nsubregion'))
+        out_file.variables['obs_subregion_std'][:] = ref_subregion_std[:]
+        out_file.createVariable('model_subregion_mean', 'f8', ('nmodel','time','nsubregion'))
+        out_file.variables['model_subregion_mean'][:] = model_subregion_mean[:]
+        out_file.createVariable('model_subregion_std', 'f8', ('nmodel','time','nsubregion'))
+        out_file.variables['model_subregion_std'][:] = model_subregion_std[:]
+
+    out_file.close()
+
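+# Usage sketch (minimal call; the names are hypothetical and the optional
+# subregion statistics are omitted). The reference and model Datasets are
+# assumed to share the same grid and times:
+#     >>> write_netcdf_multiple_datasets_with_subregions(ref, 'obs', [m1, m2],
+#     ...                                                ['model1', 'model2'],
+#     ...                                                'evaluation.nc')
+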
+def water_flux_unit_conversion(dataset):
+    ''' Convert water flux variable units as necessary
+
+    Convert water flux variables from full SI units to more common units.
+
+    :param dataset: The dataset to convert.
+    :type dataset: :class:`dataset.Dataset`
+
+    :returns: A Dataset with values converted to new units.
+    :rtype: :class:`dataset.Dataset`
+    '''
+    water_flux_variables = ['pr', 'prec','evspsbl', 'mrro', 'swe']
+    variable = dataset.variable.lower()
+
+    if any(sub_string in variable for sub_string in water_flux_variables):
+        dataset_units = dataset.units.lower()
+        if 'swe' in variable:
+            if any(unit in dataset_units for unit in ['m', 'meter']):
+                dataset.values = 1.e3 * dataset.values
+                dataset.units = 'mm'
+        else:
+            if any(unit in dataset_units 
+                for unit in ['kg m-2 s-1', 'mm s-1', 'mm/sec']):
+                dataset.values = 86400. * dataset.values
+                dataset.units = 'mm/day'
+
+    return dataset
+
+def temperature_unit_conversion(dataset):
+    ''' Convert temperature units as necessary
+
+    Automatically convert Celsius to Kelvin in the given dataset.
+
+    :param dataset: The dataset for which units should be updated.
+    :type dataset: :class:`dataset.Dataset`
+
+    :returns: The dataset with (potentially) updated units.
+    :rtype: :class:`dataset.Dataset`
+    '''
+    temperature_variables = ['temp', 'tas', 'tasmax', 'tasmin', 'T']
+    variable = dataset.variable.lower()
+
+    if any(sub_string in variable for sub_string in temperature_variables):
+        dataset_units = dataset.units.lower()
+        if dataset_units == 'c':
+            dataset.values = 273.15 + dataset.values
+            dataset.units = 'K'
+
+    return dataset
+
+def variable_unit_conversion(dataset):
+    ''' Convert water flux or temperature variable units as necessary
+
+    For water flux variables, convert from full SI units to more common units.
+    For temperature, convert Celsius to Kelvin.
+
+    :param dataset: The dataset to convert.
+    :type dataset: :class:`dataset.Dataset`
+
+    :returns: A Dataset with values converted to new units.
+    :rtype: :class:`dataset.Dataset`
+    '''
+
+    dataset = water_flux_unit_conversion(dataset)
+    dataset = temperature_unit_conversion(dataset)
+    
+    return dataset
+
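+# Usage sketch (``my_dataset`` is an OCW Dataset defined elsewhere; only
+# matching variables/units are converted, everything else passes through
+# unchanged):
+#     >>> my_dataset = water_flux_unit_conversion(my_dataset)   # e.g. kg m-2 s-1 -> mm/day
+#     >>> my_dataset = temperature_unit_conversion(my_dataset)  # C -> K
+#     >>> my_dataset = variable_unit_conversion(my_dataset)     # applies whichever rule matches
+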
 def _rcmes_normalize_datetimes(datetimes, timestep):
     """ Normalize Dataset datetime values.
 
@@ -333,6 +592,24 @@
 
     return normalDatetimes
 
+def mask_missing_data(dataset_array):
+    ''' Check for missing values in observation and model datasets.
+
+    If any dataset in dataset_array has a missing value at a grid point, the
+    values at that grid point in all of the other datasets are masked.
+
+    :param dataset_array: a list of OCW datasets
+    '''
+    mask_array = np.zeros(dataset_array[0].values.shape)
+    for dataset in dataset_array:
+        index = np.where(dataset.values.mask == True)
+        if index[0].size >0:
+            mask_array[index] = 1
+    masked_array = []
+    for dataset in dataset_array:
+        dataset.values = ma.array(dataset.values, mask=mask_array)
+        masked_array.append(dataset)
+    return masked_array
+
+
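+# Usage sketch (the inputs are co-registered OCW Datasets whose values carry a
+# mask); after the call every dataset is masked wherever any one was missing:
+#     >>> obs, model_a, model_b = mask_missing_data([obs, model_a, model_b])
+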
 def _rcmes_spatial_regrid(spatial_values, lat, lon, lat2, lon2, order=1):
     '''
     Spatial regrid from one set of lat,lon values onto a new set (lat2,lon2)
diff --git a/ocw/evaluation.py b/ocw/evaluation.py
index 96c19ad..ff9ad33 100644
--- a/ocw/evaluation.py
+++ b/ocw/evaluation.py
@@ -25,6 +25,8 @@
 from dataset import Dataset, Bounds
 import ocw.dataset_processor as DSP
 
+import numpy.ma as ma
+
 logger = logging.getLogger(__name__)
 
 class Evaluation(object):
@@ -234,7 +236,10 @@
                 self.results = self._run_no_subregion_evaluation()
 
         if self._should_run_unary_metrics():
-            self.unary_results = self._run_unary_metric_evaluation()
+            if self.subregions:
+                self.unary_results = self._run_subregion_unary_evaluation()
+            else:
+                self.unary_results = self._run_unary_metric_evaluation()
 
     def _evaluation_is_valid(self):
         '''Check if the evaluation is well-formed.
@@ -257,7 +262,7 @@
         elif run_unary:
             return unary_valid
         else:
-            return false
+            return False
 
     def _should_run_regular_metrics(self):
         return len(self.metrics) > 0
@@ -267,19 +272,22 @@
 
     def _run_subregion_evaluation(self):
         results = []
+        new_refs = [DSP.subset(s, self.ref_dataset) for s in self.subregions]
+
         for target in self.target_datasets:
             results.append([])
+            new_targets = [DSP.subset(s, target) for s in self.subregions]
+
             for metric in self.metrics:
                 results[-1].append([])
-                for subregion in self.subregions:
-                    # Subset the reference and target dataset with the 
-                    # subregion information.
-                    new_ref = DSP.subset(subregion, self.ref_dataset)
-                    new_tar = DSP.subset(subregion, target)
+
+                for i in range(len(self.subregions)):
+                    new_ref = new_refs[i]
+                    new_tar = new_targets[i]
 
                     run_result = metric.run(new_ref, new_tar)
                     results[-1][-1].append(run_result)
-        return results
+        return convert_evaluation_result(results, subregion=True)
 
     def _run_no_subregion_evaluation(self):
         results = []
@@ -288,7 +296,7 @@
             for metric in self.metrics:
                 run_result = metric.run(self.ref_dataset, target)
                 results[-1].append(run_result)
-        return results
+        return convert_evaluation_result(results)
 
     def _run_unary_metric_evaluation(self):
         unary_results = []
@@ -300,7 +308,31 @@
 
             for target in self.target_datasets:
                 unary_results[-1].append(metric.run(target))
-        return unary_results
+        return convert_unary_evaluation_result(unary_results)
+
+    def _run_subregion_unary_evaluation(self):
+        unary_results = []
+        if self.ref_dataset:
+            new_refs = [DSP.subset(s, self.ref_dataset) for s in self.subregions]
+
+        new_targets = [
+            [DSP.subset(s, t) for s in self.subregions]
+            for t in self.target_datasets
+        ]
+
+        for metric in self.unary_metrics:
+            unary_results.append([])
+
+            for i in range(len(self.subregions)):
+                unary_results[-1].append([])
+
+                if self.ref_dataset:
+                    unary_results[-1][-1].append(metric.run(new_refs[i]))
+
+                for t in range(len(self.target_datasets)):
+                    unary_results[-1][-1].append(metric.run(new_targets[t][i]))
+
+        return convert_unary_evaluation_result(unary_results, subregion=True)
 
     def __str__(self):
         formatted_repr = (
@@ -319,3 +351,65 @@
             str(self.subregions)
         )
 
+def convert_evaluation_result(evaluation_result, subregion=False):
+    if not subregion:
+        nmodel = len(evaluation_result)
+        nmetric = len(evaluation_result[0])
+        results = [] 
+        for imetric in range(nmetric):
+            result_shape = list(evaluation_result[0][imetric].shape)
+            result_shape.insert(0, nmodel)
+            result = ma.zeros(result_shape)
+            for imodel in range(nmodel):
+                result[imodel,:] = evaluation_result[imodel][imetric]
+            results.append(result)
+        return results
+    else:
+        nmodel = len(evaluation_result)
+        nmetric = len(evaluation_result[0])
+        nsubregion = len(evaluation_result[0][0])
+
+        results = []
+        for isubregion in range(nsubregion):
+            subregion_results = []
+            for imetric in range(nmetric):
+                result_shape = list(evaluation_result[0][imetric][isubregion].shape)
+                result_shape.insert(0, nmodel)
+                result = ma.zeros(result_shape)
+                for imodel in range(nmodel):
+                    result[imodel,:] = evaluation_result[imodel][imetric][isubregion]
+                subregion_results.append(result)
+            results.append(subregion_results)
+        return results
+             
+def convert_unary_evaluation_result(evaluation_result, subregion=False):
+    if not subregion:
+        nmetric = len(evaluation_result)
+        nmodel = len(evaluation_result[0])
+        results = []
+        for imetric in range(nmetric):
+            result_shape = list(evaluation_result[imetric][0].shape)
+            result_shape.insert(0, nmodel)
+            result = ma.zeros(result_shape)
+            for imodel in range(nmodel):
+                result[imodel,:] = evaluation_result[imetric][imodel]
+            results.append(result)
+        return results
+    else:
+        nmetric = len(evaluation_result)
+        nsubregion = len(evaluation_result[0])
+        nmodel = len(evaluation_result[0][0])
+
+        results = []
+        for isubregion in range(nsubregion):
+            subregion_results = []
+            for imetric in range(nmetric):
+                result_shape = list(evaluation_result[imetric][isubregion][0].shape)
+                result_shape.insert(0, nmodel)
+                result = ma.zeros(result_shape)
+                for imodel in range(nmodel):
+                    result[imodel,:] = evaluation_result[imetric][isubregion][imodel]
+                subregion_results.append(result)
+            results.append(subregion_results)
+        return results
+
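+# Layout sketch for the converted results (illustrative; ``raw`` and ``raw_sub``
+# stand for the nested lists built by the evaluation loops above):
+#     >>> results = convert_evaluation_result(raw)                   # raw[model][metric]
+#     >>> results[0].shape[0]                                        # number of target datasets
+#     >>> sub = convert_evaluation_result(raw_sub, subregion=True)   # raw_sub[model][metric][subregion]
+#     >>> sub[0][0].shape[0]                                         # per-model axis of subregion 0, metric 0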
diff --git a/ocw/metrics.py b/ocw/metrics.py
index 855d4e3..62f4e3d 100644
--- a/ocw/metrics.py
+++ b/ocw/metrics.py
@@ -23,7 +23,8 @@
 from abc import ABCMeta, abstractmethod
 import ocw.utils as utils
 import numpy
-from scipy import stats
+import numpy.ma as ma
+from scipy.stats import mstats
 
 class Metric(object):
     '''Base Metric Class'''
@@ -86,7 +87,28 @@
         :returns: The difference between the reference and target datasets.
         :rtype: :class:`numpy.ndarray`
         '''
-        return ref_dataset.values - target_dataset.values
+        return calc_bias(target_dataset.values, ref_dataset.values)
+
+class SpatialPatternTaylorDiagram(BinaryMetric):
+    ''' Calculate the target-to-reference ratio of spatial standard deviations and the pattern correlation'''
+
+    def run(self, ref_dataset, target_dataset):
+        '''Calculate two metrics to plot a Taylor diagram to compare spatial patterns      
+
+        .. note::
+           Overrides BinaryMetric.run() 
+        
+        :param ref_dataset: The reference dataset to use in this metric run.
+        :type ref_dataset: :class:`dataset.Dataset`
+
+        :param target_dataset: The target dataset to evaluate against the
+            reference dataset in this metric run.
+        :type target_dataset: :class:`dataset.Dataset`
+
+        :returns: standard deviation ratio and pattern correlation coefficient
+        :rtype: :class:`numpy.ma.core.MaskedArray` of two floats
+        '''
+        return ma.array([calc_stddev_ratio(target_dataset.values, ref_dataset.values),
+                         calc_correlation(target_dataset.values, ref_dataset.values)])
 
 
 class TemporalStdDev(UnaryMetric):
@@ -105,7 +127,7 @@
         :returns: The temporal standard deviation of the target dataset
         :rtype: :class:`ndarray`
         '''
-        return target_dataset.values.std(axis=0, ddof=1)
+        return calc_stddev(target_dataset.values, axis=0)
 
 
 class StdDevRatio(BinaryMetric):
@@ -126,7 +148,8 @@
 
         :returns: The standard deviation ratio of the reference and target
         '''
-        return target_dataset.values.std() / ref_dataset.values.std()
+        return calc_stddev_ratio(target_dataset.values, ref_dataset.values)
 
 
 class PatternCorrelation(BinaryMetric):
@@ -150,7 +173,8 @@
         # stats.pearsonr returns correlation_coefficient, 2-tailed p-value
         # We only care about the correlation coefficient
         # Docs at http://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html
-        return stats.pearsonr(ref_dataset.values.flatten(), target_dataset.values.flatten())[0]
+
+        return calc_correlation(target_dataset.values, ref_dataset.values)
 
 
 class TemporalCorrelation(BinaryMetric):
@@ -177,24 +201,19 @@
             coefficients
         '''
         num_times, num_lats, num_lons = reference_dataset.values.shape
-        coefficients = numpy.zeros([num_lats, num_lons])
-        levels = numpy.zeros([num_lats, num_lons])
+        coefficients = ma.zeros([num_lats, num_lons])
         for i in numpy.arange(num_lats):
             for j in numpy.arange(num_lons):
-                coefficients[i, j], levels[i, j] = (
-                    stats.pearsonr(
-                        reference_dataset.values[:, i, j],
-                        target_dataset.values[:, i, j]
-                    )
-                )
-                levels[i, j] = 1 - levels[i, j]
-        return coefficients, levels 
+                coefficients[i, j] = calc_correlation(
+                        target_dataset.values[:, i, j],
+                        reference_dataset.values[:, i, j])
+        return coefficients 
 
 
 class TemporalMeanBias(BinaryMetric):
     '''Calculate the bias averaged over time.'''
 
-    def run(self, ref_dataset, target_dataset, absolute=False):
+    def run(self, ref_dataset, target_dataset):
         '''Calculate the bias averaged over time.
 
         .. note::
@@ -210,37 +229,7 @@
         :returns: The mean bias between a reference and target dataset over time.
         '''
 
-        diff = ref_dataset.values - target_dataset.values
-        if absolute:
-            diff = abs(diff)
-        mean_bias = diff.mean(axis=0)
-
-        return mean_bias
-
-
-class SpatialMeanOfTemporalMeanBias(BinaryMetric):
-    '''Calculate the bias averaged over time and domain.'''
-
-    def run(self, reference_dataset, target_dataset):
-        '''Calculate the bias averaged over time and domain.
-
-        .. note::
-           Overrides BinaryMetric.run()
-
-        :param reference_dataset: The reference dataset to use in this metric
-            run
-        :type reference_dataset: :class:`dataset.Dataset`
-
-        :param target_dataset: The target dataset to evaluate against the
-            reference dataset in this metric run
-        :type target_dataset: :class:`dataset.Dataset`
-
-        :returns: The bias averaged over time and domain
-        '''
-
-        bias = reference_dataset.values - target_dataset.values
-        return bias.mean()
-
+        return calc_bias(target_dataset.values, ref_dataset.values, average_over_time=True)
 
 class RMSError(BinaryMetric):
     '''Calculate the Root Mean Square Difference (RMS Error), with the mean
@@ -264,6 +253,96 @@
         :returns: The RMS error, with the mean calculated over time and space
         '''
 
-        sqdiff = (reference_dataset.values - target_dataset.values) ** 2
-        return numpy.sqrt(sqdiff.mean())
+        return calc_rmse(target_dataset.values, reference_dataset.values)
 
+def calc_bias(target_array, reference_array, average_over_time = False):
+    ''' Calculate difference between two arrays
+
+    :param target_array: an array to be evaluated, as model output
+    :type target_array: :class:'numpy.ma.core.MaskedArray'
+
+    :param reference_array: an array of reference dataset
+    :type reference_array: :class:'numpy.ma.core.MaskedArray'
+
+    :param average_over_time: if True, calculated bias is averaged for the axis=0
+    :type average_over_time: 'bool'
+
+    :returns: Biases array of the target dataset
+    :rtype: :class:'numpy.ma.core.MaskedArray'
+    '''
+    
+    bias = target_array - reference_array
+    if average_over_time:
+        return ma.average(bias, axis=0)
+    else:
+        return bias
+
+def calc_stddev(array, axis=None):
+    ''' Calculate the sample standard deviation of an array along the given axis
+
+    :param array: an array to calculate sample standard deviation
+    :type array: :class:'numpy.ma.core.MaskedArray'
+    
+    :param axis: Axis along which the sample standard deviation is computed.
+    :type axis: 'int'
+
+    :returns: sample standard deviation of array
+    :rtype: :class:'numpy.ma.core.MaskedArray'
+    '''
+
+    if isinstance(axis, int):
+        return ma.std(array, axis=axis, ddof=1)
+    else:
+        return ma.std(array, ddof=1)
+        
+
+def calc_stddev_ratio(target_array, reference_array):
+    ''' Calculate the ratio of the standard deviations of two arrays
+
+    :param target_array: an array to be evaluated, as model output
+    :type target_array: :class:'numpy.ma.core.MaskedArray'
+
+    :param reference_array: an array of reference dataset
+    :type reference_array: :class:'numpy.ma.core.MaskedArray'
+
+    :returns: (standard deviation of target_array)/(standard deviation of reference array)
+    :rtype: :class:'float'
+    '''
+
+    return calc_stddev(target_array)/calc_stddev(reference_array)
+
+def calc_correlation(target_array, reference_array):
+    '''Calculate the correlation coefficient between two arrays.
+
+    :param target_array: an array to be evaluated, as model output
+    :type target_array: :class:'numpy.ma.core.MaskedArray'
+
+    :param reference_array: an array of reference dataset
+    :type reference_array: :class:'numpy.ma.core.MaskedArray'
+
+    :returns: pearson's correlation coefficient between the two input arrays
+    :rtype: :class:'numpy.ma.core.MaskedArray'
+    '''
+
+    return mstats.pearsonr(reference_array.flatten(), target_array.flatten())[0]  
+       
+def calc_rmse(target_array, reference_array):
+    ''' Calculate the root mean square error between two arrays
+
+    :param target_array: an array to be evaluated, as model output
+    :type target_array: :class:'numpy.ma.core.MaskedArray'
+
+    :param reference_array: an array of reference dataset
+    :type reference_array: :class:'numpy.ma.core.MaskedArray'
+
+    :returns: root mean square error
+    :rtype: :class:'float'
+    '''
+
+    return (ma.mean((calc_bias(target_array, reference_array))**2))**0.5 
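+
+# Usage sketch with small masked arrays (the values are illustrative only):
+#     >>> import numpy.ma as ma
+#     >>> ref = ma.masked_invalid([[1., 2.], [3., float('nan')]])
+#     >>> tgt = ma.masked_invalid([[2., 2.], [4., 5.]])
+#     >>> calc_bias(tgt, ref)          # element-wise target - reference
+#     >>> calc_rmse(tgt, ref)          # scalar RMSE over the unmasked points
+#     >>> calc_stddev_ratio(tgt, ref)  # std(target) / std(reference), ddof=1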
diff --git a/ocw/plotter.py b/ocw/plotter.py
old mode 100644
new mode 100755
index 337cfd3..72376bd
--- a/ocw/plotter.py
+++ b/ocw/plotter.py
@@ -238,7 +238,7 @@
     ''' Draw subregion domain(s) on a map.
 
     :param subregions: The subregion objects to plot on the map.
-    :type subregions: :class:`list` of subregion objects
+    :type subregions: :class:`list` of subregion objects (Bounds objects)
 
     :param lats: Array of latitudes values.
     :type lats: :class:`numpy.ndarray`
@@ -322,8 +322,8 @@
 
         nlats, nlons = domain.shape
         domain = ma.masked_equal(domain, 0)
-        reglats = np.linspace(reg.latmin, reg.latmax, nlats)
-        reglons = np.linspace(reg.lonmin, reg.lonmax, nlons)
+        reglats = np.linspace(reg.lat_min, reg.lat_max, nlats)
+        reglons = np.linspace(reg.lon_min, reg.lon_max, nlons)
         reglons, reglats = np.meshgrid(reglons, reglats)
 
         # Convert to to projection coordinates. Not really necessary
@@ -336,7 +336,7 @@
 
         # Label the subregion
         xm, ym = x.mean(), y.mean()
-        m.plot(xm, ym, marker='$%s$' %(reg.name), markersize=12, color='k')
+        m.plot(xm, ym, marker='$%s$' %("R"+str(i+1)), markersize=12, color='k')
 
     # Add the title
     ax.set_title(ptitle)
@@ -373,7 +373,7 @@
     :param xlabel: (Optional) x-axis title.
     :type xlabel: :mod:`string`
     
-    :param ylabel: (Optional) y-ayis title.
+    :param ylabel: (Optional) y-axis title.
     :type ylabel: :mod:`string`
 
     :param ptitle: (Optional) plot title.
@@ -488,13 +488,93 @@
     fig.savefig('%s.%s' %(fname, fmt), bbox_inches='tight', dpi=fig.dpi)
     fig.clf()
 
+def draw_barchart(results, yvalues, fname, ptitle='', fmt='png', 
+                     xlabel='', ylabel=''):
+    ''' Draw a barchart.
+
+    :param results: 1D array of data.
+    :type results: :class:`numpy.ndarray`
+
+    :param yvalues: List of y-axis labels
+    :type yvalues: :class:`list`
+
+    :param fname: Filename of the plot.
+    :type fname: :mod:`string`
+
+    :param ptitle: (Optional) plot title.
+    :type ptitle: :mod:`string`
+
+    :param fmt: (Optional) filetype for the output.
+    :type fmt: :mod:`string`
+
+    :param xlabel: (Optional) x-axis title.
+    :type xlabel: :mod:`string`
+    
+    :param ylabel: (Optional) y-axis title.
+    :type ylabel: :mod:`string`
+
+    '''
+
+    y_pos = list(range(len(yvalues))) 
+    fig = plt.figure() 
+    fig.set_size_inches((11., 8.5))
+    fig.dpi = 300
+    ax = plt.subplot(111)
+    plt.barh(y_pos, results, align="center", height=0.8, linewidth=0)
+    plt.yticks(y_pos, yvalues)
+    plt.tick_params(axis="both", which="both", bottom="on", top="off",labelbottom="on", left="off", right="off", labelleft="on")
+    ax.spines["top"].set_visible(False)
+    ax.spines["right"].set_visible(False)
+
+    ymin = min(y_pos) 
+    ymax = max(y_pos)
+    ymin = min((ymin - ((ymax - ymin) * 0.1)/2),0.5) 
+    ymax = ymax + ((ymax - ymin) * 0.1)
+    ax.set_ylim((ymin, ymax))
+    plt.xlabel(xlabel)
+    plt.tight_layout()
+       
+    # Save the figure
+    fig.savefig('%s.%s' %(fname, fmt), bbox_inches='tight', dpi=fig.dpi)
+    fig.clf()
+
+def draw_marker_on_map(lat, lon, fname, fmt='png', location_name=' ',gridshape=(1,1)):
+    '''
+    Purpose::
+        Draw a marker on a map
+
+    Input::
+        lat - latitude for plotting a marker
+        lon - longitude for plotting a marker
+        fname - a string specifying the filename of the plot
+        fmt - an optional string specifying the output filetype
+        location_name - an optional string used to label the marker
+    '''
+    fig = plt.figure()
+    fig.dpi = 300
+    ax = fig.add_subplot(111)
+    
+    m = Basemap(projection='cyl', resolution = 'c', llcrnrlat =lat-30, urcrnrlat = lat+30, llcrnrlon = lon-60, urcrnrlon = lon+60)
+    m.drawcoastlines(linewidth=1)
+    m.drawcountries(linewidth=1)
+    m.drawmapboundary(fill_color='aqua')
+    m.fillcontinents(color='coral',lake_color='aqua')
+    m.ax = ax
+   
+    xpt,ypt = m(lon,lat)
+    m.plot(xpt,ypt,'bo')  # plot a blue dot there
+    # put some text next to the dot, offset a little bit
+    # (the offset is in map projection coordinates)
+    plt.text(xpt+0.5, ypt+1.5,location_name+'\n(lon: %5.1f, lat: %3.1f)' % (lon, lat)) 
+                       
+    fig.savefig('%s.%s' %(fname, fmt), bbox_inches='tight', dpi=fig.dpi)
+    fig.clf()
+
 def draw_contour_map(dataset, lats, lons, fname, fmt='png', gridshape=(1, 1),
                      clabel='', ptitle='', subtitles=None, cmap=None,
                      clevs=None, nlevs=10, parallels=None, meridians=None,
                      extend='neither', aspect=8.5/2.5):
     ''' Draw a multiple panel contour map plot.
 
-    :param dataset: 3D array of data to be plotted with shape (nT, nLon, nLat).
+    :param dataset: 3D array of data to be plotted with shape (nT, nLat, nLon).
     :type dataset: :class:`numpy.ndarray`
 
     :param lats: Array of latitudes values.
@@ -941,3 +1021,36 @@
         r = np.linspace(std1, std2)
 
         return self.ax.plot(t,r,'red',linewidth=2)
+
+def draw_histogram(dataset_array, data_names, fname, fmt='png', nbins=10):
+    '''
+    Purpose::
+        Draw histograms
+
+    Input::
+        dataset_array - a list of data values [data1, data2, ....]
+        data_names    - a list of data names  ['name1','name2',....]
+        fname  - a string specifying the filename of the plot
+        nbins - number of bins
+    '''    
+    fig = plt.figure()
+    fig.dpi = 300
+    ndata = len(dataset_array)
+   
+    data_min = min(data.min() for data in dataset_array)
+    data_max = max(data.max() for data in dataset_array)
+
+    bins = np.linspace(np.round(data_min), np.round(data_max+1), nbins)
+    for idata,data in enumerate(dataset_array):
+        ax = fig.add_subplot(ndata, 1, idata+1)
+        ax.hist(data, bins, alpha=0.5, label=data_names[idata], normed=True)
+        leg = ax.legend()
+        leg.get_frame().set_alpha(0.5)
+        ax.set_xlim([data_min-(data_max-data_min)*0.15, data_max+(data_max-data_min)*0.15])
+        
+    fig.savefig('%s.%s' %(fname, fmt), bbox_inches='tight', dpi=fig.dpi)
+ 
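+# Usage sketch (hypothetical data; writes ``prec_hist.png`` to the working directory):
+#     >>> import numpy as np
+#     >>> obs = np.random.gamma(2., 2., 1000)
+#     >>> model = np.random.gamma(2., 2.5, 1000)
+#     >>> draw_histogram([obs, model], ['obs', 'model'], 'prec_hist', nbins=20)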
diff --git a/ocw/statistical_downscaling.py b/ocw/statistical_downscaling.py
new file mode 100755
index 0000000..75e2adc
--- /dev/null
+++ b/ocw/statistical_downscaling.py
@@ -0,0 +1,111 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+
+import ocw.utils as utils
+import numpy as np
+from scipy.stats import percentileofscore, linregress
+
+class Downscaling:
+    def __init__(self, ref_dataset, model_present, model_future):
+        '''
+        :param ref_dataset: The Dataset to use as the reference dataset (observation)
+        :type ref_dataset: Dataset
+        :param model_present: model simulation to be compared with observation
+        :type model_present: Dataset
+        :param model_future: model simulation to be calibrated for prediction
+        :type model_future: Dataset
+        '''
+        self.ref_dataset = ref_dataset[~ref_dataset.mask].ravel()
+        self.model_present = model_present.ravel()
+        self.model_future = model_future.ravel()
+             
+    description = "statistical downscaling methods"
+
+    def Delta_addition(self):
+        '''Calculate the mean difference between future and present simulation, 
+           then add the difference to the observed distribution
+
+        :returns: downscaled model_present and model_future
+        ''' 
+        ref = self.ref_dataset 
+        model_present = self.model_present 
+        model_future = self.model_future 
+
+        return model_present, ref + np.mean(model_future-model_present)
+
+    def Delta_correction(self):
+        '''Calculate the mean difference between observation and present simulation,
+           then add the difference to the future distribution
+
+        :returns: downscaled model_present and model_future
+        '''
+        ref = self.ref_dataset
+        model_present = self.model_present
+        model_future = self.model_future
+
+        return model_present+np.mean(ref) - np.mean(model_present), model_future + np.mean(ref) - np.mean(model_present)
+
+    def Quantile_mapping(self):
+        '''Remove the biases for each quantile value 
+        Wood et al. (2004), Hydrologic implications of dynamical and statistical approaches to downscaling climate model outputs
+
+        :returns: downscaled model_present and model_future
+        '''
+        ref = self.ref_dataset
+        model_present = self.model_present
+        model_present_corrected = np.zeros(model_present.size)
+        model_future = self.model_future
+        model_future_corrected = np.zeros(model_future.size)
+
+
+        for ival, model_value in enumerate(model_present):
+            percentile = percentileofscore(model_present, model_value)
+            model_present_corrected[ival] = np.percentile(ref, percentile) 
+
+        for ival, model_value in enumerate(model_future):
+            percentile = percentileofscore(model_future, model_value)
+            model_future_corrected[ival] = model_value + np.percentile(ref, percentile) - np.percentile(model_present, percentile) 
+
+        return model_present_corrected, model_future_corrected     
+
+    def Asynchronous_regression(self):
+        '''Remove the biases by fitting a linear regression model with ordered observational and model datasets
+        Stoner et al. (2013), An asynchronous regional regression model for statistical downscaling of daily climate variables
+
+        :returns: downscaled model_present and model_future
+        '''
+
+        ref_original = self.ref_dataset
+        model_present = self.model_present
+        model_present_sorted = np.sort(model_present)
+        model_future = self.model_future
+ 
+        ref = np.zeros(model_present.size)   # For linear regression, the size of reference data must be same as model data. 
+
+        for ival, model_value in enumerate(model_present_sorted):
+            percentile = percentileofscore(model_present_sorted, model_value)
+            ref[ival] = np.percentile(ref_original, percentile)       
+
+        slope, intercept = linregress(model_present_sorted, ref)[0:2] 
+        
+        return model_present*slope+intercept, model_future*slope+intercept
+
+
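+# Usage sketch (illustrative; ``obs`` is a masked array of observations and the
+# model inputs are 1-D arrays, all prepared elsewhere):
+#     >>> d = Downscaling(obs, model_present, model_future)
+#     >>> present_qm, future_qm = d.Quantile_mapping()
+#     >>> present_arrm, future_arrm = d.Asynchronous_regression()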
+
+
+
diff --git a/ocw/tests/test_dap.py b/ocw/tests/test_dap.py
index be01f6c..db0d4c0 100644
--- a/ocw/tests/test_dap.py
+++ b/ocw/tests/test_dap.py
@@ -23,7 +23,9 @@
 class TestDap(unittest.TestCase):
     @classmethod
     def setup_class(self):
-        self.dataset = dap.load('http://test.opendap.org/dap/data/nc/sst.mnmean.nc.gz', 'sst', name='foo')
+        self.url = 'http://test.opendap.org/dap/data/nc/sst.mnmean.nc.gz'
+        self.name = 'foo'
+        self.dataset = dap.load(self.url, 'sst', name=self.name)
 
     def test_dataset_is_returned(self):
         self.assertTrue(isinstance(self.dataset, Dataset))
@@ -42,7 +44,11 @@
         self.assertTrue(start == self.dataset.times[0])
 
     def test_custom_dataset_name(self):
-        self.assertEquals(self.dataset.name, 'foo')
+        self.assertEquals(self.dataset.name, self.name)
+
+    def test_dataset_origin(self):
+        self.assertEquals(self.dataset.origin['source'], 'dap')
+        self.assertEquals(self.dataset.origin['url'], self.url)
 
 if __name__ == '__main__':
     unittest.main()
diff --git a/ocw/tests/test_dataset.py b/ocw/tests/test_dataset.py
index 94d7740..777d07e 100644
--- a/ocw/tests/test_dataset.py
+++ b/ocw/tests/test_dataset.py
@@ -30,8 +30,15 @@
         flat_array = np.array(range(300))
         self.value = flat_array.reshape(12, 5, 5)
         self.variable = 'prec'
-        self.test_dataset = Dataset(self.lat, self.lon, self.time, 
-                                    self.value, self.variable)
+        self.name = 'foo'
+        self.origin = {'path': '/a/fake/file/path'}
+        self.test_dataset = Dataset(self.lat,
+                                    self.lon,
+                                    self.time,
+                                    self.value,
+                                    variable=self.variable,
+                                    name=self.name,
+                                    origin=self.origin)
 
     def test_lats(self):
         self.assertItemsEqual(self.test_dataset.lats, self.lat)
@@ -48,6 +55,12 @@
     def test_variable(self):
         self.assertEqual(self.test_dataset.variable, self.variable)
 
+    def test_name(self):
+        self.assertEqual(self.test_dataset.name, self.name)
+
+    def test_origin(self):
+        self.assertEqual(self.test_dataset.origin, self.origin)
+
 class TestInvalidDatasetInit(unittest.TestCase):
     def setUp(self):
         self.lat = np.array([10, 12, 14, 16, 18])
@@ -73,7 +86,7 @@
             Dataset(self.lat, self.lon, self.time, self.value, 'prec')
 
     def test_bad_values_shape(self):
-        self.value = np.array([[1, 2], [2, 3], [3, 4], [4, 5]])
+        self.value = np.array([1, 2, 3, 4, 5])
         with self.assertRaises(ValueError):
             Dataset(self.lat, self.lon, self.time, self.value, 'prec')
 
diff --git a/ocw/tests/test_dataset_processor.py b/ocw/tests/test_dataset_processor.py
index 5e62aa8..9cb60ce 100644
--- a/ocw/tests/test_dataset_processor.py
+++ b/ocw/tests/test_dataset_processor.py
@@ -174,6 +174,7 @@
 class TestSubset(unittest.TestCase):
     def setUp(self):
         self.target_dataset = ten_year_monthly_dataset()
+        self.name = 'foo'
 
         self.subregion = ds.Bounds(
             -81, 81, 
@@ -202,7 +203,16 @@
         self.assertEqual(subset.lons.shape[0], 162)
         self.assertEqual(subset.times.shape[0], 37)
         self.assertEqual(subset.values.shape, (37, 82, 162))
-    
+
+    def test_subset_name(self):
+        subset = dp.subset(self.subregion, self.target_dataset)
+        self.assertEqual(subset.name, self.name)
+
+    def test_subset_name_propagation(self):
+        subset_name = 'foo_subset_name'
+        subset = dp.subset(self.subregion, self.target_dataset, subset_name)
+        self.assertEqual(subset.name, subset_name)
+
     def test_subset_using_non_exact_spatial_bounds(self):
         index_slices = dp._get_subregion_slice_indices(self.non_exact_spatial_subregion,  self.target_dataset)
         control_index_slices = {"lat_start"  : 5,
@@ -236,6 +246,7 @@
                                          times,
                                          values,
                                          variable="test variable name",
+                                         units='test variable units',
                                          name='foo')
 
         self.spatial_out_of_bounds = ds.Bounds(
@@ -364,7 +375,7 @@
     # Ten Years of monthly data
     times = np.array([datetime.datetime(year, month, 1) for year in range(2000, 2010) for month in range(1, 13)])
     values = np.ones([len(times), len(lats), len(lons)])
-    input_dataset = ds.Dataset(lats, lons, times, values, variable="test variable name", name='foo')
+    input_dataset = ds.Dataset(lats, lons, times, values, variable="test variable name", units='test variable units', name='foo')
     return input_dataset
 
 def ten_year_monthly_15th_dataset():
@@ -373,7 +384,7 @@
     # Ten Years of monthly data
     times = np.array([datetime.datetime(year, month, 15) for year in range(2000, 2010) for month in range(1, 13)])
     values = np.ones([len(times), len(lats), len(lons)])
-    input_dataset = ds.Dataset(lats, lons, times, values, variable="test variable name")
+    input_dataset = ds.Dataset(lats, lons, times, values, variable="test variable name", units='test variable units')
     return input_dataset
 
 def two_year_daily_dataset():
@@ -381,7 +392,7 @@
     lons = np.array(range(-179, 180, 2))
     times = np.array([datetime.datetime(2001, 1, 1) + datetime.timedelta(days=d) for d in range(730)])
     values = np.ones([len(times), len(lats), len(lons)])
-    dataset = ds.Dataset(lats, lons, times, values, variable='random data')
+    dataset = ds.Dataset(lats, lons, times, values, variable='random data', units='test variable units')
     return dataset    
 
 def two_year_daily_2hr_dataset():
@@ -389,7 +400,7 @@
     lons = np.array(range(-179, 180, 2))
     times = np.array([datetime.datetime(2001, 1, 1) + datetime.timedelta(days=d, hours=2) for d in range(730)])
     values = np.ones([len(times), len(lats), len(lons)])
-    dataset = ds.Dataset(lats, lons, times, values, variable='random data')
+    dataset = ds.Dataset(lats, lons, times, values, variable='random data', units='test variable units')
     return dataset    
 
 def build_ten_cube_dataset(value):
diff --git a/ocw/tests/test_evaluation.py b/ocw/tests/test_evaluation.py
index 9e8d126..2d48e72 100644
--- a/ocw/tests/test_evaluation.py
+++ b/ocw/tests/test_evaluation.py
@@ -125,5 +125,83 @@
         bias_results_shape = tuple(bias_eval.results[0][0].shape)
         self.assertEqual(input_shape, bias_results_shape)
 
+    def test_result_shape(self):
+        bias_eval = Evaluation(
+            self.test_dataset,
+            [self.another_test_dataset, self.another_test_dataset, self.another_test_dataset],
+            [Bias(), Bias()]
+        )
+        bias_eval.run()
+
+        # Expected result shape is
+        # [bias, bias] where bias.shape[0] = number of datasets
+        self.assertTrue(len(bias_eval.results) == 2)
+        self.assertTrue(bias_eval.results[0].shape[0] == 3)
+
+    def test_unary_result_shape(self):
+        new_eval = Evaluation(
+            self.test_dataset,
+            [self.another_test_dataset, self.another_test_dataset, self.another_test_dataset, self.another_test_dataset],
+            [TemporalStdDev()]
+        )
+        new_eval.run()
+
+        # Expected result shape is
+        # [stddev] where stddev.shape[0] = number of datasets
+        
+        self.assertTrue(len(new_eval.unary_results) == 1)
+        self.assertTrue(new_eval.unary_results[0].shape[0] == 5)
+
+    def test_subregion_result_shape(self):
+        bound = Bounds(
+                10, 18, 
+                100, 108, 
+                dt.datetime(2000, 1, 1), dt.datetime(2000, 3, 1))
+
+        bias_eval = Evaluation(
+            self.test_dataset,
+            [self.another_test_dataset, self.another_test_dataset],
+            [Bias()],
+            [bound]
+        )
+        bias_eval.run()
+
+        # Expected result shape is
+        # [
+        #       [   # Subregions cause this extra layer
+        #           [number of targets, bias.run(reference, target1).shape]
+        #       ]
+        #   ],
+        self.assertTrue(len(bias_eval.results) == 1)
+        self.assertTrue(len(bias_eval.results[0]) == 1)
+        self.assertTrue(bias_eval.results[0][0].shape[0] == 2)
+        self.assertIsInstance(bias_eval.results, list)
+
+    def test_subregion_unary_result_shape(self):
+        bound = Bounds(
+                10, 18, 
+                100, 108, 
+                dt.datetime(2000, 1, 1), dt.datetime(2000, 3, 1))
+
+        new_eval = Evaluation(
+            self.test_dataset,
+            [self.another_test_dataset, self.another_test_dataset],
+            [TemporalStdDev(), TemporalStdDev()],
+            [bound, bound, bound, bound, bound]
+        )
+        new_eval.run()
+
+        # Expected result shape is
+        # [
+        #       [   # Subregions cause this extra layer
+        #           [3, temporalstddev.run(reference).shape],
+        #       ]
+        # ]
+        self.assertTrue(len(new_eval.unary_results) == 5)  # number of subregions
+        self.assertTrue(len(new_eval.unary_results[0]) == 2) # number of metrics
+        self.assertIsInstance(new_eval.unary_results, list)
+        self.assertTrue(new_eval.unary_results[0][0].shape[0] == 3) # number of datasets (ref + target)
+
+
 if __name__  == '__main__':
     unittest.main()
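
A minimal sketch (not part of this patch) of the nesting the shape assertions above expect, with plain numpy arrays standing in for the metric output; the Evaluation object is assumed to lay its results out this way.

import numpy as np

num_metrics, num_targets, num_subregions = 2, 3, 5
grid_shape = (12, 5, 5)

# Without subregions: results[metric][target] -> metric output grid
results = [np.ones((num_targets,) + grid_shape) for _ in range(num_metrics)]
assert len(results) == num_metrics
assert results[0].shape[0] == num_targets

# With subregions: unary_results[subregion][metric][dataset] -> output grid
unary_results = [
    [np.ones((num_targets + 1,) + grid_shape) for _ in range(num_metrics)]
    for _ in range(num_subregions)
]
assert len(unary_results) == num_subregions
assert len(unary_results[0]) == num_metrics
assert unary_results[0][0].shape[0] == num_targets + 1
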
diff --git a/ocw/tests/test_local.py b/ocw/tests/test_local.py
index 27f4245..a9e1a71 100644
--- a/ocw/tests/test_local.py
+++ b/ocw/tests/test_local.py
@@ -69,6 +69,14 @@
         ds = local.load_file(self.file_path, 'value', name='foo')
         self.assertEqual(ds.name, 'foo')
 
+    def test_dataset_origin(self):
+        ds = local.load_file(self.file_path, 'value', elevation_index=1)
+        expected_keys = set(['source', 'path', 'lat_name', 'lon_name',
+                             'time_name', 'elevation_index' ])
+        self.assertEqual(set(ds.origin.keys()), expected_keys)
+        self.assertEqual(ds.origin['source'], 'local')
+
+
 class test_get_netcdf_variable_names(unittest.TestCase):
     file_path = "http://zipper.jpl.nasa.gov/dist/"
     test_model = "AFRICA_KNMI-RACMO2.2b_CTL_ERAINT_MM_50km_1989-2008_tasmax.nc"
@@ -145,6 +153,7 @@
         values[:] = values
         #Assign time info to time variable
         netCDF_file.variables['time'].units = 'months since 2001-01-01 00:00:00' 
+        netCDF_file.variables['value'].units = 'foo_units'
         netCDF_file.close()
         return file_path
 
diff --git a/ocw/tests/test_metrics.py b/ocw/tests/test_metrics.py
index 571ad4d..facf1d3 100644
--- a/ocw/tests/test_metrics.py
+++ b/ocw/tests/test_metrics.py
@@ -24,6 +24,7 @@
 import ocw.metrics as metrics
 
 import numpy as np
+import numpy.ma as ma
 import numpy.testing as npt
 
 class TestBias(unittest.TestCase):
@@ -54,8 +55,32 @@
         '''Test bias function between reference dataset and target dataset.'''
         expected_result = np.zeros((12, 5, 5), dtype=np.int)
         expected_result.fill(-300)
-        np.testing.assert_array_equal(self.bias.run(self.reference_dataset, self.target_dataset), expected_result)
+        np.testing.assert_array_equal(self.bias.run(self.target_dataset, self.reference_dataset), expected_result)
 
+class TestSpatialPatternTaylorDiagram(unittest.TestCase):
+    '''Test the metrics.SpatialPatternTaylorDiagram'''
+    def setUp(self):
+        self.taylor_diagram = metrics.SpatialPatternTaylorDiagram()
+        self.ref_dataset = Dataset(
+            np.array([1., 1., 1., 1., 1.]),
+            np.array([1., 1., 1., 1., 1.]),
+            np.array([dt.datetime(2000, x, 1) for x in range(1, 13)]),
+            # Reshaped array with 300 values incremented by 5
+            np.arange(0, 1500, 5).reshape(12, 5, 5),
+            'ds1'
+        )
+
+        self.tar_dataset = Dataset(
+            np.array([1., 1., 1., 1., 1.]),
+            np.array([1., 1., 1., 1., 1.]),
+            np.array([dt.datetime(2000, x, 1) for x in range(1, 13)]),
+            # Reshaped array with 300 values incremented by 2
+            np.arange(0, 600, 2).reshape(12, 5, 5),
+            'ds2'
+        )
+
+    def test_function_run(self):
+        np.testing.assert_array_equal(self.taylor_diagram.run(self.ref_dataset, self.tar_dataset), ma.array([0.4, 1.0]))
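
A quick arithmetic check (not part of this patch) of the expected [0.4, 1.0] pair, assuming the metric returns the standard deviation ratio followed by the pattern correlation: the target field above is exactly 0.4 times the reference field.

import numpy as np

ref = np.arange(0, 1500, 5).reshape(12, 5, 5)
tar = np.arange(0, 600, 2).reshape(12, 5, 5)

std_ratio = tar.std() / ref.std()                    # 2/5 -> 0.4
corr = np.corrcoef(ref.ravel(), tar.ravel())[0, 1]   # perfectly linear -> 1.0
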
 
 class TestTemporalStdDev(unittest.TestCase):
     '''Test the metrics.TemporalStdDev metric.'''
@@ -102,7 +127,7 @@
         )
 
     def test_function_run(self):
-        self.assertTrue(self.std_dev_ratio.run(self.ref_dataset, self.tar_dataset), 2.5)
+        self.assertAlmostEqual(self.std_dev_ratio.run(self.ref_dataset, self.tar_dataset), 0.4)
 
 
 class TestPatternCorrelation(unittest.TestCase):
@@ -161,22 +186,18 @@
 
     def test_identical_inputs(self):
         expected = np.ones(25).reshape(5, 5)
-        tc, cl = self.metric.run(self.ref_dataset, self.ref_dataset)
+        tc = self.metric.run(self.ref_dataset, self.ref_dataset)
         np.testing.assert_array_equal(tc, expected)
-        np.testing.assert_array_equal(cl, expected)
 
     def test_positive_correlation(self):
         expected = np.ones(25).reshape(5, 5)
-        tc, cl = self.metric.run(self.ref_dataset, self.tgt_dataset_inc)
+        tc = self.metric.run(self.ref_dataset, self.tgt_dataset_inc)
         np.testing.assert_array_equal(tc, expected)
-        np.testing.assert_array_equal(cl, expected)
 
     def test_negative_correlation(self):
         expected_tc = np.array([-1] * 25).reshape(5, 5)
-        expected_cl = np.ones(25).reshape(5, 5)
-        tc, cl = self.metric.run(self.ref_dataset, self.tgt_dataset_dec)
+        tc = self.metric.run(self.ref_dataset, self.tgt_dataset_dec)
         np.testing.assert_array_equal(tc, expected_tc)
-        np.testing.assert_array_equal(cl, expected_cl)
 
 
 class TestTemporalMeanBias(unittest.TestCase):
@@ -206,43 +227,7 @@
         '''Test mean bias function between reference dataset and target dataset.'''
         expected_result = np.zeros((5, 5), dtype=np.int)
         expected_result.fill(-300)
-        np.testing.assert_array_equal(self.mean_bias.run(self.reference_dataset, self.target_dataset), expected_result)
-
-    def test_function_run_abs(self):
-        '''Test mean bias function between reference dataset and target dataset with abs as True.'''
-        expected_result = np.zeros((5, 5), dtype=np.int)
-        expected_result.fill(300)
-        np.testing.assert_array_equal(self.mean_bias.run(self.reference_dataset, self.target_dataset, True), expected_result)
-
-
-class TestSpatialMeanOfTemporalMeanBias(unittest.TestCase):
-    '''Test the metrics.SpatialMeanOfTemporalMeanBias metric.'''
-    def setUp(self):
-        # Set metric.
-        self.metric = metrics.SpatialMeanOfTemporalMeanBias()
-        # Initialize reference dataset.
-        self.ref_lats = np.array([10, 20, 30, 40, 50])
-        self.ref_lons = np.array([5, 15, 25, 35, 45])
-        self.ref_times = np.array([dt.datetime(2000, x, 1)
-                                   for x in range(1, 13)])
-        self.ref_values = np.array(range(300)).reshape(12, 5, 5)
-        self.ref_variable = "ref"
-        self.ref_dataset = Dataset(self.ref_lats, self.ref_lons,
-            self.ref_times, self.ref_values, self.ref_variable)
-        # Initialize target dataset.
-        self.tgt_lats = np.array([10, 20, 30, 40, 50])
-        self.tgt_lons = np.array([5, 15, 25, 35, 45])
-        self.tgt_times = np.array([dt.datetime(2000, x, 1)
-                                   for x in range(1, 13)])
-        self.tgt_values = np.array(range(299, -1, -1)).reshape(12, 5, 5)
-        self.tgt_variable = "tgt"
-        self.tgt_dataset = Dataset(self.tgt_lats, self.tgt_lons,
-            self.tgt_times, self.tgt_values, self.tgt_variable)
-
-    def test_function_run(self):
-        result = self.metric.run(self.ref_dataset, self.tgt_dataset)
-        self.assertEqual(result, 0.0)
-
+        np.testing.assert_array_equal(self.mean_bias.run(self.target_dataset, self.reference_dataset), expected_result)
 
 class TestRMSError(unittest.TestCase):
     '''Test the metrics.RMSError metric.'''
diff --git a/ocw/tests/test_rcmed.py b/ocw/tests/test_rcmed.py
index 6c882f4..58c38bc 100644
--- a/ocw/tests/test_rcmed.py
+++ b/ocw/tests/test_rcmed.py
@@ -108,5 +108,21 @@
         ds = rcmed.parameter_dataset(self.dataset_id, self.parameter_id, self.min_lat, self.max_lat, self.min_lon, self.max_lon, self.start_time, self.end_time, name='foo')
         self.assertEquals(ds.name, 'foo')
 
+    def test_dataset_origin(self):
+        rcmed.urllib2.urlopen = self.return_text
+        ds = rcmed.parameter_dataset(self.dataset_id,
+                                     self.parameter_id,
+                                     self.min_lat,
+                                     self.max_lat,
+                                     self.min_lon,
+                                     self.max_lon,
+                                     self.start_time,
+                                     self.end_time,
+                                     name='foo')
+
+        self.assertEquals(ds.origin['source'], 'rcmed')
+        self.assertEquals(ds.origin['dataset_id'], self.dataset_id)
+        self.assertEquals(ds.origin['parameter_id'], self.parameter_id)
+
 if __name__ == '__main__':
     unittest.main()
diff --git a/ocw/tests/test_utils.py b/ocw/tests/test_utils.py
index da5ad42..36cf42f 100644
--- a/ocw/tests/test_utils.py
+++ b/ocw/tests/test_utils.py
@@ -213,9 +213,29 @@
 
     def test_calc_climatology_monthly(self):
         expected_result = np.ones(300).reshape(12, 5, 5)
-        actual_result = utils.calc_climatology_monthly(self.dataset)
+        expected_times = np.array([datetime.datetime(1, 1, 1) + relativedelta(months=x)
+                                   for x in range(12)])
+        actual_result, actual_times = utils.calc_climatology_monthly(self.dataset)
         np.testing.assert_array_equal(actual_result, expected_result)
+        np.testing.assert_array_equal(actual_times, expected_times)
 
+class TestCalcTimeSeries(unittest.TestCase):
+    ''' Tests the 'calc_time_series' method from ocw.utils.py '''
 
+    def setUp(self):
+        self.lats = np.array([10, 20, 30, 40, 50])
+        self.lons = np.array([20, 30, 40, 50, 60])
+        start_date = datetime.datetime(2000, 1, 1)
+        self.times = np.array([start_date + relativedelta(months=x)
+                               for x in range(12)])
+        self.values = np.ones(300).reshape(12, 5, 5)
+        self.variable = 'testdata'
+        self.dataset = Dataset(self.lats, self.lons, self.times,
+                               self.values, self.variable)
+
+    def test_calc_time_series(self):
+        expected_result = np.ones(12)
+        np.testing.assert_array_equal(utils.calc_time_series(self.dataset), expected_result)
+        
 if __name__ == '__main__':
     unittest.main()
diff --git a/ocw/utils.py b/ocw/utils.py
old mode 100644
new mode 100755
index c4b640f..3c7beb0
--- a/ocw/utils.py
+++ b/ocw/utils.py
@@ -20,9 +20,12 @@
 import sys
 import datetime as dt
 import numpy as np
+import numpy.ma as ma
+import datetime 
 
 from mpl_toolkits.basemap import shiftgrid
 from dateutil.relativedelta import relativedelta
+from netCDF4 import num2date
 
 def decode_time_values(dataset, time_var_name):
     ''' Decode NetCDF time values into Python datetime objects.
@@ -52,10 +55,12 @@
         for time_val in time_data:
             times.append(time_base + relativedelta(months=int(time_val)))
     else:
-        for time_val in time_data:
-            arg[time_units] = time_val
-            times.append(time_base + dt.timedelta(**arg))
+        try:
+            times_calendar = time_data.calendar
+        except AttributeError:
+            times_calendar = 'standard'
 
+        times = num2date(time_data[:], units=time_format, calendar=times_calendar)
     return times
 
 def parse_time_units(time_format):
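
A small usage sketch (not part of this patch) of the netCDF4.num2date call that decode_time_values now delegates to; the units string and sample offsets below are illustrative only.

from netCDF4 import num2date

time_values = [0, 31, 59]  # e.g. days elapsed since the base time
times = num2date(time_values,
                 units='days since 2001-01-01 00:00:00',
                 calendar='standard')
# -> datetime-like objects for 2001-01-01, 2001-02-01, 2001-03-01
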
@@ -112,6 +117,7 @@
         '%Y/%m/%d%H:%M:%S', '%Y-%m-%d %H:%M', '%Y/%m/%d %H:%M',
         '%Y:%m:%d %H:%M', '%Y%m%d %H:%M', '%Y-%m-%d', '%Y/%m/%d',
         '%Y:%m:%d', '%Y%m%d', '%Y-%m-%d %H:%M:%S.%f', '%Y-%m-%d %H',
+        '%Y-%m-%dT%H:%M:%S', '%Y-%m-%dT%H:%M:%SZ'
     ]
 
     # Attempt to match the base time string with a possible format parsing string.
@@ -178,42 +184,45 @@
 
     :raises ValueError: If the lat/lon values are not sorted.
     '''
-    # Avoid unnecessary shifting if all lons are higher than 180
-    if lons.min() > 180:
-        lons -= 360
+    if lats.ndim == 1 and lons.ndim == 1:
+        # Avoid unnecessary shifting if all lons are higher than 180
+        if lons.min() > 180:
+            lons -= 360
 
-    # Make sure lats and lons are monotonically increasing
-    lats_decreasing = np.diff(lats) < 0
-    lons_decreasing = np.diff(lons) < 0
+        # Make sure lats and lons are monotonically increasing
+        lats_decreasing = np.diff(lats) < 0
+        lons_decreasing = np.diff(lons) < 0
 
-    # If all values are decreasing then they just need to be reversed
-    lats_reversed, lons_reversed = lats_decreasing.all(), lons_decreasing.all()
+        # If all values are decreasing then they just need to be reversed
+        lats_reversed, lons_reversed = lats_decreasing.all(), lons_decreasing.all()
 
-    # If the lat values are unsorted then raise an exception
-    if not lats_reversed and lats_decreasing.any():
-        raise ValueError('Latitudes must be sorted.')
+        # If the lat values are unsorted then raise an exception
+        if not lats_reversed and lats_decreasing.any():
+            raise ValueError('Latitudes must be sorted.')
 
-    # Perform same checks now for lons
-    if not lons_reversed and lons_decreasing.any():
-        raise ValueError('Longitudes must be sorted.')
+        # Perform same checks now for lons
+        if not lons_reversed and lons_decreasing.any():
+            raise ValueError('Longitudes must be sorted.')
 
-    # Also check if lons go from [0, 360), and convert to [-180, 180)
-    # if necessary
-    lons_shifted = lons.max() > 180
-    lats_out, lons_out, data_out = lats[:], lons[:], values[:]
-    # Now correct data if latlon grid needs to be shifted
-    if lats_reversed:
-        lats_out = lats_out[::-1]
-        data_out = data_out[..., ::-1, :]
+        # Also check if lons go from [0, 360), and convert to [-180, 180)
+        # if necessary
+        lons_shifted = lons.max() > 180
+        lats_out, lons_out, data_out = lats[:], lons[:], values[:]
+        # Now correct data if latlon grid needs to be shifted
+        if lats_reversed:
+            lats_out = lats_out[::-1]
+            data_out = data_out[..., ::-1, :]
 
-    if lons_reversed:
-        lons_out = lons_out[::-1]
-        data_out = data_out[..., ::-1]
+        if lons_reversed:
+            lons_out = lons_out[::-1]
+            data_out = data_out[..., ::-1]
 
-    if lons_shifted:
-        data_out, lons_out = shiftgrid(180, data_out, lons_out, start=False)
+        if lons_shifted:
+            data_out, lons_out = shiftgrid(180, data_out, lons_out, start=False)
 
-    return lats_out, lons_out, data_out
+        return lats_out, lons_out, data_out
+    else:
+        return lats, lons, values
 
 
 def reshape_monthly_to_annually(dataset):
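
A pure-numpy sketch (not part of this patch, and not using basemap's shiftgrid) of the longitude normalization idea applied in the 1-D branch above: map longitudes from [0, 360) into [-180, 180) and reorder the data columns to match.

import numpy as np

lons = np.arange(0, 360, 90)        # [0, 90, 180, 270]
data = np.arange(8).reshape(2, 4)   # toy (time, lon) field

shifted_lons = np.where(lons >= 180, lons - 360, lons)   # [0, 90, -180, -90]
order = np.argsort(shifted_lons)                         # indices for ascending lons
lons_out = shifted_lons[order]                           # [-180, -90, 0, 90]
data_out = data[..., order]                              # columns follow the lons
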
@@ -235,7 +244,6 @@
     :type dataset: :class:`dataset.Dataset`
 
     :returns: Dataset values array with shape (num_year, 12, num_lat, num_lon)
-    :rtype: :class:`numpy.ndarray`
     '''
 
     values = dataset.values[:]
@@ -252,16 +260,25 @@
 
     return values
 
+def calc_temporal_mean(dataset):
+    ''' Calculate temporal mean of dataset's values 
+
+    :param dataset: OCW Dataset whose first dimension is time 
+    :type dataset: :class:`dataset.Dataset`
+
+    :returns: Mean values averaged for the first dimension (time)
+    '''
+    return ma.mean(dataset.values, axis=0)
+
 def calc_climatology_year(dataset):
     ''' Calculate climatology of dataset's values for each year
-
+    
     :param dataset: Monthly binned Dataset object with an evenly divisible
         number of months.
     :type dataset: :class:`dataset.Dataset`
 
     :returns: Mean values for each year (annual_mean) and mean values for all
         years (total_mean)
-    :rtype: A :func:`tuple` of two :class:`numpy.ndarray`
 
     :raise ValueError: If the number of monthly bins is not evenly divisible
         by 12.
@@ -274,57 +291,26 @@
     else:
         # Get values reshaped to (num_year, 12, num_lats, num_lons)
         values = reshape_monthly_to_annually(dataset)
         # Calculate mean values over year (num_year, num_lats, num_lons)
         annually_mean = values.mean(axis=1)
         # Calculate mean values over all years (num_lats, num_lons)
         total_mean = annually_mean.mean(axis=0)
 
     return annually_mean, total_mean
 
-def calc_climatology_season(month_start, month_end, dataset):
-    ''' Calculate seasonal mean and time series for given months.
-
-    :param month_start: An integer for beginning month (Jan=1)
-    :type month_start: :class:`int`
-
-    :param month_end: An integer for ending month (Jan=1)
-    :type month_end: :class:`int`
-
-    :param dataset: Dataset object with full-year format
-    :type dataset: :class:`dataset.Dataset`
-
-    :returns:  
-        t_series - monthly average over the given season
-        means - mean over the entire season
-    :rtype: A :func:`tuple` of two :class:`numpy.ndarray`
-    '''
-
-    if month_start > month_end:
-        # Offset the original array so that the the first month
-        # becomes month_start, note that this cuts off the first year of data
-        offset = slice(month_start - 1, month_start - 13)
-        reshape_data = reshape_monthly_to_annually(dataset[offset])
-        month_index = slice(0, 13 - month_start + month_end)
-    else:
-        # Since month_start <= month_end, just take a slice containing those months
-        reshape_data = reshape_monthly_to_annually(dataset)
-        month_index = slice(month_start - 1, month_end)
-    
-    t_series = reshape_data[:, month_index].mean(axis=1)
-    means = t_series.mean(axis=0)
-    return t_series, means
-
-
 def calc_climatology_monthly(dataset):
     ''' Calculate monthly mean values for a dataset.
+    Following the COARDS climatology convention the year could be given as 0,
+    but the minimum year allowed by Python's datetime is 1, so year 1 is used:
+    http://www.cgd.ucar.edu/cms/eaton/netcdf/CF-20010629.htm#climatology
 
     :param dataset: Monthly binned Dataset object with the number of months
         divisible by 12
     :type dataset: :class:`dataset.Dataset`
 
-    :returns: Mean values for each month of the year
-    :rtype: A 3D :class:`numpy.ndarray` of shape (12, num_lats, num_lons)
-
+    :returns: Mean values for each month of the year of shape (12, num_lats, num_lons)
+              and times array of datetime objects of length 12
+    
     :raise ValueError: If the number of monthly bins is not divisible by 12
     '''
 
@@ -335,5 +321,98 @@
         )
         raise ValueError(error)
     else:
-        return reshape_monthly_to_annually(dataset).mean(axis=0)
+        values = reshape_monthly_to_annually(dataset).mean(axis=0)
+        
+        # A year can commence from any month
+        first_month = dataset.times[0].month
+        times = np.array([datetime.datetime(1, first_month, 1) + relativedelta(months=x)
+                          for x in range(12)])
+        return values, times
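
A minimal sketch (not part of this patch) of the monthly climatology and the twelve placeholder times returned above, using a toy three-year monthly series.

import datetime
import numpy as np
from dateutil.relativedelta import relativedelta

values = np.random.rand(36, 5, 5)                          # 3 years of monthly data
monthly_climo = values.reshape(3, 12, 5, 5).mean(axis=0)   # -> (12, num_lats, num_lons)

first_month = 1  # January; year 1 stands in for the COARDS "climatology year"
climo_times = np.array([datetime.datetime(1, first_month, 1) + relativedelta(months=x)
                        for x in range(12)])
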
 
+def calc_time_series(dataset):
+    ''' Calculate time series mean values for a dataset
+
+    :param dataset: Dataset object 
+    :type dataset: :class:`dataset.Dataset`
+
+    :returns: time series for the dataset of shape (nT)
+    '''
+
+    t_series = []
+    for t in xrange(dataset.values.shape[0]):
+        t_series.append(dataset.values[t, :, :].mean())
+
+    return t_series
+
+def get_temporal_overlap(dataset_array):
+    ''' Find the maximum temporal overlap across the observation and model datasets
+
+    :param dataset_array: an array of OCW datasets
+
+    :returns: the start and end of the common period as (latest start time,
+        earliest end time)
+    '''
+    start_time = []
+    end_time = []
+    for dataset in dataset_array:
+        start_time.append(dataset.time_range()[0])
+        end_time.append(dataset.time_range()[1])
+
+    return np.max(start_time), np.min(end_time)
+
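
A short sketch (not part of this patch) of the overlap rule used above: the common period runs from the latest start time to the earliest end time across the datasets.

import datetime as dt

ranges = [(dt.datetime(2000, 1, 1), dt.datetime(2009, 12, 1)),
          (dt.datetime(2001, 6, 1), dt.datetime(2012, 12, 1))]
overlap_start = max(start for start, _ in ranges)   # 2001-06-01
overlap_end = min(end for _, end in ranges)         # 2009-12-01
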
+def calc_subregion_area_mean_and_std(dataset_array, subregions):
+    ''' Calculate area mean and standard deviation values for given subregions
+    using datasets on common grid points
+
+    :param dataset_array: An array of OCW Dataset objects
+    :type dataset_array: :class:`list`
+    :param subregions: list of subregions
+    :type subregions: :class:`numpy.ma.array`
+    :returns: area averaged time series and spatial standard deviation for each
+        dataset, both of shape (ndata, ntime, nsubregion), and a masked array
+        marking the subregion index of each grid point
+    '''
+
+    ndata = len(dataset_array)
+    dataset0 = dataset_array[0]
+    if dataset0.lons.ndim == 1:
+        lons, lats = np.meshgrid(dataset0.lons, dataset0.lats)
+    else:
+        lons = dataset0.lons
+        lats = dataset0.lats
+    subregion_array = np.zeros(lons.shape)
+    mask_array = dataset_array[0].values[0, :].mask
+    # dataset0.values.shape[0]: length of the time dimension
+    # spatial average
+    t_series = ma.zeros([ndata, dataset0.values.shape[0], len(subregions)])
+    # spatial standard deviation
+    spatial_std = ma.zeros([ndata, dataset0.values.shape[0], len(subregions)])
+
+    for iregion, subregion in enumerate(subregions):
+        lat_min, lat_max, lon_min, lon_max = subregion[1]
+        y_index, x_index = np.where((lats >= lat_min) & (lats <= lat_max) &
+                                    (lons >= lon_min) & (lons <= lon_max))
+        subregion_array[y_index, x_index] = iregion + 1
+        for idata in np.arange(ndata):
+            t_series[idata, :, iregion] = ma.mean(dataset_array[idata].values[:, y_index, x_index], axis=1)
+            spatial_std[idata, :, iregion] = ma.std(dataset_array[idata].values[:, y_index, x_index], axis=1)
+    subregion_array = ma.array(subregion_array, mask=mask_array)
+    return t_series, spatial_std, subregion_array
+
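
A pure-numpy sketch (not part of this patch) of the subregion selection step: pick the grid points inside a lat/lon box and average the field over them at every time step.

import numpy as np

lats2d, lons2d = np.meshgrid(np.arange(-45, 50, 10), np.arange(-45, 50, 10),
                             indexing='ij')
values = np.random.rand(12, lats2d.shape[0], lats2d.shape[1])   # (time, y, x)

lat_min, lat_max, lon_min, lon_max = 0, 30, -20, 20
y_index, x_index = np.where((lats2d >= lat_min) & (lats2d <= lat_max) &
                            (lons2d >= lon_min) & (lons2d <= lon_max))
area_mean_series = values[:, y_index, x_index].mean(axis=1)     # shape (12,)
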
+def calc_area_weighted_spatial_average(dataset, area_weight=False):
+    '''Calculate the area weighted average of the values in an OCW dataset
+
+    :param dataset: Dataset object
+    :type dataset: :class:`dataset.Dataset`
+    :param area_weight: if True, weight each grid cell by the cosine of its
+        latitude; otherwise compute an unweighted spatial mean
+    :type area_weight: :class:`bool`
+
+    :returns: time series for the dataset of shape (nT)
+    '''
+
+    if dataset.lats.ndim == 1:
+        lons, lats = np.meshgrid(dataset.lons, dataset.lats)
+    else:
+        lons = dataset.lons
+        lats = dataset.lats
+    weights = np.cos(lats * np.pi / 180.)
+
+    nt, ny, nx = dataset.values.shape
+    spatial_average = ma.zeros(nt)
+    for it in np.arange(nt):
+        if area_weight:
+            spatial_average[it] = ma.average(dataset.values[it, :], weights=weights)
+        else:
+            spatial_average[it] = ma.average(dataset.values[it, :])
+
+    return spatial_average
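
A short sketch (not part of this patch) of the cos(latitude) weighting used above; high-latitude cells cover less area, so they contribute less to the spatial mean.

import numpy as np
import numpy.ma as ma

lats = np.array([0., 30., 60.])
lons = np.array([0., 90., 180., 270.])
lons2d, lats2d = np.meshgrid(lons, lats)
weights = np.cos(np.radians(lats2d))              # shape (nlat, nlon)

field = ma.masked_invalid(np.random.rand(3, 4))   # one time step, masking allowed
weighted_mean = ma.average(field, weights=weights)
plain_mean = ma.average(field)
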