Merge branch 'CLIMATE-917' of https://github.com/MichaelArthurAnderson/climate
diff --git a/docs/source/conf.py b/docs/source/conf.py
index f9eaafc..3afb88b 100644
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -14,6 +14,11 @@
import sys
import os
+# esgf is not currently available for Python 3 and will throw an
+# error when building the documents.
+if sys.version_info[0] >= 3:
+ autodoc_mock_imports = ["esgf"]
+
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
@@ -33,11 +38,15 @@
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
+
+# Note that 'sphinxcontrib.autohttp.bottle' is currently broken in Sphinx > 1.56.
+# Remove it from the extensions list if the documentation build fails with a hard error.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.ifconfig',
'sphinxcontrib.httpdomain',
+ 'sphinxcontrib.autohttp.bottle',
]
# Add any paths that contain templates here, relative to this directory.
diff --git a/ocw/dataset.py b/ocw/dataset.py
index 196913a..0a0e1a6 100644
--- a/ocw/dataset.py
+++ b/ocw/dataset.py
@@ -117,6 +117,7 @@
If self.lats and self.lons are from curvilinear coordinates,
the output resolutions are approximate values.
+
:returns: The Dataset's latitudinal and longitudinal spatial resolution
as a tuple of the form (lat_resolution, lon_resolution).
:rtype: (:class:`float`, :class:`float`)
@@ -264,7 +265,7 @@
start=None, end=None):
'''Default Bounds constructor
:param boundary_type: The type of spatial subset boundary.
- :type boundary_type: :mod:`string'
+ :type boundary_type: :mod:`string`
:param lat_min: The minimum latitude bound.
diff --git a/ocw/dataset_loader.py b/ocw/dataset_loader.py
index 4b2a925..f84bdc4 100644
--- a/ocw/dataset_loader.py
+++ b/ocw/dataset_loader.py
@@ -33,33 +33,31 @@
Each keyword argument can be information for a dataset in dictionary
form. For example:
- ``
+
>>> loader_opt1 = {'loader_name': 'rcmed', 'name': 'cru',
'dataset_id': 10, 'parameter_id': 34}
- >>> loader_opt2 = {'path': './data/TRMM_v7_3B43_1980-2010.nc,
+ >>> loader_opt2 = {'path': './data/TRMM_v7_3B43_1980-2010.nc',
'variable': 'pcp'}
>>> loader = DatasetLoader(loader_opt1, loader_opt2)
- ``
Or more conveniently if the loader configuration is defined in a
yaml file named config_file (see RCMES examples):
- ``
+
>>> import yaml
>>> config = yaml.load(open(config_file))
>>> obs_loader_config = config['datasets']['reference']
>>> loader = DatasetLoader(*obs_loader_config)
- ``
As shown in the first example, the dictionary for each argument should
contain a loader name and parameters specific to the particular loader.
Once the configuration is entered, the datasets may be loaded using:
- ``
+
>>> loader.load_datasets()
>>> obs_datasets = loader.datasets
- ``
Additionally, each dataset must have a ``loader_name`` keyword. This may
be one of the following:
+
* ``'local'`` - One or multiple dataset files in a local directory
* ``'local_split'`` - A single dataset split accross multiple files in a
local directory
@@ -74,6 +72,7 @@
Users who wish to load datasets from loaders not described above may
define their own custom dataset loader function and incorporate it as
follows:
+
>>> loader.add_source_loader('my_loader_name', my_loader_func)
- :param loader_opts: Dictionaries containing the each dataset loader
+ :param loader_opts: Dictionaries containing each dataset loader
@@ -84,7 +83,7 @@
:type loader_opts: :class:`dict`
:raises KeyError: If an invalid argument is passed to a data source
- loader function.
+ loader function.
'''
# dataset loader config
self.set_loader_opts(*loader_opts)
@@ -115,8 +114,8 @@
:type loader_name: :mod:`string`
:param loader_func: Reference to a custom defined function. This should
- return an OCW Dataset object, and have an origin which satisfies
- origin['source'] == loader_name.
+ return an OCW Dataset object, and have an origin which satisfies
+ origin['source'] == loader_name.
:type loader_func: :class:`callable`
'''
self._source_loaders[loader_name] = loader_func
diff --git a/ocw/utils.py b/ocw/utils.py
index c2b62cf..8f2c8c1 100755
--- a/ocw/utils.py
+++ b/ocw/utils.py
@@ -402,7 +402,7 @@
''' Trim datasets such that first and last year of data have all 12 months
:param dataset: Dataset object
- :type dataset: :class:`dataset.Dataset
+ :type dataset: :class:`dataset.Dataset`
:returns: Slice index for trimmed dataset
'''
@@ -653,7 +653,7 @@
def calculate_temporal_trends(dataset):
''' Calculate temporal trends in dataset.values
:param dataset: The dataset from which time values should be extracted.
- :type dataset: :class:`dataset.Dataset'
+ :type dataset: :class:`dataset.Dataset`
:returns: Arrays of the temporal trend and standard error
:rtype: :class:`numpy.ma.core.MaskedArray`
@@ -675,13 +675,13 @@
def calculate_ensemble_temporal_trends(timeseries_array, number_of_samples=1000):
''' Calculate temporal trends in an ensemble of time series
:param timeseries_array: Two dimensional array. 1st index: model, 2nd index: time.
- :type timeseries_array: :class:`numpy.ndarray'
+ :type timeseries_array: :class:`numpy.ndarray`
:param sampling: A list whose elements are one-dimensional numpy arrays
- :type timeseries_array: :class:`list'
+ :type timeseries_array: :class:`list`
:returns: temporal trend and estimated error from bootstrapping
- :rtype: :float:`float','float'
+ :rtype: :class:`float`, :class:`float`
'''
nmodels, nt = timeseries_array.shape
@@ -701,13 +701,13 @@
def calculate_temporal_trend_of_time_series(x,y):
''' Calculate least-square trends (a) in y = ax+b and a's standard error
:param x: time series
- :type x: :class:`numpy.ndarray'
+ :type x: :class:`numpy.ndarray`
- :param x: time series
- :type x: :class:`numpy.ndarray'
+ :param y: time series
+ :type y: :class:`numpy.ndarray`
:returns: temporal trend and standard error
- :rtype: :float:`float','float'
+ :rtype: :class:`float`, :class:`float`
'''
slope, intercept, r_value, p_value, std_err = stats.linregress(x,y)
return slope, std_err