Merge branch 'master' into CLIMATE-891
diff --git a/examples/GPM_WRF24_JPDF_comparison.py b/examples/GPM_WRF24_JPDF_comparison.py
index 45eee89..5697f2f 100644
--- a/examples/GPM_WRF24_JPDF_comparison.py
+++ b/examples/GPM_WRF24_JPDF_comparison.py
@@ -40,90 +40,95 @@
     5. plotter
 
 """
+from __future__ import print_function
 
-from ocw.dataset import Bounds
+import numpy as np
+
 import ocw.data_source.local as local
 import ocw.dataset_processor as dsp
 import ocw.metrics as metrics
 import ocw.plotter as plotter
+from ocw.dataset import Bounds
 
-import numpy as np
-import numpy.ma as ma
-from pickle import load, dump
-
-"""
-This is an example of calculating the joint probability distribution function of rainfall intensity and duration for the Northern Great Plains using GPM IMERG data for June/01/2015
-""" 
-
-""" Step 1: Load the GPM and WRF24 datasets with spatial filter """
+# Step 1: Load the GPM and WRF24 datasets with spatial filter.
 
 GPM_dataset_filtered = local.load_GPM_IMERG_files_with_spatial_filter(
-                           file_path='./data/GPM_2015_summer/', 
-                           filename_pattern=['*2015*.HDF5'],
-                           user_mask_file='Bukovsky_regions.nc',
-                           mask_variable_name='Bukovsky',
-                           user_mask_values=[10],
-                           longitude_name='lon',
-                           latitude_name='lat')
+    file_path='./data/GPM_2015_summer/',
+    user_mask_file='Bukovsky_regions.nc',
+    mask_variable_name='Bukovsky',
+    user_mask_values=[10],
+    longitude_name='lon',
+    latitude_name='lat')
 
 WRF_dataset = local.load_WRF_2d_files_RAIN(
-                  file_path='./data/WRF24_2010_summer/', 
-                  filename_pattern=['wrf2dout*'])
+    file_path='./data/WRF24_2010_summer/',
+    filename_pattern=['wrf2dout*'])
 
-""" Step 2: Load the spatial filter (Bukovsky region mask) """
+# Step 2: Load the spatial filter (Bukovsky region mask).
 
-Bukovsky_mask = Bounds(boundary_type='user',
-                       user_mask_file='Bukovsky_regions.nc',
-                       mask_variable_name='Bukovsky',
-                       longitude_name='lon',
-                       latitude_name='lat')  
+Bukovsky_mask = Bounds(
+    boundary_type='user',
+    user_mask_file='Bukovsky_regions.nc',
+    mask_variable_name='Bukovsky',
+    longitude_name='lon',
+    latitude_name='lat')
 
-""" Step 3: Spatial subset the WRF data (for Northern Great Plains, user_mask_values=[10]) """
+# Step 3: Spatial subset the WRF data (for Northern Great Plains, user_mask_values=[10]).
 
-WRF_dataset_filtered = dsp.subset(WRF_dataset, 
-                           Bukovsky_mask, user_mask_values=[10])
+WRF_dataset_filtered = \
+    dsp.subset(WRF_dataset, Bukovsky_mask, user_mask_values=[10])
 
-""" Step 4: Analyze the wet spells """
-duration1, peak1, total1 = metrics.wet_spell_analysis(GPM_dataset_filtered, 
-                               threshold=0.1, nyear=1, dt=0.5)
-duration2, peak2, total2 = metrics.wet_spell_analysis(WRF_dataset_filtered.values, 
-                               threshold=0.1, nyear=1, dt=0.5)
+# Step 4: Analyze the wet spells.
+duration1, peak1, total1 = \
+    metrics.wet_spell_analysis(GPM_dataset_filtered, threshold=0.1, nyear=1, dt=0.5)
 
-""" Step 5: Calculate the joint PDF(JPDF) of spell_duration and peak_rainfall """
+duration2, peak2, total2 = \
+    metrics.wet_spell_analysis(WRF_dataset_filtered.values, threshold=0.1, nyear=1, dt=0.5)
 
-histo2d_GPM = metrics.calc_joint_histogram(data_array1=duration1, data_array2=peak1, 
-                   bins_for_data1=np.append(np.arange(25)+0.5,[48.5,120.5]),
-                   bins_for_data2=[0.1,0.2,0.5,1.0,2.0,5.0,10,20,30]) 
-histo2d_GPM = histo2d_GPM/np.sum(histo2d_GPM)*100.
+# Step 5: Calculate the joint PDF(JPDF) of spell_duration and peak_rainfall.
 
-histo2d_WRF = metrics.calc_joint_histogram(data_array1=duration2, data_array2=peak2, 
-                   bins_for_data1=np.append(np.arange(25)+0.5,[48.5,120.5]),
-                   bins_for_data2=[0.1,0.2,0.5,1.0,2.0,5.0,10,20,30]) 
-histo2d_WRF = histo2d_WRF/np.sum(histo2d_WRF)*100.
+histo2d_GPM = \
+    metrics.calc_joint_histogram(data_array1=duration1, data_array2=peak1,
+                                 bins_for_data1=np.append(np.arange(25)+0.5, [48.5, 120.5]),
+                                 bins_for_data2=[0.1, 0.2, 0.5, 1.0, 2.0, 5.0, 10, 20, 30])
+histo2d_GPM = histo2d_GPM / np.sum(histo2d_GPM) * 100.
 
-""" Step 6: Visualize the JPDF """
+histo2d_WRF = \
+    metrics.calc_joint_histogram(data_array1=duration2, data_array2=peak2,
+                                 bins_for_data1=np.append(np.arange(25)+0.5, [48.5, 120.5]),
+                                 bins_for_data2=[0.1, 0.2, 0.5, 1.0, 2.0, 5.0, 10, 20, 30])
+
+histo2d_WRF = histo2d_WRF / np.sum(histo2d_WRF) * 100.
+
+# Step 6: Visualize the JPDF.
 
 
-plot_level = np.array([0., 0.01,0.05, 0.1, 0.2, 0.5,1,2,3,5,10,25])
-plotter.draw_precipitation_JPDF(plot_data=np.transpose(histo2d_GPM), 
-          plot_level=plot_level, title='', 
-          x_ticks=[0.5, 4.5, 9.5, 14.5, 19.5, 23.5], x_names=['1','5','10','15','20','24'],
-          y_ticks=np.arange(9), y_names=['0.1','0.2','0.5','1.0','2.0','5.0','10','20','30'], 
-          output_file='GPM_JPDF_example')
-plotter.draw_precipitation_JPDF(plot_data=np.transpose(histo2d_WRF), 
-          plot_level=plot_level, title='', 
-          x_ticks=[0.5, 4.5, 9.5, 14.5, 19.5, 23.5], x_names=['1','5','10','15','20','24'],
-          y_ticks=np.arange(9), y_names=['0.1','0.2','0.5','1.0','2.0','5.0','10','20','30'], 
-          output_file='WRF24_JPDF_example')
+plot_level = np.array([0., 0.01, 0.05, 0.1, 0.2, 0.5, 1, 2, 3, 5, 10, 25])
+plotter.draw_precipitation_JPDF(plot_data=np.transpose(histo2d_GPM),
+                                plot_level=plot_level, title='',
+                                x_ticks=[0.5, 4.5, 9.5, 14.5, 19.5, 23.5],
+                                x_names=['1', '5', '10', '15', '20', '24'],
+                                y_ticks=np.arange(9),
+                                y_names=['0.1', '0.2', '0.5', '1.0', '2.0', '5.0', '10', '20', '30'],
+                                output_file='GPM_JPDF_example')
+
+plotter.draw_precipitation_JPDF(plot_data=np.transpose(histo2d_WRF),
+                                plot_level=plot_level, title='',
+                                x_ticks=[0.5, 4.5, 9.5, 14.5, 19.5, 23.5],
+                                x_names=['1', '5', '10', '15', '20', '24'],
+                                y_ticks=np.arange(9),
+                                y_names=['0.1', '0.2', '0.5', '1.0', '2.0', '5.0', '10', '20', '30'],
+                                output_file='WRF24_JPDF_example')
 
 overlap = metrics.calc_histogram_overlap(histo2d_GPM, histo2d_WRF)
 plot_level = np.array([-21, -3, -1, -0.5, -0.2, -0.1, -0.05, 0, 0.05, 0.1, 0.2, 0.5, 1, 3, 21])
 cbar_ticks = [-2, -0.5, -0.1, 0.1, 0.5, 2]
 cbar_label = [str(i) for i in cbar_ticks]
-plotter.draw_precipitation_JPDF(plot_data=np.transpose(histo2d_WRF - histo2d_GPM), 
-          plot_level=plot_level, title='overlap %d ' %overlap +r'%', diff_plot=True,
-          x_ticks=[0.5, 4.5, 9.5, 14.5, 19.5, 23.5], x_names=['1','5','10','15','20','24'],
-          y_ticks=np.arange(9), y_names=['0.1','0.2','0.5','1.0','2.0','5.0','10','20','30'],
-          output_file='GPM_WRF24_JPDF_comparison',  
-          cbar_ticks=cbar_ticks, cbar_label=cbar_label)
-
+plotter.draw_precipitation_JPDF(plot_data=np.transpose(histo2d_WRF - histo2d_GPM),
+                                plot_level=plot_level, title='overlap %d ' % overlap + r'%',
+                                diff_plot=True, x_ticks=[0.5, 4.5, 9.5, 14.5, 19.5, 23.5],
+                                x_names=['1', '5', '10', '15', '20', '24'],
+                                y_ticks=np.arange(9),
+                                y_names=['0.1', '0.2', '0.5', '1.0', '2.0', '5.0', '10', '20', '30'],
+                                output_file='GPM_WRF24_JPDF_comparison',
+                                cbar_ticks=cbar_ticks, cbar_label=cbar_label)
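Side note on the normalization above: dividing each joint histogram by its total and multiplying by 100 turns raw spell counts into percentages, so the GPM and WRF products are comparable regardless of sample size. A minimal standalone sketch of the same operation in plain numpy, with np.histogram2d standing in for metrics.calc_joint_histogram and hypothetical duration/peak samples:

```python
import numpy as np

# Hypothetical wet-spell samples standing in for the duration/peak arrays
# returned by metrics.wet_spell_analysis.
rng = np.random.default_rng(0)
duration = rng.exponential(scale=5.0, size=1000)       # spell length [hours]
peak = rng.lognormal(mean=0.0, sigma=1.0, size=1000)   # peak rainfall [mm/hr]

# Same bin layout the example passes to metrics.calc_joint_histogram.
duration_bins = np.append(np.arange(25) + 0.5, [48.5, 120.5])
peak_bins = [0.1, 0.2, 0.5, 1.0, 2.0, 5.0, 10, 20, 30]

histo2d, _, _ = np.histogram2d(duration, peak, bins=[duration_bins, peak_bins])
histo2d = histo2d / np.sum(histo2d) * 100.  # counts -> percentages
print(histo2d.sum())                        # 100.0 by construction
```
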
diff --git a/examples/draw_climatology_map_MISR_AOD.py b/examples/draw_climatology_map_MISR_AOD.py
index c75d3b3..2346a1e 100644
--- a/examples/draw_climatology_map_MISR_AOD.py
+++ b/examples/draw_climatology_map_MISR_AOD.py
@@ -41,25 +41,26 @@
 
 """
 
-import ocw.dataset as ds
-import ocw.data_source.local as local
-import ocw.dataset_processor as dsp
-import ocw.plotter as plotter
+from __future__ import print_function
 
 import numpy as np
 import numpy.ma as ma
 
+import ocw.data_source.local as local
+import ocw.dataset as ds
+import ocw.dataset_processor as dsp
+import ocw.plotter as plotter
 
-''' data source: https://dx.doi.org/10.6084/m9.figshare.3753321.v1
-    AOD_monthly_2000-Mar_2016-FEB_from_MISR_L3_JOINT.nc is publicly available.'''
+# data source: https://dx.doi.org/10.6084/m9.figshare.3753321.v1
+# AOD_monthly_2000-Mar_2016-FEB_from_MISR_L3_JOINT.nc is publicly available.
 dataset = local.load_file('AOD_monthly_2000-MAR_2016-FEB_from_MISR_L3_JOINT.nc',
                           'nonabsorbing_ave')
-''' Subset the data for East Asia'''
+# Subset the data for East Asia.
 Bounds = ds.Bounds(lat_min=20, lat_max=57.7, lon_min=90, lon_max=150)
 dataset = dsp.subset(dataset, Bounds)
 
-'''The original dataset includes nonabsorbing AOD values between March 2000 and February 2015. 
-dsp.temporal_subset will extract data in September-October-November.'''
+# The original dataset includes nonabsorbing AOD values between March 2000 and February 2016.
+# dsp.temporal_subset will extract data in September-October-November.
 dataset_SON = dsp.temporal_subset(
     dataset, month_start=9, month_end=11, average_each_year=True)
 
diff --git a/examples/esgf_integration_example.py b/examples/esgf_integration_example.py
index e541273..162986b 100644
--- a/examples/esgf_integration_example.py
+++ b/examples/esgf_integration_example.py
@@ -29,7 +29,6 @@
     1. datasource/esgf
 
 """
-
 from __future__ import print_function
 
 import ssl
@@ -38,7 +37,6 @@
 
 import ocw.data_source.esgf as esgf
 
-
 def main():
     """
     An example of using the OCW ESGF library.  Connects to an ESGF
diff --git a/examples/knmi_to_cru31_full_bias.py b/examples/knmi_to_cru31_full_bias.py
index 13b5686..dc822ba 100644
--- a/examples/knmi_to_cru31_full_bias.py
+++ b/examples/knmi_to_cru31_full_bias.py
@@ -46,21 +46,30 @@
     7. plotter
 
 """
+from __future__ import print_function
 
 import datetime
-import urllib
+import ssl
+import sys
 from os import path
 
 import numpy as np
 
 import ocw.data_source.local as local
 import ocw.data_source.rcmed as rcmed
-from ocw.dataset import Bounds as Bounds
 import ocw.dataset_processor as dsp
 import ocw.evaluation as evaluation
 import ocw.metrics as metrics
 import ocw.plotter as plotter
-import ssl
+from ocw.dataset import Bounds as Bounds
+
+if sys.version_info[0] >= 3:
+    from urllib.request import urlretrieve
+else:
+    # Not Python 3 - today, it is most likely to be Python 2
+    # But note that this might need an update when Python 4
+    # might be around one day
+    from urllib import urlretrieve
 
 if hasattr(ssl, '_create_unverified_context'):
     ssl._create_default_https_context = ssl._create_unverified_context
@@ -75,31 +84,29 @@
 OUTPUT_PLOT = "cru_31_tmax_knmi_africa_bias_full"
 
 # Download necessary NetCDF file if not present
-if path.exists(MODEL):
-    pass
-else:
-    urllib.urlretrieve(FILE_LEADER + MODEL, MODEL)
+if not path.exists(MODEL):
+    urlretrieve(FILE_LEADER + MODEL, MODEL)
 
-""" Step 1: Load Local NetCDF File into OCW Dataset Objects """
+# Step 1: Load Local NetCDF File into OCW Dataset Objects.
 print("Loading %s into an OCW Dataset Object" % (MODEL,))
 knmi_dataset = local.load_file(MODEL, "tasmax")
 print("KNMI_Dataset.values shape: (times, lats, lons) - %s \n" %
       (knmi_dataset.values.shape,))
 
-""" Step 2: Fetch an OCW Dataset Object from the data_source.rcmed module """
+# Step 2: Fetch an OCW Dataset Object from the data_source.rcmed module.
 print("Working with the rcmed interface to get CRU3.1 Daily-Max Temp")
 metadata = rcmed.get_parameters_metadata()
 
 cru_31 = [m for m in metadata if m['parameter_id'] == "39"][0]
 
-""" The RCMED API uses the following function to query, subset and return the 
-raw data from the database:
+# The RCMED API uses the following function to query, subset and return the
+# raw data from the database:
+#
+# rcmed.parameter_dataset(dataset_id, parameter_id, min_lat, max_lat, min_lon,
+#                        max_lon, start_time, end_time)
+#
+# The first two required params are in the cru_31 variable we defined earlier
 
-rcmed.parameter_dataset(dataset_id, parameter_id, min_lat, max_lat, min_lon, 
-                        max_lon, start_time, end_time)
-
-The first two required params are in the cru_31 variable we defined earlier
-"""
 # Must cast to int since the rcmed api requires ints
 dataset_id = int(cru_31['dataset_id'])
 parameter_id = int(cru_31['parameter_id'])
@@ -139,7 +146,7 @@
                                         start_time,
                                         end_time)
 
-""" Step 3: Resample Datasets so they are the same shape """
+# Step 3: Resample Datasets so they are the same shape.
 print("CRU31_Dataset.values shape: (times, lats, lons) - %s" %
       (cru31_dataset.values.shape,))
 print("KNMI_Dataset.values shape: (times, lats, lons) - %s" %
@@ -164,7 +171,7 @@
 print("KNMI_Dataset.values shape: %s" % (knmi_dataset.values.shape,))
 print("CRU31_Dataset.values shape: %s \n\n" % (cru31_dataset.values.shape,))
 
-""" Spatially Regrid the Dataset Objects to a 1/2 degree grid """
+# Spatially Regrid the Dataset Objects to a 1/2 degree grid.
 # Using the bounds we will create a new set of lats and lons on 0.5 degree step
 new_lons = np.arange(min_lon, max_lon, 0.5)
 new_lats = np.arange(min_lat, max_lat, 0.5)
@@ -177,12 +184,12 @@
 print("Final shape of the KNMI_Dataset:%s" % (knmi_dataset.values.shape, ))
 print("Final shape of the CRU31_Dataset:%s" % (cru31_dataset.values.shape, ))
 
-""" Step 4:  Build a Metric to use for Evaluation - Bias for this example """
+# Step 4:  Build a Metric to use for Evaluation - Bias for this example.
 # You can build your own metrics, but OCW also ships with some common metrics
 print("Setting up a Bias metric to use for evaluation")
 bias = metrics.Bias()
 
-""" Step 5: Create an Evaluation Object using Datasets and our Metric """
+# Step 5: Create an Evaluation Object using Datasets and our Metric.
 # The Evaluation Class Signature is:
 # Evaluation(reference, targets, metrics, subregions=None)
 # Evaluation can take in multiple targets and metrics, so we need to convert
@@ -192,7 +199,7 @@
 print("Executing the Evaluation using the object's run() method")
 bias_evaluation.run()
 
-""" Step 6: Make a Plot from the Evaluation.results """
+# Step 6: Make a Plot from the Evaluation.results.
 # The Evaluation.results are a set of nested lists to support many different
 # possible Evaluation scenarios.
 #
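The urlretrieve import guard introduced in this file recurs across all of the examples below. As a self-contained sketch, the whole download-if-missing pattern reduces to the following (fetch_once is a hypothetical helper, not an OCW function):

```python
import sys
from os import path

if sys.version_info[0] >= 3:
    from urllib.request import urlretrieve
else:
    from urllib import urlretrieve


def fetch_once(url, filename):
    """Download url to filename unless a local copy already exists."""
    if not path.exists(filename):
        urlretrieve(url, filename)
    return filename

# e.g. fetch_once(FILE_LEADER + MODEL, MODEL)
```
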
diff --git a/examples/model_ensemble_to_rcmed.py b/examples/model_ensemble_to_rcmed.py
index 787367b..e99c756 100644
--- a/examples/model_ensemble_to_rcmed.py
+++ b/examples/model_ensemble_to_rcmed.py
@@ -17,12 +17,9 @@
 
 """
     model_ensemble_to_rcmed.py
-
     Use OCW to download, evaluate and plot (contour map) two datasets
     against a reference dataset and OCW standard metrics (bias).
-
     In this example:
-
     1. Download two netCDF files from a local site.
         AFRICA_KNMI-RACMO2.2b_CTL_ERAINT_MM_50km_1989-2008_tasmax.nc
         AFRICA_UC-WRF311_CTL_ERAINT_MM_50km-rg_1989-2008_tasmax.nc
@@ -34,9 +31,7 @@
     6. Build a bias metric to use for evaluation using the standard OCW metric set.
     7. Create an evaluation object using the datasets and metric.
     8. Plot the results of the evaluation (contour map).
-
     OCW modules demonstrated:
-
     1. datasource/local
     2. datasource/rcmed
     3. dataset
@@ -44,24 +39,32 @@
     5. metrics
     6. evaluation
     7. plotter
-
 """
+from __future__ import print_function
 
 import datetime
 import math
-import urllib
+import ssl
+import sys
 from os import path
 
 import numpy as np
 
 import ocw.data_source.local as local
 import ocw.data_source.rcmed as rcmed
-from ocw.dataset import Bounds as Bounds
 import ocw.dataset_processor as dsp
 import ocw.evaluation as evaluation
 import ocw.metrics as metrics
 import ocw.plotter as plotter
-import ssl
+from ocw.dataset import Bounds as Bounds
+
+if sys.version_info[0] >= 3:
+    from urllib.request import urlretrieve
+else:
+    # Not Python 3 - today, it is most likely to be Python 2
+    # But note that this might need an update when Python 4
+    # might be around one day
+    from urllib import urlretrieve
 
 if hasattr(ssl, '_create_unverified_context'):
     ssl._create_default_https_context = ssl._create_unverified_context
@@ -77,18 +80,14 @@
 OUTPUT_PLOT = "tasmax_africa_bias_annual"
 
 # Download necessary NetCDF file if not present
-if path.exists(FILE_1):
-    pass
-else:
-    urllib.urlretrieve(FILE_LEADER + FILE_1, FILE_1)
+if not path.exists(FILE_1):
+    urlretrieve(FILE_LEADER + FILE_1, FILE_1)
 
-if path.exists(FILE_2):
-    pass
-else:
-    urllib.urlretrieve(FILE_LEADER + FILE_2, FILE_2)
+if not path.exists(FILE_2):
+    urlretrieve(FILE_LEADER + FILE_2, FILE_2)
 
 
-""" Step 1: Load Local NetCDF File into OCW Dataset Objects """
+# Step 1: Load Local NetCDF File into OCW Dataset Objects.
 # Load local knmi model data
 knmi_dataset = local.load_file(FILE_1, "tasmax")
 knmi_dataset.name = "AFRICA_KNMI-RACMO2.2b_CTL_ERAINT_MM_50km_1989-2008_tasmax"
@@ -97,20 +96,18 @@
 wrf311_dataset.name = "AFRICA_UC-WRF311_CTL_ERAINT_MM_50km-rg_1989-2008_tasmax"
 
 
-""" Step 2: Fetch an OCW Dataset Object from the data_source.rcmed module """
+# Step 2: Fetch an OCW Dataset Object from the data_source.rcmed module.
 print("Working with the rcmed interface to get CRU3.1 Daily-Max Temp")
 metadata = rcmed.get_parameters_metadata()
 
 cru_31 = [m for m in metadata if m['parameter_id'] == "39"][0]
 
-""" The RCMED API uses the following function to query, subset and return the 
-raw data from the database:
+# The RCMED API uses the following function to query, subset and return the
+# raw data from the database:
+# rcmed.parameter_dataset(dataset_id, parameter_id, min_lat, max_lat, min_lon,
+#                        max_lon, start_time, end_time)
+# The first two required params are in the cru_31 variable we defined earlier
 
-rcmed.parameter_dataset(dataset_id, parameter_id, min_lat, max_lat, min_lon, 
-                        max_lon, start_time, end_time)
-
-The first two required params are in the cru_31 variable we defined earlier
-"""
 # Must cast to int since the rcmed api requires ints
 dataset_id = int(cru_31['dataset_id'])
 parameter_id = int(cru_31['parameter_id'])
@@ -144,7 +141,7 @@
                                         start_time,
                                         end_time)
 
-""" Step 3: Resample Datasets so they are the same shape """
+# Step 3: Resample Datasets so they are the same shape.
 
 # Running Temporal Rebin early helps negate the issue of datasets being on different
 # days of the month (1st vs. 15th)
@@ -177,7 +174,7 @@
 knmi_dataset = dsp.subset(knmi_dataset, new_bounds)
 wrf311_dataset = dsp.subset(wrf311_dataset, new_bounds)
 
-""" Spatially Regrid the Dataset Objects to a 1/2 degree grid """
+# Spatially Regrid the Dataset Objects to a 1/2 degree grid.
 # Using the bounds we will create a new set of lats and lons on 1/2 degree step
 new_lons = np.arange(min_lon, max_lon, 0.5)
 new_lats = np.arange(min_lat, max_lat, 0.5)
@@ -190,24 +187,23 @@
 # Generate an ensemble dataset from knmi and wrf models
 ensemble_dataset = dsp.ensemble([knmi_dataset, wrf311_dataset])
 
-""" Step 4:  Build a Metric to use for Evaluation - Bias for this example """
+# Step 4:  Build a Metric to use for Evaluation - Bias for this example.
 print("Setting up a Bias metric to use for evaluation")
 bias = metrics.Bias()
 
-""" Step 5: Create an Evaluation Object using Datasets and our Metric """
+# Step 5: Create an Evaluation Object using Datasets and our Metric.
 # The Evaluation Class Signature is:
 # Evaluation(reference, targets, metrics, subregions=None)
 # Evaluation can take in multiple targets and metrics, so we need to convert
 # our examples into Python lists.  Evaluation will iterate over the lists
 print("Making the Evaluation definition")
-bias_evaluation = evaluation.Evaluation(cru31_dataset,
-                                        [knmi_dataset, wrf311_dataset,
-                                            ensemble_dataset],
-                                        [bias])
+bias_evaluation = \
+    evaluation.Evaluation(cru31_dataset, [knmi_dataset, wrf311_dataset, ensemble_dataset], [bias])
+
 print("Executing the Evaluation using the object's run() method")
 bias_evaluation.run()
 
-""" Step 6: Make a Plot from the Evaluation.results """
+# Step 6: Make a Plot from the Evaluation.results.
 # The Evaluation.results are a set of nested lists to support many different
 # possible Evaluation scenarios.
 #
@@ -224,12 +220,15 @@
 lats = new_lats
 lons = new_lons
 fname = OUTPUT_PLOT
-gridshape = (3, start_time.year - end_time.year + 1)  # Using a 3 x N since we have a N year(s) of data for 3 models
+
+# Use a 3 x N grid since we have N years of data for 3 models
+gridshape = (3, end_time.year - start_time.year + 1)
+
 plotnames = ["KNMI", "WRF311", "ENSEMBLE"]
 for i in np.arange(3):
     plot_title = "TASMAX Bias of CRU 3.1 vs. %s (%s - %s)" % (
         plotnames[i], start_time.strftime("%Y/%m/%d"), end_time.strftime("%Y/%m/%d"))
     output_file = "%s_%s" % (fname, plotnames[i].lower())
-    print "creating %s" % (output_file,)
+    print("creating %s" % (output_file,))
     plotter.draw_contour_map(results[i, :], lats, lons, output_file,
                              gridshape=gridshape, ptitle=plot_title)
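dsp.ensemble([knmi_dataset, wrf311_dataset]) builds the third target evaluated above. Assuming it averages the member fields cell by cell, the numpy equivalent for two members looks like:

```python
import numpy as np

# Two toy model fields with shape (times, lats, lons).
knmi = np.full((12, 2, 2), 300.0)
wrf311 = np.full((12, 2, 2), 302.0)

# Cell-by-cell mean across the member list.
ensemble = np.mean([knmi, wrf311], axis=0)
print(ensemble[0, 0, 0])  # 301.0
```
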
diff --git a/examples/multi_model_evaluation.py b/examples/multi_model_evaluation.py
index ba6ad56..6fa2837 100644
--- a/examples/multi_model_evaluation.py
+++ b/examples/multi_model_evaluation.py
@@ -50,23 +50,32 @@
     8. utils
 
 """
+from __future__ import print_function
 
 import datetime
-import urllib
-import numpy as np
+import ssl
+import sys
 from os import path
 
+import numpy as np
 
-# import Apache OCW dependences
 import ocw.data_source.local as local
 import ocw.data_source.rcmed as rcmed
-from ocw.dataset import Bounds as Bounds
 import ocw.dataset_processor as dsp
 import ocw.evaluation as evaluation
 import ocw.metrics as metrics
 import ocw.plotter as plotter
 import ocw.utils as utils
-import ssl
+from ocw.dataset import Bounds as Bounds
+
+if sys.version_info[0] >= 3:
+    from urllib.request import urlretrieve
+else:
+    # Not Python 3 - today, it is most likely to be Python 2
+    # But note that this might need an update when Python 4
+    # might be around one day
+    from urllib import urlretrieve
+
 if hasattr(ssl, '_create_unverified_context'):
     ssl._create_default_https_context = ssl._create_unverified_context
 
@@ -100,43 +109,37 @@
 
 
 # Download necessary NetCDF file if not present
-if path.exists(FILE_1):
-    pass
-else:
-    urllib.urlretrieve(FILE_LEADER + FILE_1, FILE_1)
+if not path.exists(FILE_1):
+    urlretrieve(FILE_LEADER + FILE_1, FILE_1)
 
-if path.exists(FILE_2):
-    pass
-else:
-    urllib.urlretrieve(FILE_LEADER + FILE_2, FILE_2)
+if not path.exists(FILE_2):
+    urlretrieve(FILE_LEADER + FILE_2, FILE_2)
 
-
-""" Step 1: Load Local NetCDF File into OCW Dataset Objects and store in list"""
+# Step 1: Load Local NetCDF File into OCW Dataset Objects and store in list.
 target_datasets.append(local.load_file(FILE_1, varName, name="KNMI"))
 target_datasets.append(local.load_file(FILE_2, varName, name="UCT"))
 
 
-""" Step 2: Fetch an OCW Dataset Object from the data_source.rcmed module """
+# Step 2: Fetch an OCW Dataset Object from the data_source.rcmed module.
 print("Working with the rcmed interface to get CRU3.1 Monthly Mean Precipitation")
 # the dataset_id and the parameter id were determined from
 # https://rcmes.jpl.nasa.gov/content/data-rcmes-database
 CRU31 = rcmed.parameter_dataset(
     10, 37, LAT_MIN, LAT_MAX, LON_MIN, LON_MAX, START, END)
 
-""" Step 3: Resample Datasets so they are the same shape """
+# Step 3: Resample Datasets so they are the same shape.
 print("Resampling datasets")
 CRU31 = dsp.water_flux_unit_conversion(CRU31)
-CRU31 = dsp.temporal_rebin(CRU31, datetime.timedelta(days=30))
+CRU31 = dsp.temporal_rebin(CRU31, temporal_resolution='monthly')
 
 for member, each_target_dataset in enumerate(target_datasets):
     target_datasets[member] = dsp.subset(target_datasets[member], EVAL_BOUNDS)
-    target_datasets[member] = dsp.water_flux_unit_conversion(target_datasets[
-                                                             member])
+    target_datasets[member] = dsp.water_flux_unit_conversion(target_datasets[member])
     target_datasets[member] = dsp.temporal_rebin(
-        target_datasets[member], datetime.timedelta(days=30))
+        target_datasets[member], temporal_resolution='monthly')
 
 
-""" Spatially Regrid the Dataset Objects to a user defined  grid """
+# Spatially Regrid the Dataset Objects to a user-defined grid.
 # Using the bounds we will create a new set of lats and lons
 print("Regridding datasets")
 new_lats = np.arange(LAT_MIN, LAT_MAX, gridLatStep)
@@ -159,8 +162,8 @@
 _, CRU31.values = utils.calc_climatology_year(CRU31)
 
 for member, each_target_dataset in enumerate(target_datasets):
-    _, target_datasets[member].values = utils.calc_climatology_year(target_datasets[
-                                                                    member])
+    _, target_datasets[member].values = \
+        utils.calc_climatology_year(target_datasets[member])
 
 for target in target_datasets:
     allNames.append(target.name)
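The switch from datetime.timedelta(days=30) to temporal_resolution='monthly' is more than cosmetic: a fixed 30-day step drifts off calendar-month boundaries almost immediately, as a quick check shows:

```python
import datetime

start = datetime.datetime(2000, 1, 1)
# Stepping by a fixed 30 days leaves month boundaries after one step.
steps = [start + i * datetime.timedelta(days=30) for i in range(4)]
print([d.strftime('%Y-%m-%d') for d in steps])
# ['2000-01-01', '2000-01-31', '2000-03-01', '2000-03-31']
```
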
diff --git a/examples/multi_model_taylor_diagram.py b/examples/multi_model_taylor_diagram.py
index 8edee7b..197b922 100644
--- a/examples/multi_model_taylor_diagram.py
+++ b/examples/multi_model_taylor_diagram.py
@@ -48,9 +48,15 @@
     7. utils
 
 """
+from __future__ import print_function
 
-# Apache OCW lib immports
-from ocw.dataset import Dataset, Bounds
+import datetime
+import ssl
+import sys
+from os import path
+
+import numpy as np
+
 import ocw.data_source.local as local
 import ocw.data_source.rcmed as rcmed
 import ocw.dataset_processor as dsp
@@ -58,17 +64,19 @@
 import ocw.metrics as metrics
 import ocw.plotter as plotter
 import ocw.utils as utils
+from ocw.dataset import Bounds
 
-import datetime
-import numpy as np
-import urllib
+if sys.version_info[0] >= 3:
+    from urllib.request import urlretrieve
+else:
+    # Not Python 3 - today, it is most likely to be Python 2
+    # But note that this might need an update when Python 4
+    # might be around one day
+    from urllib import urlretrieve
 
-from os import path
-import ssl
 if hasattr(ssl, '_create_unverified_context'):
     ssl._create_default_https_context = ssl._create_unverified_context
 
-
 # File URL leader
 FILE_LEADER = "http://zipper.jpl.nasa.gov/dist/"
 # Three Local Model Files
@@ -83,7 +91,7 @@
 LAT_MAX = 42.24
 LON_MIN = -24.0
 LON_MAX = 60.0
-START = datetime.datetime(2000, 01, 1)
+START = datetime.datetime(2000, 1, 1)
 END = datetime.datetime(2007, 12, 31)
 EVAL_BOUNDS = Bounds(lat_min=LAT_MIN, lat_max=LAT_MAX,
                      lon_min=LON_MIN, lon_max=LON_MAX, start=START, end=END)
@@ -101,35 +109,29 @@
 ref_datasets = []
 
 # Download necessary NetCDF file if not present
-if path.exists(FILE_1):
-    pass
-else:
-    urllib.urlretrieve(FILE_LEADER + FILE_1, FILE_1)
+if not path.exists(FILE_1):
+    urlretrieve(FILE_LEADER + FILE_1, FILE_1)
 
-if path.exists(FILE_2):
-    pass
-else:
-    urllib.urlretrieve(FILE_LEADER + FILE_2, FILE_2)
+if not path.exists(FILE_2):
+    urlretrieve(FILE_LEADER + FILE_2, FILE_2)
 
-if path.exists(FILE_3):
-    pass
-else:
-    urllib.urlretrieve(FILE_LEADER + FILE_3, FILE_3)
+if not path.exists(FILE_3):
+    urlretrieve(FILE_LEADER + FILE_3, FILE_3)
 
-""" Step 1: Load Local NetCDF File into OCW Dataset Objects and store in list"""
+# Step 1: Load Local NetCDF File into OCW Dataset Objects and store in list.
 target_datasets.append(local.load_file(FILE_1, varName, name="KNMI"))
 target_datasets.append(local.load_file(FILE_2, varName, name="REGM3"))
 target_datasets.append(local.load_file(FILE_3, varName, name="UCT"))
 
 
-""" Step 2: Fetch an OCW Dataset Object from the data_source.rcmed module """
+# Step 2: Fetch an OCW Dataset Object from the data_source.rcmed module.
 print("Working with the rcmed interface to get CRU3.1 Monthly Mean Precipitation")
 # the dataset_id and the parameter id were determined from
 # https://rcmes.jpl.nasa.gov/content/data-rcmes-database
 CRU31 = rcmed.parameter_dataset(
     10, 37, LAT_MIN, LAT_MAX, LON_MIN, LON_MAX, START, END)
 
-""" Step 3: Resample Datasets so they are the same shape """
+# Step 3: Resample Datasets so they are the same shape.
 print("Resampling datasets ...")
 print("... on units")
 CRU31 = dsp.water_flux_unit_conversion(CRU31)
@@ -137,10 +139,10 @@
 CRU31 = dsp.temporal_rebin(CRU31, temporal_resolution='monthly')
 
 for member, each_target_dataset in enumerate(target_datasets):
-    target_datasets[member] = dsp.water_flux_unit_conversion(target_datasets[
-                                                             member])
-    target_datasets[member] = dsp.temporal_rebin(
-        target_datasets[member], temporal_resolution='monthly')
+    target_datasets[member] = \
+        dsp.water_flux_unit_conversion(target_datasets[member])
+    target_datasets[member] = \
+        dsp.temporal_rebin(target_datasets[member], temporal_resolution='monthly')
     target_datasets[member] = dsp.subset(target_datasets[member], EVAL_BOUNDS)
 
 # Regrid
@@ -166,8 +168,8 @@
 target_datasets.append(target_datasets_ensemble)
 
 for member, each_target_dataset in enumerate(target_datasets):
-    target_datasets[member].values = utils.calc_temporal_mean(target_datasets[
-                                                              member])
+    target_datasets[member].values = \
+        utils.calc_temporal_mean(target_datasets[member])
 
 allNames = []
 
@@ -185,7 +187,7 @@
                                                target_datasets,
                                                # 1 or more metrics to use in
                                                # the evaluation
-                                               [taylor_diagram])  # , mean_bias,spatial_std_dev_ratio, pattern_correlation])
+                                               [taylor_diagram])
 RCMs_to_CRU_evaluation.run()
 
 taylor_data = RCMs_to_CRU_evaluation.results[0]
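A Taylor diagram places each target by two statistics computed against the reference: the ratio of standard deviations (the radial coordinate) and the pattern correlation (the angular coordinate). A minimal numpy illustration of those two numbers on synthetic fields:

```python
import numpy as np

rng = np.random.default_rng(1)
ref = rng.normal(size=(12, 10, 10))            # reference field
tgt = ref + 0.3 * rng.normal(size=ref.shape)   # a target with extra noise

std_ratio = tgt.std() / ref.std()                           # radial coordinate
pattern_corr = np.corrcoef(ref.ravel(), tgt.ravel())[0, 1]  # angular coordinate
print(std_ratio, pattern_corr)
```
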
diff --git a/examples/podaac_integration_example.py b/examples/podaac_integration_example.py
index be85884..81d8f12 100644
--- a/examples/podaac_integration_example.py
+++ b/examples/podaac_integration_example.py
@@ -35,6 +35,7 @@
     4. plotter
 
 """
+from __future__ import print_function
 
 import ocw.data_source.podaac_datasource as podaac
 import ocw.evaluation as evaluation
@@ -45,9 +46,10 @@
 variable = 'uwnd'
 name = 'PO.DAAC_test_dataset'
 OUTPUT_PLOT = "ccmp_temporal_std"
-""" Step 1: Download remote PO.DAAC Dataset and read it into an OCW Dataset Object"""
+# Step 1: Download remote PO.DAAC Dataset and read it into an OCW Dataset Object.
 print("Available Level4 PO.DAAC Granules: %s" % podaac.list_available_extract_granule_dataset_ids())
-print("Extracting variable '%s' from  Level4 granule '%s' and converting it into a OCW dataset object." % (variable, datasetId))
+print("Extracting variable '%s' from Level4 granule '%s' and converting it into an OCW dataset."
+      % (variable, datasetId))
 ccmp_dataset = podaac.extract_l4_granule(
     variable=variable, dataset_id=datasetId, name=name)
 print("CCMP_Dataset.values shape: (times, lats, lons) - %s \n" %
@@ -57,12 +59,12 @@
 lats = ccmp_dataset.lats
 lons = ccmp_dataset.lons
 
-""" Step 2:  Build a Metric to use for Evaluation - Temporal STD for this example """
+# Step 2:  Build a Metric to use for Evaluation - Temporal STD for this example.
 # You can build your own metrics, but OCW also ships with some common metrics
 print("Setting up a Temporal STD metric to use for evaluation")
 std = metrics.TemporalStdDev()
 
-""" Step 3: Create an Evaluation Object using Datasets and our Metric """
+# Step 3: Create an Evaluation Object using Datasets and our Metric.
 # The Evaluation Class Signature is:
 # Evaluation(reference, targets, metrics, subregions=None)
 # Evaluation can take in multiple targets and metrics, so we need to convert
@@ -74,7 +76,7 @@
 print("Executing the Evaluation using the object's run() method")
 std_evaluation.run()
 
-""" Step 4: Make a Plot from the Evaluation.results """
+# Step 4: Make a Plot from the Evaluation.results.
 # The Evaluation.results are a set of nested lists to support many different
 # possible Evaluation scenarios.
 #
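metrics.TemporalStdDev collapses the time axis to one standard deviation per grid cell; assuming that definition, the plain numpy analogue is:

```python
import numpy as np

values = np.random.default_rng(2).normal(size=(120, 4, 5))  # (times, lats, lons)
temporal_std = values.std(axis=0)   # one standard deviation per grid cell
print(temporal_std.shape)           # (4, 5)
```
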
diff --git a/examples/simple_model_to_model_bias.py b/examples/simple_model_to_model_bias.py
index ad1f29b..8dc134d 100644
--- a/examples/simple_model_to_model_bias.py
+++ b/examples/simple_model_to_model_bias.py
@@ -43,18 +43,10 @@
     5. plotter
 
 """
+from __future__ import print_function
 
-import datetime
-from os import path
 import sys
-
-if sys.version_info[0] >= 3:
-    from urllib.request import urlretrieve
-else:
-    # Not Python 3 - today, it is most likely to be Python 2
-    # But note that this might need an update when Python 4
-    # might be around one day
-    from urllib import urlretrieve
+from os import path
 
 import numpy as np
 
@@ -64,6 +56,14 @@
 import ocw.metrics as metrics
 import ocw.plotter as plotter
 
+if sys.version_info[0] >= 3:
+    from urllib.request import urlretrieve
+else:
+    # Not Python 3 - today, it is most likely to be Python 2
+    # But note that this might need an update when Python 4
+    # might be around one day
+    from urllib import urlretrieve
+
 # File URL leader
 FILE_LEADER = "http://zipper.jpl.nasa.gov/dist/"
 # Two Local Model Files
@@ -80,7 +80,7 @@
 if not path.exists(FILE_2_PATH):
     urlretrieve(FILE_LEADER + FILE_2, FILE_2_PATH)
 
-""" Step 1: Load Local NetCDF Files into OCW Dataset Objects """
+# Step 1: Load Local NetCDF Files into OCW Dataset Objects.
 print("Loading %s into an OCW Dataset Object" % (FILE_1_PATH,))
 knmi_dataset = local.load_file(FILE_1_PATH, "tasmax")
 print("KNMI_Dataset.values shape: (times, lats, lons) - %s \n" %
@@ -91,14 +91,14 @@
 print("WRF_Dataset.values shape: (times, lats, lons) - %s \n" %
       (wrf_dataset.values.shape,))
 
-""" Step 2: Temporally Rebin the Data into an Annual Timestep """
+# Step 2: Temporally Rebin the Data into an Annual Timestep.
 print("Temporally Rebinning the Datasets to an Annual Timestep")
 knmi_dataset = dsp.temporal_rebin(knmi_dataset, temporal_resolution='annual')
 wrf_dataset = dsp.temporal_rebin(wrf_dataset, temporal_resolution='annual')
 print("KNMI_Dataset.values shape: %s" % (knmi_dataset.values.shape,))
 print("WRF_Dataset.values shape: %s \n\n" % (wrf_dataset.values.shape,))
 
-""" Step 3: Spatially Regrid the Dataset Objects to a 1 degree grid """
+# Step 3: Spatially Regrid the Dataset Objects to a 1 degree grid.
 #  The spatial_boundaries() function returns the spatial extent of the dataset
 print("The KNMI_Dataset spatial bounds (min_lat, max_lat, min_lon, max_lon) are: \n"
       "%s\n" % (knmi_dataset.spatial_boundaries(), ))
@@ -121,12 +121,12 @@
 print("Final shape of the WRF_Dataset: \n"
       "%s\n" % (wrf_dataset.values.shape, ))
 
-""" Step 4:  Build a Metric to use for Evaluation - Bias for this example """
+# Step 4:  Build a Metric to use for Evaluation - Bias for this example.
 # You can build your own metrics, but OCW also ships with some common metrics
 print("Setting up a Bias metric to use for evaluation")
 bias = metrics.Bias()
 
-""" Step 5: Create an Evaluation Object using Datasets and our Metric """
+# Step 5: Create an Evaluation Object using Datasets and our Metric.
 # The Evaluation Class Signature is:
 # Evaluation(reference, targets, metrics, subregions=None)
 # Evaluation can take in multiple targets and metrics, so we need to convert
@@ -136,7 +136,7 @@
 print("Executing the Evaluation using the object's run() method")
 bias_evaluation.run()
 
-""" Step 6: Make a Plot from the Evaluation.results """
+# Step 6: Make a Plot from the Evaluation.results.
 # The Evaluation.results are a set of nested lists to support many different
 # possible Evaluation scenarios.
 #
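metrics.Bias is the simplest metric in the set: a per-cell difference between target and reference. A toy sketch (the sign convention, target minus reference, is an assumption here; check the metric's docstring):

```python
import numpy as np

reference = np.full((24, 3, 3), 290.0)  # e.g. tasmax in K
rng = np.random.default_rng(3)
target = reference + rng.normal(loc=0.5, scale=0.1, size=reference.shape)

bias = target - reference     # assumed sign convention
print(round(bias.mean(), 2))  # ~0.5
```
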
diff --git a/examples/simple_model_tstd.py b/examples/simple_model_tstd.py
index 6412493..75cb5c0 100644
--- a/examples/simple_model_tstd.py
+++ b/examples/simple_model_tstd.py
@@ -38,15 +38,24 @@
     4. plotter
 
 """
+from __future__ import print_function
 
+import sys
 from os import path
-import urllib
 
 import ocw.data_source.local as local
 import ocw.evaluation as evaluation
 import ocw.metrics as metrics
 import ocw.plotter as plotter
 
+if sys.version_info[0] >= 3:
+    from urllib.request import urlretrieve
+else:
+    # Not Python 3 - today, it is most likely to be Python 2
+    # But note that this might need an update when Python 4
+    # might be around one day
+    from urllib import urlretrieve
+
 # File URL leader
 FILE_LEADER = "http://zipper.jpl.nasa.gov/dist/"
 # One Local Model Files
@@ -56,12 +65,10 @@
 OUTPUT_PLOT = "knmi_temporal_std"
 
 # Download necessary NetCDF file if needed
-if path.exists(FILE_1):
-    pass
-else:
-    urllib.urlretrieve(FILE_LEADER + FILE_1, FILE_1)
+if not path.exists(FILE_1):
+    urlretrieve(FILE_LEADER + FILE_1, FILE_1)
 
-""" Step 1: Load Local NetCDF File into OCW Dataset Objects """
+# Step 1: Load Local NetCDF File into OCW Dataset Objects.
 print("Loading %s into an OCW Dataset Object" % (FILE_1,))
 # 'tasmax' is variable name of values
 knmi_dataset = local.load_file(FILE_1, "tasmax")
@@ -72,12 +79,12 @@
 lats = knmi_dataset.lats
 lons = knmi_dataset.lons
 
-""" Step 2:  Build a Metric to use for Evaluation - Temporal STD for this example """
+# Step 2:  Build a Metric to use for Evaluation - Temporal STD for this example.
 # You can build your own metrics, but OCW also ships with some common metrics
 print("Setting up a Temporal STD metric to use for evaluation")
 std = metrics.TemporalStdDev()
 
-""" Step 3: Create an Evaluation Object using Datasets and our Metric """
+# Step 3: Create an Evaluation Object using Datasets and our Metric.
 # The Evaluation Class Signature is:
 # Evaluation(reference, targets, metrics, subregions=None)
 # Evaluation can take in multiple targets and metrics, so we need to convert
@@ -89,7 +96,7 @@
 print("Executing the Evaluation using the object's run() method")
 std_evaluation.run()
 
-""" Step 4: Make a Plot from the Evaluation.results """
+# Step 4: Make a Plot from the Evaluation.results.
 # The Evaluation.results are a set of nested lists to support many different
 # possible Evaluation scenarios.
 #
diff --git a/examples/subregions_portrait_diagram.py b/examples/subregions_portrait_diagram.py
index 3e6785c..ee5ea37 100644
--- a/examples/subregions_portrait_diagram.py
+++ b/examples/subregions_portrait_diagram.py
@@ -53,15 +53,15 @@
     8. utils

 """
+from __future__ import print_function

-from os import path
-import urllib
-import ssl
 import datetime
+import ssl
+import sys
+from os import path
+
 import numpy as np

-# Apache OCW lib immports
-from ocw.dataset import Bounds
 import ocw.data_source.local as local
 import ocw.data_source.rcmed as rcmed
 import ocw.dataset_processor as dsp
@@ -69,6 +69,15 @@
 import ocw.metrics as metrics
 import ocw.plotter as plotter
 import ocw.utils as utils
+from ocw.dataset import Bounds
+
+if sys.version_info[0] >= 3:
+    from urllib.request import urlretrieve
+else:
+    # Not Python 3 - today, it is most likely to be Python 2
+    # But note that this might need an update when Python 4
+    # might be around one day
+    from urllib import urlretrieve

 if hasattr(ssl, '_create_unverified_context'):
     ssl._create_default_https_context = ssl._create_unverified_context
@@ -87,7 +96,7 @@
 LAT_MAX = 42.24
 LON_MIN = -24.0
 LON_MAX = 60.0
-START = datetime.datetime(2000, 01, 1)
+START = datetime.datetime(2000, 1, 1)
 END = datetime.datetime(2007, 12, 31)
 EVAL_BOUNDS = Bounds(lat_min=LAT_MIN, lat_max=LAT_MAX, lon_min=LON_MIN,
                      lon_max=LON_MAX, start=START, end=END)
@@ -106,13 +115,13 @@

 # Download necessary NetCDF file if not present
 if not path.exists(FILE_1):
-    urllib.urlretrieve(FILE_LEADER + FILE_1, FILE_1)
+    urlretrieve(FILE_LEADER + FILE_1, FILE_1)

 if not path.exists(FILE_2):
-    urllib.urlretrieve(FILE_LEADER + FILE_2, FILE_2)
+    urlretrieve(FILE_LEADER + FILE_2, FILE_2)

 if not path.exists(FILE_3):
-    urllib.urlretrieve(FILE_LEADER + FILE_3, FILE_3)
+    urlretrieve(FILE_LEADER + FILE_3, FILE_3)

 # Step 1: Load Local NetCDF File into OCW Dataset Objects and store in list
 target_datasets.append(local.load_file(FILE_1, varName, name='KNMI'))
@@ -181,7 +190,7 @@
     Bounds(lat_min=30.0, lat_max=40.0, lon_min=-15.0, lon_max=0.0),
     Bounds(lat_min=33.0, lat_max=40.0, lon_min=25.0, lon_max=35.00)]

-region_list = ['R' + str(i + 1) for i in xrange(13)]
+region_list = ['R' + str(i + 1) for i in range(13)]

 # metrics
 pattern_correlation = metrics.PatternCorrelation()
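pattern_correlation above yields one score per subregion for the portrait diagram. Assuming the metric is the Pearson correlation of the two fields flattened, a numpy sketch of the computation:

```python
import numpy as np

a = np.arange(20.0).reshape(4, 5)                           # reference field
b = a + np.random.default_rng(4).normal(0.0, 0.5, a.shape)  # target field

# Pearson correlation of the flattened fields.
pattern_corr = np.corrcoef(a.ravel(), b.ravel())[0, 1]
print(round(pattern_corr, 3))
```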

diff --git a/examples/subregions_rectangular_boundaries.py b/examples/subregions_rectangular_boundaries.py
index cf396bc..2686709 100644
--- a/examples/subregions_rectangular_boundaries.py
+++ b/examples/subregions_rectangular_boundaries.py
@@ -31,13 +31,14 @@
     2. plotter

 """
+from __future__ import print_function

 import datetime
+
 import numpy as np

-# Apache OCW lib immports
-from ocw.dataset import Bounds
 import ocw.plotter as plotter
+from ocw.dataset import Bounds

 OUTPUT_PLOT = "subregions"

@@ -46,7 +47,7 @@
 LAT_MAX = 42.24
 LON_MIN = -24.0
 LON_MAX = 60.0
-START_SUB = datetime.datetime(2000, 01, 1)
+START_SUB = datetime.datetime(2000, 1, 1)
 END_SUB = datetime.datetime(2007, 12, 31)

 # regridding parameters
diff --git a/examples/subset_TRMM_data_for_NCA_regions.py b/examples/subset_TRMM_data_for_NCA_regions.py
index 4ae4300..ac8cc26 100644
--- a/examples/subset_TRMM_data_for_NCA_regions.py
+++ b/examples/subset_TRMM_data_for_NCA_regions.py
@@ -38,17 +38,17 @@
     4. plotter
 
 """
-
-# Apache OCW lib immports
-import ocw.dataset_processor as dsp
-from ocw.dataset import Bounds
-import ocw.data_source.rcmed as rcmed
-import ocw.plotter as plotter
-
-from datetime import datetime
-import numpy.ma as ma
+from __future__ import print_function
 
 import ssl
+from datetime import datetime
+
+import numpy.ma as ma
+
+import ocw.data_source.rcmed as rcmed
+import ocw.dataset_processor as dsp
+import ocw.plotter as plotter
+from ocw.dataset import Bounds
 
 if hasattr(ssl, '_create_unverified_context'):
     ssl._create_default_https_context = ssl._create_unverified_context
@@ -74,8 +74,8 @@
 plotter.draw_contour_map(ma.mean(TRMM_dataset2.values, axis=0), TRMM_dataset2.lats,
                          TRMM_dataset2.lons, fname='TRMM_without_Cuba_and_Bahamas')
 
-NCA_SW_bounds = Bounds(boundary_type='us_states', us_states=[
-                       'CA', 'NV', 'UT', 'AZ', 'NM', 'CO'])
+NCA_SW_bounds = \
+    Bounds(boundary_type='us_states', us_states=['CA', 'NV', 'UT', 'AZ', 'NM', 'CO'])
 # to mask out the data over Mexico and Canada
 TRMM_dataset3 = dsp.subset(TRMM_dataset2, NCA_SW_bounds, extract=True)
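The extract=True subset and the ma.mean call above lean on numpy masked arrays: masked cells (here, everything outside the Southwest states) simply drop out of reductions. A toy example:

```python
import numpy as np
import numpy.ma as ma

values = ma.masked_invalid(np.array([[1.0, np.nan],
                                     [3.0, 4.0]]))
print(ma.mean(values, axis=0))  # masked cell ignored: [2.0 4.0]
```
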
 
diff --git a/examples/taylor_diagram_example.py b/examples/taylor_diagram_example.py
index 8f683c1..5ce57c6 100644
--- a/examples/taylor_diagram_example.py
+++ b/examples/taylor_diagram_example.py
@@ -45,11 +45,21 @@
     6. plotter
 
 """
+from __future__ import print_function
 
 import datetime
 import sys
 from os import path
 
+import numpy
+
+import ocw.data_source.local as local
+import ocw.dataset_processor as dsp
+import ocw.evaluation as evaluation
+import ocw.metrics as metrics
+import ocw.plotter as plotter
+from ocw.dataset import Bounds
+
 if sys.version_info[0] >= 3:
     from urllib.request import urlretrieve
 else:
@@ -58,15 +68,6 @@
     # might be around one day
     from urllib import urlretrieve
 
-import numpy
-
-from ocw.dataset import Bounds
-import ocw.data_source.local as local
-import ocw.dataset_processor as dsp
-import ocw.evaluation as evaluation
-import ocw.metrics as metrics
-import ocw.plotter as plotter
-
 FILE_LEADER = "http://zipper.jpl.nasa.gov/dist/"
 FILE_1 = "AFRICA_KNMI-RACMO2.2b_CTL_ERAINT_MM_50km_1989-2008_tasmax.nc"
 FILE_2 = "AFRICA_UC-WRF311_CTL_ERAINT_MM_50km-rg_1989-2008_tasmax.nc"
diff --git a/examples/temperature_trends_over_CONUS.py b/examples/temperature_trends_over_CONUS.py
index 8510294..31725ab 100644
--- a/examples/temperature_trends_over_CONUS.py
+++ b/examples/temperature_trends_over_CONUS.py
@@ -43,27 +43,26 @@
     5. utils
 
 """
+from __future__ import print_function
 
-import numpy as np
 import datetime
 
-# import Apache OCW dependences
+import numpy as np
+
 import ocw.data_source.local as local
-from ocw.dataset import Bounds as Bounds
 import ocw.dataset_processor as dsp
-import ocw.metrics as metrics
 import ocw.plotter as plotter
 import ocw.utils as utils
+from ocw.dataset import Bounds as Bounds
 
 # nClimGrid observation file
 file_obs = 'nClimGrid/nClimGrid_tave_1895-2005.nc'
 
 # CMIP5 simulations
 model_file_path = 'CMIP5_historical'
-dataset_name = ['BNU-ESM','CanESM2','CNRM-CM5','CSIRO-Mk3',
-                'GISS-E2-H','HadCM3','inmcm4','MIROC-ESM',
-                'MPI-ESM-LR','NorESM1-M']
-nmodel = len(dataset_name) # number of CMIP5 simulations
+dataset_name = ['BNU-ESM', 'CanESM2', 'CNRM-CM5', 'CSIRO-Mk3', 'GISS-E2-H', 'HadCM3', 'inmcm4',
+                'MIROC-ESM', 'MPI-ESM-LR', 'NorESM1-M']
+nmodel = len(dataset_name)  # number of CMIP5 simulations
 
 # temporal boundary
 start_date = datetime.datetime(1979, 12, 1)
@@ -71,38 +70,31 @@
 
 nyear = 26
 
-month_start = 6 # June
+month_start = 6  # June
 month_end = 8   # August
 
 regions = []
 regions.append(['WA', 'OR', 'ID'])
-regions.append(['CA', 'NV', 'AZ', 'NM', 'UT','CO'])
+regions.append(['CA', 'NV', 'AZ', 'NM', 'UT', 'CO'])
 regions.append(['MT', 'WY', 'ND', 'SD', 'NE'])
 regions.append(['KS', 'TX', 'OK'])
-regions.append(['MN', 'IA', 'MO', 'WI', 'IL', 'IN', 'MI','OH'])
-regions.append(['ME', 'VT', 'NH', 'MA', 'NY', 'CT', 'RI','NJ','PA','WV','DE', 'MD'])
-regions.append(['KY', 'VA', 'AR','AL', 'LA','MS', 'FL', 'GA','NC','SC', 'DC','TN'])
+regions.append(['MN', 'IA', 'MO', 'WI', 'IL', 'IN', 'MI', 'OH'])
+regions.append(['ME', 'VT', 'NH', 'MA', 'NY', 'CT', 'RI', 'NJ', 'PA', 'WV', 'DE', 'MD'])
+regions.append(['KY', 'VA', 'AR', 'AL', 'LA', 'MS', 'FL', 'GA', 'NC', 'SC', 'DC', 'TN'])
 
 plotter.fill_US_states_with_color(regions, 'NCA_seven_regions', colors=True,
-                                region_names=['NW','SW','NGP','SGP','MW','NE','SE'])
+                                  region_names=['NW', 'SW', 'NGP', 'SGP', 'MW', 'NE', 'SE'])
 
-n_region = 7 # number of regions
+n_region = 7  # number of regions
 
 # CONUS regional boundaries
-NW_bounds = Bounds(boundary_type='us_states',
-                   us_states=regions[0])
-SW_bounds = Bounds(boundary_type='us_states',
-                   us_states=regions[1])
-NGP_bounds = Bounds(boundary_type='us_states',
-                   us_states=regions[2])
-SGP_bounds = Bounds(boundary_type='us_states',
-                   us_states=regions[3])
-MW_bounds = Bounds(boundary_type='us_states',
-                   us_states=regions[4])
-NE_bounds = Bounds(boundary_type='us_states',
-                   us_states=regions[5])
-SE_bounds = Bounds(boundary_type='us_states',
-                   us_states=regions[6])
+NW_bounds = Bounds(boundary_type='us_states', us_states=regions[0])
+SW_bounds = Bounds(boundary_type='us_states', us_states=regions[1])
+NGP_bounds = Bounds(boundary_type='us_states', us_states=regions[2])
+SGP_bounds = Bounds(boundary_type='us_states', us_states=regions[3])
+MW_bounds = Bounds(boundary_type='us_states', us_states=regions[4])
+NE_bounds = Bounds(boundary_type='us_states', us_states=regions[5])
+SE_bounds = Bounds(boundary_type='us_states', us_states=regions[6])
 
 regional_bounds = [NW_bounds, SW_bounds, NGP_bounds,
                    SGP_bounds, MW_bounds, NE_bounds, SE_bounds]
@@ -115,28 +107,27 @@
                                           dataset_name=dataset_name, variable_unit='K')
 
 """ Temporal subset of obs_dataset """
-obs_dataset_subset = dsp.temporal_slice(obs_dataset,
-                  start_time=start_date, end_time=end_date)
-obs_dataset_season = dsp.temporal_subset(obs_dataset_subset, month_start, month_end,
-                      average_each_year=True)
+obs_dataset_subset = dsp.temporal_slice(obs_dataset, start_time=start_date, end_time=end_date)
+obs_dataset_season = \
+    dsp.temporal_subset(obs_dataset_subset, month_start, month_end, average_each_year=True)
 
 """ Temporal subset of model_dataset """
-model_dataset_subset = [dsp.temporal_slice(dataset,start_time=start_date, end_time=end_date)
+model_dataset_subset = [dsp.temporal_slice(dataset, start_time=start_date, end_time=end_date)
                         for dataset in model_dataset]
-model_dataset_season = [dsp.temporal_subset(dataset, month_start, month_end,
-                      average_each_year=True) for dataset in model_dataset_subset]
-
+model_dataset_season = [dsp.temporal_subset(dataset, month_start, month_end, average_each_year=True)
+                        for dataset in model_dataset_subset]
 
 """ Spatial subset of obs_dataset and generate time series """
 obs_timeseries = np.zeros([nyear, n_region])   # region index 0-6: NW, SW, NGP, SGP, MW, NE, SE
 model_timeseries = np.zeros([nmodel, nyear, n_region])
 
 for iregion in np.arange(n_region):
-    obs_timeseries[:, iregion] = utils.calc_time_series(
-                         dsp.subset(obs_dataset_season, regional_bounds[iregion]))
+    obs_timeseries[:, iregion] = \
+        utils.calc_time_series(dsp.subset(obs_dataset_season, regional_bounds[iregion]))
     for imodel in np.arange(nmodel):
-        model_timeseries[imodel, :, iregion] = utils.calc_time_series(
-                         dsp.subset(model_dataset_season[imodel], regional_bounds[iregion]))
+        model_timeseries[imodel, :, iregion] = \
+            utils.calc_time_series(dsp.subset(model_dataset_season[imodel],
+                                              regional_bounds[iregion]))
 
 year = np.arange(nyear)
 
@@ -148,34 +139,36 @@
 regional_trends_ens_error = np.zeros(n_region)
 
 for iregion in np.arange(n_region):
-    regional_trends_obs[iregion], regional_trends_obs_error[iregion] = utils.calculate_temporal_trend_of_time_series(
-                                                                       year, obs_timeseries[:,iregion])
-    for imodel in np.arange(nmodel):
-        regional_trends_model[imodel, iregion], regional_trends_model_error[iregion] = utils.calculate_temporal_trend_of_time_series(
-                                                                                       year, model_timeseries[imodel, :, iregion])
-    regional_trends_ens[iregion], regional_trends_ens_error[iregion] = utils.calculate_ensemble_temporal_trends(
-                                                                       model_timeseries[:, :, iregion])
+    regional_trends_obs[iregion], regional_trends_obs_error[iregion] = \
+        utils.calculate_temporal_trend_of_time_series(year, obs_timeseries[:, iregion])
 
-""" Generate plots """
+    for imodel in np.arange(nmodel):
+        regional_trends_model[imodel, iregion], regional_trends_model_error[iregion] = \
+            utils.calculate_temporal_trend_of_time_series(year,
+                                                          model_timeseries[imodel, :, iregion])
+    regional_trends_ens[iregion], regional_trends_ens_error[iregion] = \
+        utils.calculate_ensemble_temporal_trends(model_timeseries[:, :, iregion])
+
+# Generate plots
 
 plotter.fill_US_states_with_color(regions, 'nClimGrid_tave_trends_JJA_1980-2005',
                                   values=regional_trends_obs,
-                                  region_names=['%.3f' %(10*i) for i in regional_trends_obs])
+                                  region_names=['%.3f' % (10*i) for i in regional_trends_obs])
 
 plotter.fill_US_states_with_color(regions, 'CMIP5_ENS_tave_trends_JJA_1980-2005',
                                   values=regional_trends_ens,
-                                  region_names=['%.3f' %(10*i) for i in regional_trends_ens])
+                                  region_names=['%.3f' % (10*i) for i in regional_trends_ens])
 
 bias_ens = regional_trends_ens - regional_trends_obs
-plotter.fill_US_states_with_color(regions, 'CMIP5_ENS_tave_trends_bias_from_nClimGrid_JJA_1980-2005',
+plotter.fill_US_states_with_color(regions,
+                                  'CMIP5_ENS_tave_trends_bias_from_nClimGrid_JJA_1980-2005',
                                   values=bias_ens,
-                                  region_names=['%.3f' %(10*i) for i in bias_ens])
+                                  region_names=['%.3f' % (10*i) for i in bias_ens])
 
 obs_data = np.vstack([regional_trends_obs, regional_trends_obs_error])
 ens_data = np.vstack([regional_trends_ens, regional_trends_ens_error])
 
 plotter.draw_plot_to_compare_trends(obs_data, ens_data, regional_trends_model,
                                     fname='Trends_comparison_btn_CMIP5_and_nClimGrid',
-                                    data_labels=['NW','SW','NGP','SGP','MW','NE','SE'],
+                                    data_labels=['NW', 'SW', 'NGP', 'SGP', 'MW', 'NE', 'SE'],
                                     xlabel='NCA regions', ylabel='tas trend [K/year]')
-
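utils.calculate_temporal_trend_of_time_series boils down to fitting a straight line to an annual series. Assuming an ordinary least-squares fit, the slope it reports can be sketched with np.polyfit:

```python
import numpy as np

year = np.arange(26)
rng = np.random.default_rng(5)
series = 0.02 * year + rng.normal(scale=0.05, size=year.size)

slope, intercept = np.polyfit(year, series, 1)  # OLS linear fit
print(round(slope, 3))  # close to the true trend of 0.02 K/year
```
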
diff --git a/examples/time_series_with_regions.py b/examples/time_series_with_regions.py
index d92599b..ad4be38 100644
--- a/examples/time_series_with_regions.py
+++ b/examples/time_series_with_regions.py
@@ -47,22 +47,22 @@
     5. plotter
 
 """
+from __future__ import print_function
 
-import sys
 import datetime
-from os import path
-from calendar import monthrange
 import ssl
+import sys
+from calendar import monthrange
+from os import path
 
 import numpy as np
 
-# Apache OCW lib immports
-from ocw.dataset import Bounds
 import ocw.data_source.local as local
 import ocw.data_source.rcmed as rcmed
 import ocw.dataset_processor as dsp
 import ocw.plotter as plotter
 import ocw.utils as utils
+from ocw.dataset import Bounds
 
 if sys.version_info[0] >= 3:
     from urllib.request import urlretrieve
@@ -185,7 +185,7 @@
     Bounds(lat_min=30.0, lat_max=40.0, lon_min=-15.0, lon_max=0.0),
     Bounds(lat_min=33.0, lat_max=40.0, lon_min=25.0, lon_max=35.0)]
 
-region_list = [['R' + str(i + 1)] for i in xrange(13)]
+region_list = [['R' + str(i + 1)] for i in range(13)]
 
 for regions in region_list:
     firstTime = True
diff --git a/setup.py b/setup.py
index 72510cf..390b672 100644
--- a/setup.py
+++ b/setup.py
@@ -38,7 +38,7 @@
 ]
 _description = 'Apache Open Climate Workbench'
 _download_url = 'http://pypi.python.org/pypi/ocw/'
-_requirements = []
+_requirements = ['numpy', ]
 _keywords = ['climate analysis', 'workbench', 'rebinning',
              'metrics', 'computation', 'visualization']
 _license = 'Apache License, Version 2.0'
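Declaring numpy in _requirements presumably feeds setuptools' install_requires, so pip resolves the dependency at install time rather than failing at import time. The shape of the resulting call, as a minimal hypothetical sketch:

```python
from setuptools import setup

setup(
    name='ocw-demo',             # hypothetical package name
    version='0.0.0',
    install_requires=['numpy'],  # resolved by pip at install time
)
```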