Merge branch 'master' into datastax-cass-driver
diff --git a/sdks/dotnet/Usergrid.Sdk.IntegrationTests/BaseTest.cs b/sdks/dotnet/Usergrid.Sdk.IntegrationTests/BaseTest.cs
index 1669569..b83cf7e 100644
--- a/sdks/dotnet/Usergrid.Sdk.IntegrationTests/BaseTest.cs
+++ b/sdks/dotnet/Usergrid.Sdk.IntegrationTests/BaseTest.cs
@@ -32,7 +32,24 @@
 	            _config = config;
 	    }
 
-	    protected string Organization
+        /// <summary>
+        /// The URI of the Usergrid API, which defaults to api.usergrid.com if none is specified, just like the Client object does
+        /// </summary>
+        protected string ApiUri
+        {
+            get
+            {
+                var apiUri = GetAppSetting("apiUri");
+                if (String.IsNullOrWhiteSpace(apiUri))
+                {
+                    apiUri = "http://api.usergrid.com";
+                }
+
+                return apiUri;
+            }
+        }
+
+        protected string Organization
 		{
 			get{ return GetAppSetting("organization");}
 		}
@@ -84,12 +101,12 @@
 
         private string GetAppSetting(string key)
         {
-            return _config == null ? ConfigurationManager.AppSettings[key] : _config.AppSettings.Settings[key].Value;
+            return _config == null ? ConfigurationManager.AppSettings[key] : _config.AppSettings.Settings[key]?.Value;
         }
 
         protected IClient InitializeClientAndLogin(AuthType authType)
         {
-            var client = new Client(Organization, Application);
+            var client = new Client(Organization, Application, ApiUri);
             if (authType == AuthType.Application || authType == AuthType.Organization)
                 client.Login(ClientId, ClientSecret, authType);
             else if (authType == AuthType.User)
diff --git a/sdks/dotnet/Usergrid.Sdk.IntegrationTests/EntityPagingTests.cs b/sdks/dotnet/Usergrid.Sdk.IntegrationTests/EntityPagingTests.cs
index fbf615f..79234de 100644
--- a/sdks/dotnet/Usergrid.Sdk.IntegrationTests/EntityPagingTests.cs
+++ b/sdks/dotnet/Usergrid.Sdk.IntegrationTests/EntityPagingTests.cs
@@ -31,7 +31,7 @@
 		[Test]
 		public void ShouldDoPaging()
 		{
-			var client = new Client(Organization, Application);
+			var client = new Client(Organization, Application, ApiUri);
 			client.Login(ClientId, ClientSecret, AuthType.Organization);
 
 			for (var i=0; i<20; i++) 
diff --git a/sdks/dotnet/Usergrid.Sdk.IntegrationTests/GroupTests.cs b/sdks/dotnet/Usergrid.Sdk.IntegrationTests/GroupTests.cs
index 16bfa2c..4213c24 100644
--- a/sdks/dotnet/Usergrid.Sdk.IntegrationTests/GroupTests.cs
+++ b/sdks/dotnet/Usergrid.Sdk.IntegrationTests/GroupTests.cs
@@ -30,7 +30,7 @@
         [Test]
         public void ShouldManageGroupLifecycle()
         {
-            var client = new Client(Organization, Application);
+            var client = new Client(Organization, Application, ApiUri);
             client.Login(ClientId, ClientSecret, AuthType.Organization);
 
             var group = client.GetGroup<MyUsergridGroup>("group1");
@@ -68,7 +68,7 @@
         [Test]
         public void ShouldManageUsersInGroup()
         {
-            var client = new Client(Organization, Application);
+            var client = new Client(Organization, Application, ApiUri);
             client.Login(ClientId, ClientSecret, AuthType.Organization);
 
             var user = SetupUsergridUser(client, new MyUsergridUser {UserName = "user1", Password = "user1", Email = "user1@gmail.com", City = "city1"});
diff --git a/sdks/dotnet/Usergrid.Sdk.IntegrationTests/LoginTests.cs b/sdks/dotnet/Usergrid.Sdk.IntegrationTests/LoginTests.cs
index aa4e1de..ea2590c 100644
--- a/sdks/dotnet/Usergrid.Sdk.IntegrationTests/LoginTests.cs
+++ b/sdks/dotnet/Usergrid.Sdk.IntegrationTests/LoginTests.cs
@@ -24,14 +24,14 @@
         [Test]
         public void ShouldLoginSuccessfullyWithClientCredentials()
         {
-            var client = new Client(Organization, Application);
+            var client = new Client(Organization, Application, ApiUri);
             client.Login(ClientId, ClientSecret, AuthType.Organization);
         }
 
 		[Test]
 		public void ShouldThrowWithInvalidOrganizationCredentials()
 		{
-			var client = new Client (Organization, Application);
+			var client = new Client (Organization, Application, ApiUri);
 
 			try
 			{
@@ -48,14 +48,14 @@
 		[Test]
 		public void ShouldLoginSuccessfullyWithApplicationCredentials()
 		{
-			var client = new Client(Organization, Application);
+			var client = new Client(Organization, Application, ApiUri);
 			client.Login(ApplicationId, ApplicationSecret, AuthType.Application);
 		}
 
 		[Test]
 		public void ShouldThrowWithInvalidApplicationCredentials()
 		{
-			var client = new Client (Organization, Application);
+			var client = new Client (Organization, Application, ApiUri);
 
 			try
 			{
@@ -72,14 +72,14 @@
 		[Test]
 		public void ShouldLoginSuccessfullyWithUserCredentials()
 		{
-			var client = new Client(Organization, Application);
+			var client = new Client(Organization, Application, ApiUri);
 			client.Login(UserId, UserSecret, AuthType.User);
 		}
 
         [Test]
         public void ShouldThrowWithInvalidUserCredentials()
         {
-            var client = new Client(Organization, Application);
+            var client = new Client(Organization, Application, ApiUri);
 
             try
             {
diff --git a/sdks/dotnet/Usergrid.Sdk.IntegrationTests/Usergrid.Sdk.IntegrationTests.dll.config b/sdks/dotnet/Usergrid.Sdk.IntegrationTests/Usergrid.Sdk.IntegrationTests.dll.config
index 9785ff4..e8fd2e5 100644
--- a/sdks/dotnet/Usergrid.Sdk.IntegrationTests/Usergrid.Sdk.IntegrationTests.dll.config
+++ b/sdks/dotnet/Usergrid.Sdk.IntegrationTests/Usergrid.Sdk.IntegrationTests.dll.config
@@ -18,6 +18,7 @@
 
 <configuration>
 	<appSettings>
+                <add key="apiUri" value="BASE_URI_OF_USERGRID_API" />
 		<add key="organization" value="ORGANIZATION_NAME" />
 		<add key="application" value="APPLICATION_NAME" />
 		<add key="clientId" value="CLIENT_ID" />
diff --git a/sdks/java/src/main/java/org/apache/usergrid/java/client/UsergridClient.java b/sdks/java/src/main/java/org/apache/usergrid/java/client/UsergridClient.java
index 87a84ae..a317d07 100644
--- a/sdks/java/src/main/java/org/apache/usergrid/java/client/UsergridClient.java
+++ b/sdks/java/src/main/java/org/apache/usergrid/java/client/UsergridClient.java
@@ -72,6 +72,8 @@
 
     @NotNull public String clientAppUrl() { return getBaseUrl() + "/" + getOrgId() + "/" + getAppId(); }
 
+    @NotNull public String managementUrl() { return getBaseUrl() + "/management"; }
+
     @NotNull public UsergridAuthMode getAuthMode() { return this.config.authMode; }
     public void setAuthMode(@NotNull final UsergridAuthMode authMode) { this.config.authMode = authMode; }
 
diff --git a/sdks/java/src/main/java/org/apache/usergrid/java/client/UsergridRequestManager.java b/sdks/java/src/main/java/org/apache/usergrid/java/client/UsergridRequestManager.java
index ef771e7..7099d42 100644
--- a/sdks/java/src/main/java/org/apache/usergrid/java/client/UsergridRequestManager.java
+++ b/sdks/java/src/main/java/org/apache/usergrid/java/client/UsergridRequestManager.java
@@ -60,7 +60,18 @@
     @NotNull
     private UsergridResponse authenticate(@NotNull final UsergridAuth auth) {
         Map<String, String> credentials = auth.credentialsMap();
-        UsergridRequest request = new UsergridRequest(UsergridHttpMethod.POST, UsergridRequest.APPLICATION_JSON_MEDIA_TYPE, this.usergridClient.clientAppUrl(), null, credentials, this.usergridClient.authForRequests(), "token");
+        String url = this.usergridClient.clientAppUrl();
+        if ( auth instanceof UsergridUserAuth){
+
+            UsergridUserAuth userAuth = (UsergridUserAuth) auth;
+            if( userAuth.isAdminUser()){
+
+                url = this.usergridClient.managementUrl();
+            }
+
+        }
+
+        UsergridRequest request = new UsergridRequest(UsergridHttpMethod.POST, UsergridRequest.APPLICATION_JSON_MEDIA_TYPE, url, null, credentials, this.usergridClient.authForRequests(), "token");
         UsergridResponse response = performRequest(request);
         if (!isEmpty(response.getAccessToken()) && !isEmpty(response.getExpires())) {
             auth.setAccessToken(response.getAccessToken());
diff --git a/sdks/java/src/main/java/org/apache/usergrid/java/client/auth/UsergridAuth.java b/sdks/java/src/main/java/org/apache/usergrid/java/client/auth/UsergridAuth.java
index 94b4809..1ed61da 100644
--- a/sdks/java/src/main/java/org/apache/usergrid/java/client/auth/UsergridAuth.java
+++ b/sdks/java/src/main/java/org/apache/usergrid/java/client/auth/UsergridAuth.java
@@ -28,6 +28,7 @@
     @Nullable private String accessToken = null;
     @Nullable private Long expiry = null;
     private boolean usingToken = false;
+    private boolean isAdminUser = false;
 
     public UsergridAuth() { }
 
diff --git a/sdks/java/src/main/java/org/apache/usergrid/java/client/auth/UsergridUserAuth.java b/sdks/java/src/main/java/org/apache/usergrid/java/client/auth/UsergridUserAuth.java
index e1831c2..7cb42ad 100644
--- a/sdks/java/src/main/java/org/apache/usergrid/java/client/auth/UsergridUserAuth.java
+++ b/sdks/java/src/main/java/org/apache/usergrid/java/client/auth/UsergridUserAuth.java
@@ -17,7 +17,6 @@
 package org.apache.usergrid.java.client.auth;
 
 import com.fasterxml.jackson.annotation.JsonProperty;
-import org.apache.usergrid.java.client.model.UsergridUser;
 import org.jetbrains.annotations.NotNull;
 
 import java.util.HashMap;
@@ -27,6 +26,7 @@
 
     @NotNull private String username;
     @NotNull private String password;
+    private boolean isAdminUser = false;
 
     @NotNull public String getUsername() { return username; }
     public void setUsername(@NotNull final String username) { this.username = username; }
@@ -34,6 +34,8 @@
     @NotNull private String getPassword() { return password; }
     public void setPassword(@NotNull final String password) { this.password = password; }
 
+    public boolean isAdminUser() { return isAdminUser; }
+
     @NotNull
     @Override
     public HashMap<String, String> credentialsMap() {
@@ -48,9 +50,19 @@
         this("","");
     }
 
-    public UsergridUserAuth(@JsonProperty("username") @NotNull final String username, @JsonProperty("password") @NotNull final String password) {
+    public UsergridUserAuth(@JsonProperty("username") @NotNull final String username,
+                            @JsonProperty("password") @NotNull final String password) {
         super();
         this.username = username;
         this.password = password;
     }
+
+    public UsergridUserAuth(@JsonProperty("username") @NotNull final String username,
+                            @JsonProperty("password") @NotNull final String password,
+                            @JsonProperty("isAdminUser") final boolean isAdminUser) {
+        super();
+        this.username = username;
+        this.password = password;
+        this.isAdminUser = isAdminUser;
+    }
 }
diff --git a/sdks/java/src/main/java/org/apache/usergrid/java/client/model/UsergridEntity.java b/sdks/java/src/main/java/org/apache/usergrid/java/client/model/UsergridEntity.java
index e3dbb77..3444de0 100644
--- a/sdks/java/src/main/java/org/apache/usergrid/java/client/model/UsergridEntity.java
+++ b/sdks/java/src/main/java/org/apache/usergrid/java/client/model/UsergridEntity.java
@@ -412,7 +412,7 @@
     }
 
     @JsonAnyGetter @NotNull
-    private Map<String, JsonNode> getProperties() {
+    public Map<String, JsonNode> getProperties() {
         return this.properties;
     }
 
diff --git a/sdks/java/src/main/java/org/apache/usergrid/java/client/query/UsergridQuery.java b/sdks/java/src/main/java/org/apache/usergrid/java/client/query/UsergridQuery.java
index dc359c0..e8c78c2 100644
--- a/sdks/java/src/main/java/org/apache/usergrid/java/client/query/UsergridQuery.java
+++ b/sdks/java/src/main/java/org/apache/usergrid/java/client/query/UsergridQuery.java
@@ -261,8 +261,10 @@
     }
 
     @NotNull
-    public UsergridQuery ql(@NotNull final String value) {
-        return this.addRequirement(value);
+    public UsergridQuery ql(final String value) {
+
+        return value != null && !value.isEmpty() ? this.addRequirement(value) : this;
+
     }
 
     @NotNull
@@ -374,11 +376,6 @@
         }
 
         String requirementsString = this.constructRequirementString();
-        if (!requirementsString.isEmpty()) {
-            requirementsString = SELECT_ALL + SPACE + WHERE + SPACE + requirementsString;
-        } else {
-            requirementsString = SELECT_ALL + SPACE;
-        }
 
         String orderByString = this.constructOrderByString();
         if (!orderByString.isEmpty()) {
diff --git a/sdks/python/.gitignore b/sdks/python/.gitignore
new file mode 100644
index 0000000..ba74660
--- /dev/null
+++ b/sdks/python/.gitignore
@@ -0,0 +1,57 @@
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+env/
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+*.egg-info/
+.installed.cfg
+*.egg
+
+# PyInstaller
+#  Usually these files are written by a python script from a template
+#  before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*,cover
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+target/
diff --git a/sdks/python/GUIDE.md b/sdks/python/GUIDE.md
new file mode 100644
index 0000000..0719005
--- /dev/null
+++ b/sdks/python/GUIDE.md
@@ -0,0 +1,2 @@
+
+https://docs.python.org/2/distutils/packageindex.html
\ No newline at end of file
diff --git a/sdks/python/LICENSE b/sdks/python/LICENSE
new file mode 100755
index 0000000..8f71f43
--- /dev/null
+++ b/sdks/python/LICENSE
@@ -0,0 +1,202 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "{}"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright {yyyy} {name of copyright owner}
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+
diff --git a/sdks/python/README.md b/sdks/python/README.md
new file mode 100755
index 0000000..cc2af97
--- /dev/null
+++ b/sdks/python/README.md
@@ -0,0 +1,16 @@
+# Usergrid Python SDK
+
+# Overview
+This is a starter project for the Usergrid Python SDK.  It is a work in progress.
+
+# Installation
+
+## PIP (http://pip.readthedocs.org/en/stable/installing/)
+
+`pip install usergrid`
+
+## Manual installation
+
+- `git clone git@github.com:jwest-apigee/usergrid-python.git`
+- `cd usergrid-python`
+- `pip install -e .`
\ No newline at end of file
diff --git a/sdks/python/README.rst b/sdks/python/README.rst
new file mode 100755
index 0000000..659384c
--- /dev/null
+++ b/sdks/python/README.rst
@@ -0,0 +1,20 @@
+**********
+Overview
+**********
+
+This is a starter project for the Usergrid Python SDK.  It is a work in progress.
+
+**************************
+Installation
+**************************
+
+================================================
+Installation From Pypi Using PIP
+================================================
+
+PIP is a package manager for Python.  For more information please view the information here: `PIP Installation Guide <http://pip.readthedocs.org/en/stable/installing/>`_
+
+From the command line::
+
+  pip install usergrid
+
diff --git a/sdks/python/sample_app.py b/sdks/python/sample_app.py
new file mode 100755
index 0000000..9deefbe
--- /dev/null
+++ b/sdks/python/sample_app.py
@@ -0,0 +1,77 @@
+# */
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *   http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing,
+# * software distributed under the License is distributed on an
+# * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# * KIND, either express or implied.  See the License for the
+#    * specific language governing permissions and limitations
+# * under the License.
+# */
+
+from usergrid import Usergrid
+
+__author__ = 'Jeff West @ ApigeeCorporation'
+
+
+def main():
+    Usergrid.init(org_id='jwest1',
+                  app_id='sandbox')
+
+    response = Usergrid.DELETE('pets', 'max')
+
+    if not response.ok:
+        print 'Failed to delete max: %s' % response
+        exit()
+
+    response = Usergrid.DELETE('owners', 'jeff')
+
+    if not response.ok:
+        print 'Failed to delete Jeff: %s' % response
+        exit()
+
+    response = Usergrid.POST('pets', {'name': 'max'})
+
+    if response.ok:
+        pet = response.first()
+
+        print pet
+
+        response = Usergrid.POST('owners', {'name': 'jeff'})
+
+        if response.ok:
+            owner = response.first()
+
+            print owner
+
+            response = pet.connect('ownedBy', owner)
+
+            if response.ok:
+                print 'Connected!'
+
+                response = pet.disconnect('ownedBy', owner)
+
+                if response.ok:
+                    print 'all done!'
+                else:
+                    print response
+
+            else:
+                print 'failed to connect: %s' % response
+
+        else:
+            print 'Failed to create Jeff: %s' % response
+
+    else:
+        print response
+
+
+main()
diff --git a/sdks/python/setup.py b/sdks/python/setup.py
new file mode 100755
index 0000000..8a2d332
--- /dev/null
+++ b/sdks/python/setup.py
@@ -0,0 +1,51 @@
+# */
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *   http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing,
+# * software distributed under the License is distributed on an
+# * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# * KIND, either express or implied.  See the License for the
+#    * specific language governing permissions and limitations
+# * under the License.
+# */
+
+__author__ = 'Jeff West @ ApigeeCorporation'
+
+from setuptools import setup, find_packages
+
+VERSION = '0.1.13.1'
+
+with open('README.rst') as file:
+    long_description = file.read()
+
+setup(
+    name='usergrid',
+    version=VERSION,
+    description='Usergrid SDK for Python',
+    url='http://usergrid.apache.org',
+    download_url="https://codeload.github.com/jwest-apigee/usergrid-python/zip/v" + VERSION,
+    author='Jeff West',
+    author_email='jwest@apigee.com',
+    packages=find_packages(),
+    long_description=long_description,
+    install_requires=[
+        'requests',
+        'urllib3'
+    ],
+    entry_points={
+    },
+    classifiers=[
+        'Development Status :: 4 - Beta',
+        'Intended Audience :: Developers',
+        'Operating System :: OS Independent',
+        'Topic :: Software Development',
+    ]
+)
diff --git a/sdks/python/usergrid/UsergridApplication.py b/sdks/python/usergrid/UsergridApplication.py
new file mode 100644
index 0000000..9c86efd
--- /dev/null
+++ b/sdks/python/usergrid/UsergridApplication.py
@@ -0,0 +1,65 @@
+# */
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *   http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing,
+# * software distributed under the License is distributed on an
+# * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# * KIND, either express or implied.  See the License for the
+#    * specific language governing permissions and limitations
+# * under the License.
+# */
+
+import logging
+from usergrid import UsergridError, UsergridCollection
+from usergrid.app_templates import app_url_template
+
+__author__ = 'Jeff.West@yahoo.com'
+
+class UsergridApplication(object):
+    def __init__(self, app_id, client):
+        self.app_id = app_id
+        self.client = client
+        self.logger = logging.getLogger('usergrid.UsergridClient')
+
+    def list_collections(self):
+        url = app_url_template.format(app_id=self.app_id,
+                                      **self.client.url_data)
+        r = self.client.get(url)
+
+        if r.status_code == 200:
+            api_response = r.json()
+            collection_list = api_response.get('entities')[0].get('metadata', {}).get('collections', {})
+            collections = {}
+
+            for collection_name in collection_list:
+                collections[collection_name] = UsergridCollection(self.client.org_id,
+                                                                  self.app_id,
+                                                                  collection_name,
+                                                                  self.client)
+
+            return collections
+
+        else:
+            raise UsergridError(message='Unable to post to list collections',
+                                status_code=r.status_code,
+                                api_response=r,
+                                url=url)
+
+    def collection(self, collection_name):
+        return UsergridCollection(self.client.org_id,
+                                  self.app_id,
+                                  collection_name,
+                                  self.client)
+
+    def authenticate_app_client(self,
+                                **kwargs):
+
+        return self.client.authenticate_app_client(self.app_id, **kwargs)
diff --git a/sdks/python/usergrid/UsergridAuth.py b/sdks/python/usergrid/UsergridAuth.py
new file mode 100644
index 0000000..f29bcb9
--- /dev/null
+++ b/sdks/python/usergrid/UsergridAuth.py
@@ -0,0 +1,105 @@
+# */
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *   http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing,
+# * software distributed under the License is distributed on an
+# * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# * KIND, either express or implied.  See the License for the
+#    * specific language governing permissions and limitations
+# * under the License.
+# */
+
+import json
+import requests
+from usergrid.management_templates import org_token_url_template
+
+
+class UsergridAuth:
+    def __init__(self,
+                 grant_type,
+                 url_template,
+                 username=None,
+                 password=None,
+                 client_id=None,
+                 client_secret=None,
+                 token_ttl_seconds=86400):
+
+        self.grant_type = grant_type
+        self.username = username
+        self.password = password
+        self.client_id = client_id
+        self.client_secret = client_secret
+        self.token_ttl_seconds = token_ttl_seconds
+        self.url_template = url_template
+        self.access_token = None
+
+    def get_token_request(self):
+        if self.grant_type == 'client_credentials':
+            return {
+                'grant_type': 'client_credentials',
+                'client_id': self.client_id,
+                'client_secret': self.client_secret,
+                'ttl': self.token_ttl_seconds * 1000
+            }
+        elif self.grant_type == 'password':
+            return {
+                'grant_type': 'password',
+                'username': self.username,
+                'password': self.password,
+                'ttl': self.token_ttl_seconds * 1000
+            }
+
+        else:
+            raise ValueError('Unspecified/unknown grant type: %s' % self.grant_type)
+
+    def authenticate(self, client):
+        token_request = self.get_token_request()
+
+        url = self.url_template.format(**client.url_data)
+
+        r = requests.post(url, data=json.dumps(token_request))
+
+        if r.status_code == 200:
+            response = r.json()
+            self.access_token = response.get('access_token')
+
+        else:
+            raise ValueError('Unable to authenticate: %s' % r.text)
+
+
+class UsergridOrgAuth(UsergridAuth):
+    def __init__(self, client_id, client_secret, token_ttl_seconds=86400):
+        UsergridAuth.__init__(self,
+                              grant_type='client_credentials',
+                              url_template=org_token_url_template,
+                              client_id=client_id,
+                              client_secret=client_secret,
+                              token_ttl_seconds=token_ttl_seconds)
+
+
+class UsergridAppAuth(UsergridAuth):
+    def __init__(self, client_id, client_secret, token_ttl_seconds=86400):
+        UsergridAuth.__init__(self,
+                              grant_type='client_credentials',
+                              url_template=app_token_url_template,
+                              client_id=client_id,
+                              client_secret=client_secret,
+                              token_ttl_seconds=token_ttl_seconds)
+
+
+class UsergridUserAuth(UsergridAuth):
+    def __init__(self, username, password, token_ttl_seconds=86400):
+        UsergridAuth.__init__(self,
+                              grant_type='password',
+                              url_template=app_token_url_template,
+                              username=username,
+                              password=password,
+                              token_ttl_seconds=token_ttl_seconds)
diff --git a/sdks/python/usergrid/UsergridClient.py b/sdks/python/usergrid/UsergridClient.py
new file mode 100644
index 0000000..cedeaab
--- /dev/null
+++ b/sdks/python/usergrid/UsergridClient.py
@@ -0,0 +1,401 @@
+# */
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *   http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing,
+# * software distributed under the License is distributed on an
+# * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# * KIND, either express or implied.  See the License for the
+#    * specific language governing permissions and limitations
+# * under the License.
+# */
+
+import json
+import logging
+import requests
+from usergrid.UsergridAuth import UsergridAppAuth
+from usergrid.app_templates import get_entity_url_template, post_collection_url_template, put_entity_url_template, \
+    delete_entity_url_template, connect_entities_by_type_template, assign_role_url_template
+
+__author__ = 'Jeff.West@yahoo.com'
+
+
+def value_error(message):
+    raise ValueError(message)
+
+
+def usergrid_error(r):
+    pass
+
+
+class Usergrid(object):
+    client = None
+
+    @staticmethod
+    def init(org_id,
+             app_id,
+             **kwargs):
+        Usergrid.client = UsergridClient(org_id, app_id, **kwargs)
+
+    @staticmethod
+    def GET(collection, uuid_name, **kwargs):
+        return Usergrid.client.GET(collection, uuid_name, **kwargs)
+
+    @staticmethod
+    def PUT(collection, uuid_name, data, **kwargs):
+        return Usergrid.client.PUT(collection, uuid_name, data, **kwargs)
+
+    @staticmethod
+    def POST(collection, data, **kwargs):
+        return Usergrid.client.POST(collection, data, **kwargs)
+
+    @staticmethod
+    def DELETE(collection, uuid_name, **kwargs):
+        return Usergrid.client.DELETE(collection, uuid_name, **kwargs)
+
+    @staticmethod
+    def connect_entities(from_entity, relationship, to_entity, **kwargs):
+        return Usergrid.client.connect_entities(from_entity, relationship, to_entity, **kwargs)
+
+    @staticmethod
+    def disconnect_entities(from_entity, relationship, to_entity, **kwargs):
+        return Usergrid.client.disconnect_entities(from_entity, relationship, to_entity, **kwargs)
+
+    @staticmethod
+    def assign_role(role_uuid_name, user_entity, **kwargs):
+        return Usergrid.client.assign_role(role_uuid_name, user_entity, **kwargs)
+
+
+class UsergridResponse(object):
+    def __init__(self, api_response, client):
+        self.api_response = api_response
+        self.client = client
+
+        if api_response is None:
+            self.ok = False
+            self.body = 'No Response'
+
+        else:
+            self.headers = api_response.headers
+
+            if api_response.status_code == 200:
+                self.ok = True
+                self.body = api_response.json()
+                self.entities = self.body.get('entities', [])
+
+            else:
+                self.ok = False
+
+                if api_response.headers.get('Content-type') == 'application/json':
+                    self.body = api_response.json()
+                else:
+                    self.body = 'HTTP %s: %s' % (api_response.status_code, api_response.text)
+
+    def __str__(self):
+        return json.dumps(self.body)
+
+    def first(self):
+        return UsergridEntity(entity_data=self.entities[0]) if self.ok and self.entities and len(
+                self.entities) > 0 else None
+
+    def entity(self):
+        return self.first()
+
+    def last(self):
+        return UsergridEntity(entity_data=self.entities[len(self.entities) - 1]) if self.ok and self.entities and len(
+                self.entities) > 0 else None
+
+    def has_next_page(self):
+        return 'cursor' in self.body if self.ok else False
+
+
+class UsergridEntity(object):
+    def __init__(self, entity_data):
+        self.entity_data = entity_data
+
+    def __str__(self):
+        return json.dumps(self.entity_data)
+
+    def get(self, name, default=None):
+        return self.entity_data.get(name, default)
+
+    def entity_id(self):
+
+        if self.entity_data.get('type', '').lower() in ['users', 'user']:
+            return self.entity_data.get('uuid', self.entity_data.get('username'))
+
+        return self.entity_data.get('uuid', self.entity_data.get('name'))
+
+    def can_mutate_or_load(self):
+        entity_id = self.entity_id()
+
+        if entity_id is None or self.entity_data.get('type') is None:
+            return False
+
+        return True
+
+    def put_property(self, name, value):
+        self.entity_data[name] = value
+
+    def put_properties(self, properties):
+        if isinstance(properties, dict):
+            self.entity_data.update(properties)
+
+    def remove_property(self, name):
+
+        if name is not None and name in self.entity_data:
+            del self.entity_data[name]
+
+    def remove_properties(self, properties):
+        if isinstance(properties, (list, dict)):
+            for property_name in properties:
+                self.remove_property(property_name)
+
+    def append(self, array_name, value):
+        if array_name in self.entity_data:
+            if isinstance(self.entity_data[array_name], list):
+                self.entity_data[array_name].append(value)
+        else:
+            self.entity_data[array_name] = [value]
+
+    def prepend(self, array_name, value):
+        if array_name in self.entity_data:
+            if isinstance(self.entity_data[array_name], list):
+                self.entity_data[array_name].pre(value)
+        else:
+            self.entity_data[array_name] = [value]
+
+    def insert(self, array_name, value, index):
+        if array_name in self.entity_data:
+            if isinstance(self.entity_data[array_name], list):
+                self.entity_data[array_name].insert(index, value)
+
+    def shift(self, array_name):
+        if array_name in self.entity_data:
+            if isinstance(self.entity_data[array_name], list):
+                value = self.entity_data[array_name][0]
+                self.entity_data[array_name] = self.entity_data[array_name][1:]
+                return value
+
+        return None
+
+    def reload(self):
+        if not self.can_mutate_or_load():
+            raise ValueError('Unable to reload entity: No uuid nor name')
+
+        response = Usergrid.GET(collection=self.entity_data.get('type'),
+                                uuid_name=self.entity_id())
+        if response.ok:
+            self.entity_data.update(response.entity().entity_data)
+
+        else:
+            raise ValueError('Unable to reload entity: %s' % response)
+
+    def save(self):
+        if not self.can_mutate_or_load():
+            raise ValueError('Unable to save entity: No uuid nor name')
+
+        response = Usergrid.PUT(collection=self.entity_data.get('type'),
+                                uuid_name=self.entity_id(),
+                                data=self.entity_data)
+
+        if response.ok and 'uuid' not in self.entity_data:
+            self.entity_data['uuid'] = response.entity().get('uuid')
+
+        return response
+
+    def remove(self):
+        if not self.can_mutate_or_load():
+            raise ValueError('Unable to delete entity: No uuid nor name')
+
+        return Usergrid.DELETE(collection=self.entity_data.get('type'),
+                               uuid_name=self.entity_id())
+
+    def get_connections(self, relationship, direction='connecting'):
+        pass
+
+    def connect(self, relationship, to_entity):
+
+        if not to_entity.can_mutate_or_load():
+            raise ValueError('Unable to connect to entity - no uuid or name')
+
+        if not self.can_mutate_or_load():
+            raise ValueError('Unable from connect to entity - no uuid or name')
+
+        return Usergrid.connect_entities(self, relationship, to_entity)
+
+    def disconnect(self, relationship, to_entity):
+        if not to_entity.can_mutate_or_load():
+            raise ValueError('Unable to connect to entity - no uuid or name')
+
+        if not self.can_mutate_or_load():
+            raise ValueError('Unable from connect to entity - no uuid or name')
+
+        return Usergrid.disconnect_entities(self, relationship, to_entity)
+
+    def attach_asset(self, filename, data, content_type):
+        pass
+
+    def download_asset(self, content_type=None):
+        pass
+
+
+class UsergridClient(object):
+    def __init__(self,
+                 org_id,
+                 app_id,
+                 base_url='http://api.usergrid.com',
+                 client_id=None,
+                 client_secret=None,
+                 token_ttl_seconds=86400,
+                 auth_fallback="none"):
+
+        self.base_url = base_url
+        self.org_id = org_id
+        self.app_id = app_id
+        self.auth_fallback = auth_fallback
+        self.logger = logging.getLogger('usergrid.UsergridClient')
+        self.session = requests.Session()
+
+        self.url_data = {
+            'base_url': base_url,
+            'org_id': org_id,
+            'app_id': app_id
+        }
+
+        if client_id and not client_secret:
+            value_error('Client ID Specified but not Secret')
+
+        elif client_secret and not client_id:
+            value_error('Client ID Specified but not Secret')
+
+        elif client_secret and client_id:
+            self.auth = UsergridAppAuth(client_id=client_id,
+                                        client_secret=client_secret,
+                                        token_ttl_seconds=token_ttl_seconds)
+
+            self.auth.authenticate(self)
+            self.session.headers.update({'Authorization': 'Bearer %s' % self.auth.access_token})
+
+    def __str__(self):
+        return json.dumps({
+            'base_url': self.base_url,
+            'org_id': self.org_id,
+            'app_id': self.app_id,
+            'access_token': self.auth.access_token
+        })
+
+    def GET(self, collection, uuid_name, connections='none', auth=None, **kwargs):
+        url = get_entity_url_template.format(collection=collection,
+                                             uuid_name=uuid_name,
+                                             connections=connections,
+                                             **self.url_data)
+        if auth:
+            r = requests.get(url, headers={'Authorization': 'Bearer %s' % auth.access_token})
+
+        else:
+            r = self.session.get(url)
+
+        return UsergridResponse(r, self)
+
+    def PUT(self, collection, uuid_name, data, auth=None, **kwargs):
+        url = put_entity_url_template.format(collection=collection,
+                                             uuid_name=uuid_name,
+                                             **self.url_data)
+
+        if auth:
+            r = requests.put(url,
+                             data=json.dumps(data),
+                             headers={'Authorization': 'Bearer %s' % auth.access_token})
+        else:
+            r = self.session.put(url, data=json.dumps(data))
+
+        return UsergridResponse(r, self)
+
+    def POST(self, collection, data, auth=None, **kwargs):
+        url = post_collection_url_template.format(collection=collection,
+                                                  **self.url_data)
+
+        if auth:
+            r = requests.post(url,
+                              data=json.dumps(data),
+                              headers={'Authorization': 'Bearer %s' % auth.access_token})
+        else:
+            r = self.session.post(url, data=json.dumps(data))
+
+        return UsergridResponse(r, self)
+
+    def DELETE(self, collection, uuid_name, auth=None, **kwargs):
+        url = delete_entity_url_template.format(collection=collection,
+                                                uuid_name=uuid_name,
+                                                **self.url_data)
+
+        if auth:
+            r = requests.delete(url, headers={'Authorization': 'Bearer %s' % auth.access_token})
+        else:
+            r = self.session.delete(url)
+
+        return UsergridResponse(r, self)
+
+    def connect_entities(self, from_entity, relationship, to_entity, auth=None, **kwargs):
+
+        url = connect_entities_by_type_template.format(from_collection=from_entity.get('type'),
+                                                       from_uuid_name=from_entity.entity_id(),
+                                                       relationship=relationship,
+                                                       to_collection=to_entity.get('type'),
+                                                       to_uuid_name=to_entity.entity_id(),
+                                                       **self.url_data)
+
+        if auth:
+            r = requests.post(url, headers={'Authorization': 'Bearer %s' % auth.access_token})
+        else:
+            r = self.session.post(url)
+
+        return UsergridResponse(r, self)
+
+    def assign_role(self, role_uuid_name, entity, auth=None, **kwargs):
+        url = assign_role_url_template.format(role_uuid_name=role_uuid_name,
+                                              entity_type=entity.get('type'),
+                                              entity_uuid_name=entity.entity_id(),
+                                              **self.url_data)
+
+        if auth:
+            r = requests.delete(url, headers={'Authorization': 'Bearer %s' % auth.access_token})
+        else:
+            r = self.session.delete(url)
+
+        return UsergridResponse(r, self)
+
+    def disconnect_entities(self, from_entity, relationship, to_entity, auth=None, **kwargs):
+            url = connect_entities_by_type_template.format(from_collection=from_entity.get('type'),
+                                                           from_uuid_name=from_entity.entity_id(),
+                                                           relationship=relationship,
+                                                           to_collection=to_entity.get('type'),
+                                                           to_uuid_name=to_entity.entity_id(),
+                                                           **self.url_data)
+
+            if auth:
+                r = requests.delete(url, headers={'Authorization': 'Bearer %s' % auth.access_token})
+            else:
+                r = self.session.delete(url)
+
+            return UsergridResponse(r, self)
+
+
+class UsergridUser(object):
+    def __init__(self):
+        pass
+
+
+class UsergridAsset(object):
+    def __init__(self, filename, data, content_type):
+        self.filename = filename
+        self.data = data
+        self.content_type = content_type
diff --git a/sdks/python/usergrid/UsergridCollection.py b/sdks/python/usergrid/UsergridCollection.py
new file mode 100644
index 0000000..eb8863d
--- /dev/null
+++ b/sdks/python/usergrid/UsergridCollection.py
@@ -0,0 +1,82 @@
+# */
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *   http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing,
+# * software distributed under the License is distributed on an
+# * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# * KIND, either express or implied.  See the License for the
+#    * specific language governing permissions and limitations
+# * under the License.
+# */
+
+__author__ = 'Jeff.West@yahoo.com'
+
+
+class UsergridCollection(object):
+    def __init__(self, org_id, app_id, collection_name, client):
+        self.org_id = org_id
+        self.app_id = app_id
+        self.collection_name = collection_name
+        self.client = client
+
+    def __str__(self):
+        return json.dumps({
+            'org_id': self.org_id,
+            'app_id': self.app_id,
+            'collection_name': self.collection_name,
+        })
+
+    def entity(self, uuid):
+        pass
+
+    def entity_from_data(self, data):
+        return UsergridEntity(org_id=self.org_id,
+                              app_id=self.app_id,
+                              collection_name=self.collection_name,
+                              data=data,
+                              client=self.client)
+
+    def query(self, ql='select *', limit=100):
+        url = collection_query_url_template.format(app_id=self.app_id,
+                                                   ql=ql,
+                                                   limit=limit,
+                                                   collection=self.collection_name,
+                                                   **self.client.url_data)
+
+        return UsergridQuery(url, headers=self.client.headers)
+
+    def entities(self, **kwargs):
+        return self.query(**kwargs)
+
+    def post(self, entity, **kwargs):
+        url = collection_url_template.format(collection=self.collection_name,
+                                             app_id=self.app_id,
+                                             **self.client.url_data)
+
+        r = self.client.post(url, data=entity, **kwargs)
+
+        if r.status_code == 200:
+            api_response = r.json()
+            entity = api_response.get('entities')[0]
+            e = UsergridEntity(org_id=self.org_id,
+                               app_id=self.app_id,
+                               collection_name=self.collection_name,
+                               data=entity,
+                               client=self.client)
+            return e
+
+        else:
+            raise UsergridError(message='Unable to post to collection name=[%s]' % self.collection_name,
+                                status_code=r.status_code,
+                                data=entity,
+                                api_response=r,
+                                url=url)
+
diff --git a/sdks/python/usergrid/UsergridConnection.py b/sdks/python/usergrid/UsergridConnection.py
new file mode 100644
index 0000000..c008b91
--- /dev/null
+++ b/sdks/python/usergrid/UsergridConnection.py
@@ -0,0 +1,30 @@
+# */
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *   http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing,
+# * software distributed under the License is distributed on an
+# * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# * KIND, either express or implied.  See the License for the
+#    * specific language governing permissions and limitations
+# * under the License.
+# */
+
+import logging
+
+__author__ = 'Jeff.West@yahoo.com'
+
+
+class UsergridConnection(object):
+    def __init__(self, source_entity, verb, target_entity):
+        self.source_entity = source_entity
+        self.verb = verb
+        self.target_entity = target_entity
+        self.logger = logging.getLogger('usergrid.UsergridConnection')
diff --git a/sdks/python/usergrid/UsergridError.py b/sdks/python/usergrid/UsergridError.py
new file mode 100644
index 0000000..3b2a4e0
--- /dev/null
+++ b/sdks/python/usergrid/UsergridError.py
@@ -0,0 +1,21 @@
+# */
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *   http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing,
+# * software distributed under the License is distributed on an
+# * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# * KIND, either express or implied.  See the License for the
+#    * specific language governing permissions and limitations
+# * under the License.
+# */
+
+__author__ = 'Jeff.West@yahoo.com'
+
diff --git a/sdks/python/usergrid/UsergridOrganization.py b/sdks/python/usergrid/UsergridOrganization.py
new file mode 100644
index 0000000..14ad7a5
--- /dev/null
+++ b/sdks/python/usergrid/UsergridOrganization.py
@@ -0,0 +1,35 @@
+# */
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *   http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing,
+# * software distributed under the License is distributed on an
+# * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# * KIND, either express or implied.  See the License for the
+#    * specific language governing permissions and limitations
+# * under the License.
+# */
+
+from usergrid import UsergridApplication
+
+__author__ = 'Jeff.West@yahoo.com'
+
+
+class UsergridOrganization(object):
+    def __init__(self, org_id, client):
+        self.org_id = org_id
+        self.client = client
+
+    def application(self, app_id):
+        return UsergridApplication(app_id, client=self.client)
+
+    def app(self, app_id):
+        return self.application(app_id)
+
diff --git a/sdks/python/usergrid/UsergridQueryIterator.py b/sdks/python/usergrid/UsergridQueryIterator.py
new file mode 100755
index 0000000..e487fb3
--- /dev/null
+++ b/sdks/python/usergrid/UsergridQueryIterator.py
@@ -0,0 +1,157 @@
+# */
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *   http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing,
+# * software distributed under the License is distributed on an
+# * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# * KIND, either express or implied.  See the License for the
+#    * specific language governing permissions and limitations
+# * under the License.
+# */
+
+import json
+import logging
+import traceback
+import requests
+import time
+
+__author__ = 'Jeff.West@yahoo.com'
+
+
+class UsergridQueryIterator(object):
+    def __init__(self,
+                 url,
+                 operation='GET',
+                 sleep_time=10,
+                 page_delay=0,
+                 headers=None,
+                 data=None):
+
+        if not data:
+            data = {}
+        if not headers:
+            headers = {}
+
+        self.page_counter = 0
+        self.total_retrieved = 0
+        self.logger = logging.getLogger('usergrid.UsergridQuery')
+        self.data = data
+        self.headers = headers
+        self.url = url
+        self.operation = operation
+        self.next_cursor = None
+        self.entities = []
+        self.count_retrieved = 0
+        self._pos = 0
+        self.last_response = None
+        self.page_delay = page_delay
+        self.sleep_time = sleep_time
+        self.session = None
+
+    def _get_next_response(self, attempts=0):
+
+        if self.session is None:
+            self.session = requests.Session()
+
+        try:
+            if self.operation == 'PUT':
+                op = self.session.put
+            elif self.operation == 'DELETE':
+                op = self.session.delete
+            else:
+                op = self.session.get
+
+            target_url = self.url
+
+            if self.next_cursor is not None:
+                delim = '&' if '?' in target_url else '?'
+                target_url = '%s%scursor=%s' % (self.url, delim, self.next_cursor)
+
+            self.logger.debug('Operation=[%s] URL=[%s]' % (self.operation, target_url))
+
+            r = op(target_url, data=json.dumps(self.data), headers=self.headers)
+
+            if r.status_code == 200:
+                r_json = r.json()
+                count_retrieved = len(r_json.get('entities', []))
+                self.total_retrieved += count_retrieved
+                self.logger.debug('Retrieved [%s] entities in [%s]th page in [%s], total from [%s] is [%s]' % (
+                    count_retrieved, self.page_counter, r.elapsed, self.url, self.total_retrieved))
+
+                return r_json
+
+            elif r.status_code in [401, 404] and 'service_resource_not_found' in r.text:
+                self.logger.error('Query Not Found [%s] on URL=[%s]: %s' % (r.status_code, target_url, r.text))
+                raise SystemError('Query Not Found [%s] on URL=[%s]: %s' % (r.status_code, target_url, r.text))
+
+            else:
+                if attempts < 10:
+                    self.logger.error('Sleeping %s after HTTP [%s] for retry attempt=[%s] on URL=[%s], response: %s' % (
+                        self.sleep_time, r.status_code, attempts, target_url, r.text))
+
+                    time.sleep(self.sleep_time)
+
+                    return self._get_next_response(attempts=attempts + 1)
+
+                else:
+                    raise SystemError('Unable to get next response after %s attempts' % attempts)
+
+        except:
+            print traceback.format_exc()
+
+    def next(self):
+
+        if self.last_response is None:
+            self.logger.debug('getting first page, url=[%s]' % self.url)
+
+            self._process_next_page()
+
+        elif self._pos >= len(self.entities) > 0 and self.next_cursor is not None:
+
+            self.logger.debug('getting next page, count=[%s] url=[%s], cursor=[%s]' % (
+                self.count_retrieved, self.url, self.next_cursor))
+
+            self._process_next_page()
+            self.logger.debug('Sleeping [%s]s between pages' % self.page_delay)
+
+            time.sleep(self.page_delay)
+
+        if self._pos < len(self.entities):
+            response = self.entities[self._pos]
+            self._pos += 1
+            return response
+
+        raise StopIteration
+
+    def __iter__(self):
+        return self
+
+    def _process_next_page(self, attempts=0):
+
+        api_response = self._get_next_response()
+
+        if api_response is None:
+            message = 'Unable to retrieve query results from url=[%s]' % self.url
+            self.logger.error(message)
+            api_response = {}
+            raise StopIteration
+
+        self.last_response = api_response
+
+        self.entities = api_response.get('entities', [])
+        self.next_cursor = api_response.get('cursor', None)
+        self._pos = 0
+        self.count_retrieved += len(self.entities)
+        self.page_counter += 1
+
+        if self.next_cursor is None:
+            self.logger.debug('no cursor in response. Total pages=[%s], entities=[%s] url=[%s]' % (
+                self.page_counter, self.count_retrieved, self.url))
diff --git a/sdks/python/usergrid/__init__.py b/sdks/python/usergrid/__init__.py
new file mode 100644
index 0000000..93f8273
--- /dev/null
+++ b/sdks/python/usergrid/__init__.py
@@ -0,0 +1,37 @@
+#
+#  Licensed to the Apache Software Foundation (ASF) under one or more
+#   contributor license agreements.  The ASF licenses this file to You
+#  under the Apache License, Version 2.0 (the "License"); you may not
+#  use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+# 
+#      http://www.apache.org/licenses/LICENSE-2.0
+# 
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.  For additional information regarding
+#  copyright in this work, please see the NOTICE file in the top level
+#  directory of this distribution.
+
+__all__ = [
+    'UsergridApplication',
+    'UsergridClient',
+    'UsergridConnection',
+    'UsergridConnectionProfile',
+    'UsergridEntity',
+    'Usergrid',
+    'UsergridError',
+    'UsergridOrganization',
+    'UsergridAuth',
+    'UsergridQueryIterator',
+    'UsergridResponse'
+]
+
+from .UsergridApplication import UsergridApplication
+from .UsergridClient import UsergridClient, Usergrid, UsergridResponse
+from .UsergridConnection import UsergridConnection
+from .UsergridOrganization import UsergridOrganization
+from .UsergridQueryIterator import UsergridQueryIterator
+from .UsergridAuth import UsergridAuth
diff --git a/sdks/python/usergrid/app_templates.py b/sdks/python/usergrid/app_templates.py
new file mode 100644
index 0000000..3598587
--- /dev/null
+++ b/sdks/python/usergrid/app_templates.py
@@ -0,0 +1,38 @@
+# */
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *   http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing,
+# * software distributed under the License is distributed on an
+# * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# * KIND, either express or implied.  See the License for the
+#    * specific language governing permissions and limitations
+# * under the License.
+# */
+
+__author__ = 'Jeff.West@yahoo.com'
+
+org_url_template = "{base_url}/{org_id}"
+app_url_template = "%s/{app_id}" % org_url_template
+
+app_token_url_template = "%s/token" % app_url_template
+
+collection_url_template = "%s/{collection}" % app_url_template
+collection_query_url_template = "%s?ql={ql}&limit={limit}" % collection_url_template
+
+post_collection_url_template = collection_url_template
+entity_url_template = "%s/{uuid_name}" % collection_url_template
+get_entity_url_template = "%s?connections={connections}" % entity_url_template
+put_entity_url_template = entity_url_template
+delete_entity_url_template = entity_url_template
+
+assign_role_url_template = '%s/roles/{role_uuid_name}/{entity_type}/{entity_uuid_name}' % app_url_template
+
+connect_entities_by_type_template = '%s/{from_collection}/{from_uuid_name}/{relationship}/{to_collection}/{to_uuid_name}' % app_url_template
\ No newline at end of file
diff --git a/sdks/python/usergrid/management_templates.py b/sdks/python/usergrid/management_templates.py
new file mode 100644
index 0000000..c231b49
--- /dev/null
+++ b/sdks/python/usergrid/management_templates.py
@@ -0,0 +1,27 @@
+# */
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *   http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing,
+# * software distributed under the License is distributed on an
+# * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# * KIND, either express or implied.  See the License for the
+#    * specific language governing permissions and limitations
+# * under the License.
+# */
+
+__author__ = 'Jeff.West@yahoo.com'
+
+management_base_url = '{base_url}/management'
+management_org_url_template = "%s/organizations/{org_id}" % management_base_url
+management_org_list_apps_url_template = "%s/applications" % management_org_url_template
+management_app_url_template = "%s/applications/{app_id}" % management_org_url_template
+
+org_token_url_template = "%s/token" % management_base_url
diff --git a/stack/README.md b/stack/README.md
index c9fbf7e..adf776c 100644
--- a/stack/README.md
+++ b/stack/README.md
@@ -11,7 +11,7 @@
 * JDK 1.8 (http://www.oracle.com/technetwork/java/javase/downloads/index.html)
 * 3.0.0 <= Maven (http://maven.apache.org/)
 * Cassandra 1.2.1+
-* ElasticSearch 1.4.4+
+* ElasticSearch 1.4.x - 1.7.x
 
 
 ## Building
diff --git a/stack/config/src/main/resources/usergrid-default.properties b/stack/config/src/main/resources/usergrid-default.properties
index e356efc..34d46ad 100644
--- a/stack/config/src/main/resources/usergrid-default.properties
+++ b/stack/config/src/main/resources/usergrid-default.properties
@@ -449,8 +449,8 @@
 # TTL of a unique value reservation when written to Cassandra
 collection.uniquevalues.reservation.ttl=10
 
-# The default authoritative region for when is not specified elsewhere
-collection.uniquevalues.authoritative.region=default
+# The default authoritative region for when is not specified elsewhere; by default leave this empty
+collection.uniquevalues.authoritative.region=
 
 
 ##############################  Usergrid Scheduler  ###########################
@@ -512,18 +512,19 @@
 
 
 
-###############################  Usergrid Central SSO  #############################
+################################  Usergrid Central SSO  #############################
+##
+## Usergrid has a feature to provide a distributing SSO system.  The below configurations
+## allow you to configure the central Usergrid SSO server.
+##
 #
-# Usergrid has a feature to provide a distributing SSO system.  The below configurations
-# allow you to configure the central Usergrid SSO server.
-#
-
-# Set the base URL of the central Usergrid SSO server.  This will enable
-# External Token Validation for Admin Users and will configure this Usergrid
-# instance to delegate all Admin User authentication to the central Usergrid SSO
-# server. See also: https://issues.apache.org/jira/browse/USERGRID-567
-#
-usergrid.central.url=
+## Set the base URL of the central Usergrid SSO server.  This will enable
+## External Token Validation for Admin Users and will configure this Usergrid
+## instance to delegate all Admin User authentication to the central Usergrid SSO
+## server. See also: https://issues.apache.org/jira/browse/USERGRID-567
+##
+#usergrid.central.url=
+#usergrid.central.enabled=
 
 # Set the HTTP Client connection pool for connections to the SSO central server.
 #
@@ -533,6 +534,10 @@
 
 
 
+####################   Usergrid External SSO Configuration   #####################
+usergrid.external.sso.enabled=false
+usergrid.external.sso.provider=
+usergrid.external.sso.url=
 
 ###############################  Usergrid Assets  #############################
 #
@@ -577,6 +582,9 @@
 usergrid.sysadmin.login.password=test
 usergrid.sysadmin.login.allowed=true
 
+#enable if superuser can create an org without any user associated with it.
+usergrid.superuser.addorg.enable=true
+
 # if usergrid.sysadmin.login.allowed=true, only allows sysadmin login if request is localhost
 # if usergrid.sysadmin.login.allowed=false, this property has no effect
 usergrid.sysadmin.localhost.only=false
@@ -716,6 +724,9 @@
 #
 #
 
+# Set a flag to allow public org registrations
+usergrid.management.allow-public-registrations=true
+
 # Set the requirements for activiation and confirmations
 usergrid.sysadmin.approve.users=false
 usergrid.sysadmin.approve.organizations=false
diff --git a/stack/core/src/main/java/org/apache/usergrid/batch/service/JobSchedulerService.java b/stack/core/src/main/java/org/apache/usergrid/batch/service/JobSchedulerService.java
index 50bd9bc..807daa3 100644
--- a/stack/core/src/main/java/org/apache/usergrid/batch/service/JobSchedulerService.java
+++ b/stack/core/src/main/java/org/apache/usergrid/batch/service/JobSchedulerService.java
@@ -93,7 +93,9 @@
         failCounter = metricsFactory.getCounter( JobSchedulerService.class, "scheduler.failed_jobs" );
 
         try {
-            logger.info( "Running one check iteration ..." );
+            if ( logger.isDebugEnabled() ) {
+                logger.debug( "Running one check iteration ..." );
+            }
             List<JobDescriptor> activeJobs;
 
             // run until there are no more active jobs
diff --git a/stack/core/src/main/java/org/apache/usergrid/corepersistence/CpEntityManager.java b/stack/core/src/main/java/org/apache/usergrid/corepersistence/CpEntityManager.java
index fcda5b5..7b273e4 100644
--- a/stack/core/src/main/java/org/apache/usergrid/corepersistence/CpEntityManager.java
+++ b/stack/core/src/main/java/org/apache/usergrid/corepersistence/CpEntityManager.java
@@ -36,6 +36,8 @@
 import org.apache.usergrid.corepersistence.service.ConnectionService;
 import org.apache.usergrid.corepersistence.util.CpEntityMapUtils;
 import org.apache.usergrid.corepersistence.util.CpNamingUtils;
+import org.apache.usergrid.mq.QueueManager;
+import org.apache.usergrid.mq.QueueManagerFactory;
 import org.apache.usergrid.persistence.*;
 import org.apache.usergrid.persistence.Query.Level;
 import org.apache.usergrid.persistence.actorsystem.ActorSystemFig;
@@ -65,6 +67,7 @@
 import org.apache.usergrid.persistence.model.field.Field;
 import org.apache.usergrid.persistence.model.field.StringField;
 import org.apache.usergrid.persistence.model.util.UUIDGenerator;
+import org.apache.usergrid.mq.Message;
 import org.apache.usergrid.utils.*;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -155,6 +158,9 @@
 
     private EntityCollectionManager ecm;
 
+    public QueueManagerFactory queueManagerFactory;
+
+
     //    /** Short-term cache to keep us from reloading same Entity during single request. */
 //    private LoadingCache<EntityScope, org.apache.usergrid.persistence.model.entity.Entity> entityCache;
 
@@ -178,7 +184,8 @@
                             final CollectionService collectionService,
                             final ConnectionService connectionService,
                             final CollectionSettingsFactory collectionSettingsFactory,
-                            final UUID applicationId ) {
+                            final UUID applicationId,
+                            final QueueManagerFactory queueManagerFactory) {
 
         this.entityManagerFig = entityManagerFig;
         this.actorSystemFig = actorSystemFig;
@@ -243,6 +250,8 @@
 
         // set to false for now
         this.skipAggregateCounters = false;
+
+        this.queueManagerFactory = queueManagerFactory;
     }
 
 
@@ -1493,6 +1502,21 @@
         return entity;
     }
 
+    public Message storeEventAsMessage(Mutator<ByteBuffer> m, Event event, long timestamp) {
+
+        counterUtils.addEventCounterMutations(m, applicationId, event, timestamp);
+
+        QueueManager q = queueManagerFactory.getQueueManager(applicationId);
+
+        Message message = new Message();
+        message.setType("event");
+        message.setCategory(event.getCategory());
+        message.setStringProperty("message", event.getMessage());
+        message.setTimestamp(timestamp);
+        q.postToQueue("events", message);
+
+        return message;
+    }
 
     @Override
     public Entity createItemInCollection( EntityRef entityRef, String collectionName,
@@ -2772,11 +2796,14 @@
                 }
             }
 
-            //doesn't allow the mutator to be ignored.
-            counterUtils.addEventCounterMutations( null, applicationId, event, timestamp );
+            Mutator<ByteBuffer> batch = createMutator( cass.getApplicationKeyspace( applicationId ), be );
+            Message message = storeEventAsMessage( batch, event, timestamp );
 
             incrementEntityCollection( "events", timestamp );
 
+            entity.setUuid( message.getUuid() );
+            batch.execute();
+
             return entity;
         }
 
diff --git a/stack/core/src/main/java/org/apache/usergrid/corepersistence/CpEntityManagerFactory.java b/stack/core/src/main/java/org/apache/usergrid/corepersistence/CpEntityManagerFactory.java
index a419e58..2a88302 100644
--- a/stack/core/src/main/java/org/apache/usergrid/corepersistence/CpEntityManagerFactory.java
+++ b/stack/core/src/main/java/org/apache/usergrid/corepersistence/CpEntityManagerFactory.java
@@ -34,6 +34,7 @@
 import org.apache.usergrid.corepersistence.util.CpNamingUtils;
 import org.apache.usergrid.exception.ConflictException;
 import org.apache.usergrid.locking.LockManager;
+import org.apache.usergrid.mq.QueueManagerFactory;
 import org.apache.usergrid.persistence.*;
 import org.apache.usergrid.persistence.actorsystem.ActorSystemFig;
 import org.apache.usergrid.persistence.actorsystem.ActorSystemManager;
@@ -43,7 +44,6 @@
 import org.apache.usergrid.persistence.collection.EntityCollectionManager;
 import org.apache.usergrid.persistence.collection.exception.CollectionRuntimeException;
 import org.apache.usergrid.persistence.collection.serialization.impl.migration.EntityIdScope;
-import org.apache.usergrid.persistence.collection.uniquevalues.UniqueValueActor;
 import org.apache.usergrid.persistence.collection.uniquevalues.UniqueValuesService;
 import org.apache.usergrid.persistence.core.metrics.MetricsFactory;
 import org.apache.usergrid.persistence.core.migration.data.MigrationDataProvider;
@@ -116,6 +116,8 @@
     private UniqueValuesService uniqueValuesService;
     private final LockManager lockManager;
 
+    private final QueueManagerFactory queueManagerFactory;
+
     public static final String MANAGEMENT_APP_INIT_MAXRETRIES= "management.app.init.max-retries";
     public static final String MANAGEMENT_APP_INIT_INTERVAL = "management.app.init.interval";
 
@@ -150,10 +152,6 @@
                 this.actorSystemManager = injector.getInstance( ActorSystemManager.class );
 
                 actorSystemManager.registerRouterProducer( uniqueValuesService );
-                actorSystemManager.registerMessageType( UniqueValueActor.Request.class, "/user/uvProxy" );
-                actorSystemManager.registerMessageType( UniqueValueActor.Reservation.class, "/user/uvProxy" );
-                actorSystemManager.registerMessageType( UniqueValueActor.Cancellation.class, "/user/uvProxy" );
-                actorSystemManager.registerMessageType( UniqueValueActor.Confirmation.class, "/user/uvProxy" );
                 actorSystemManager.start();
                 actorSystemManager.waitForClientActor();
 
@@ -163,7 +161,7 @@
             }
         }
         this.lockManager = injector.getInstance( LockManager.class );
-
+        this.queueManagerFactory = injector.getInstance( QueueManagerFactory.class );
 
 
         // this line always needs to be last due to the temporary cicular dependency until spring is removed
@@ -379,7 +377,8 @@
             collectionService,
             connectionService,
             collectionSettingsFactory,
-            applicationId );
+            applicationId,
+            queueManagerFactory);
 
         return em;
     }
diff --git a/stack/core/src/main/java/org/apache/usergrid/corepersistence/CpRelationManager.java b/stack/core/src/main/java/org/apache/usergrid/corepersistence/CpRelationManager.java
index b398562..57b1526 100644
--- a/stack/core/src/main/java/org/apache/usergrid/corepersistence/CpRelationManager.java
+++ b/stack/core/src/main/java/org/apache/usergrid/corepersistence/CpRelationManager.java
@@ -17,69 +17,48 @@
 package org.apache.usergrid.corepersistence;
 
 
-import java.util.*;
-
+import com.google.common.base.Optional;
+import com.google.common.base.Preconditions;
+import org.apache.usergrid.corepersistence.asyncevents.AsyncEventService;
 import org.apache.usergrid.corepersistence.index.CollectionSettings;
 import org.apache.usergrid.corepersistence.index.CollectionSettingsFactory;
 import org.apache.usergrid.corepersistence.index.CollectionSettingsScopeImpl;
-import org.apache.usergrid.corepersistence.results.IdQueryExecutor;
-import org.apache.usergrid.persistence.map.MapManager;
-import org.apache.usergrid.persistence.map.MapScope;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.springframework.util.Assert;
-
-import org.apache.usergrid.corepersistence.asyncevents.AsyncEventService;
 import org.apache.usergrid.corepersistence.pipeline.read.ResultsPage;
 import org.apache.usergrid.corepersistence.results.ConnectionRefQueryExecutor;
 import org.apache.usergrid.corepersistence.results.EntityQueryExecutor;
+import org.apache.usergrid.corepersistence.results.IdQueryExecutor;
 import org.apache.usergrid.corepersistence.service.CollectionSearch;
 import org.apache.usergrid.corepersistence.service.CollectionService;
 import org.apache.usergrid.corepersistence.service.ConnectionSearch;
 import org.apache.usergrid.corepersistence.service.ConnectionService;
 import org.apache.usergrid.corepersistence.util.CpEntityMapUtils;
 import org.apache.usergrid.corepersistence.util.CpNamingUtils;
-import org.apache.usergrid.persistence.ConnectedEntityRef;
-import org.apache.usergrid.persistence.ConnectionRef;
-import org.apache.usergrid.persistence.Entity;
-import org.apache.usergrid.persistence.EntityManager;
-import org.apache.usergrid.persistence.EntityRef;
-import org.apache.usergrid.persistence.Query;
+import org.apache.usergrid.persistence.*;
 import org.apache.usergrid.persistence.Query.Level;
-import org.apache.usergrid.persistence.RelationManager;
-import org.apache.usergrid.persistence.Results;
-import org.apache.usergrid.persistence.RoleRef;
-import org.apache.usergrid.persistence.Schema;
-import org.apache.usergrid.persistence.SimpleEntityRef;
-import org.apache.usergrid.persistence.SimpleRoleRef;
 import org.apache.usergrid.persistence.cassandra.ConnectionRefImpl;
 import org.apache.usergrid.persistence.core.scope.ApplicationScope;
 import org.apache.usergrid.persistence.entities.Group;
 import org.apache.usergrid.persistence.entities.User;
-import org.apache.usergrid.persistence.graph.Edge;
-import org.apache.usergrid.persistence.graph.GraphManager;
-import org.apache.usergrid.persistence.graph.MarkedEdge;
-import org.apache.usergrid.persistence.graph.SearchByEdge;
-import org.apache.usergrid.persistence.graph.SearchByEdgeType;
+import org.apache.usergrid.persistence.graph.*;
 import org.apache.usergrid.persistence.graph.impl.SimpleSearchByEdge;
 import org.apache.usergrid.persistence.graph.impl.SimpleSearchByEdgeType;
 import org.apache.usergrid.persistence.graph.impl.SimpleSearchEdgeType;
 import org.apache.usergrid.persistence.index.query.Identifier;
+import org.apache.usergrid.persistence.map.MapManager;
+import org.apache.usergrid.persistence.map.MapScope;
 import org.apache.usergrid.persistence.model.entity.Id;
 import org.apache.usergrid.persistence.model.entity.SimpleId;
 import org.apache.usergrid.persistence.schema.CollectionInfo;
 import org.apache.usergrid.utils.InflectionUtils;
 import org.apache.usergrid.utils.MapUtils;
-
-import com.google.common.base.Optional;
-import com.google.common.base.Preconditions;
-
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.springframework.util.Assert;
 import rx.Observable;
 
-import static org.apache.usergrid.corepersistence.util.CpNamingUtils.createCollectionEdge;
-import static org.apache.usergrid.corepersistence.util.CpNamingUtils.createConnectionEdge;
-import static org.apache.usergrid.corepersistence.util.CpNamingUtils.createConnectionSearchByEdge;
-import static org.apache.usergrid.corepersistence.util.CpNamingUtils.getNameFromEdgeType;
+import java.util.*;
+
+import static org.apache.usergrid.corepersistence.util.CpNamingUtils.*;
 import static org.apache.usergrid.persistence.Schema.*;
 import static org.apache.usergrid.utils.ClassUtils.cast;
 import static org.apache.usergrid.utils.InflectionUtils.singularize;
@@ -954,7 +933,7 @@
         final Id sourceId = headEntity.asId();
 
         final Optional<String> queryString = query.isGraphSearch()? Optional.<String>absent(): query.getQl();
-
+        final boolean isConnecting = query.isConnecting();
 
         if ( query.getResultsLevel() == Level.REFS || query.getResultsLevel() == Level.IDS ) {
 
@@ -968,7 +947,7 @@
 
                     final ConnectionSearch search =
                         new ConnectionSearch( applicationScope, sourceId, entityType, connection, toExecute.getLimit(),
-                            queryString, cursor );
+                            queryString, cursor, isConnecting );
                     return connectionService.searchConnectionAsRefs( search );
                 }
             }.next();
@@ -983,7 +962,7 @@
                 //we need the callback so as we get a new cursor, we execute a new search and re-initialize our builders
                 final ConnectionSearch search =
                     new ConnectionSearch( applicationScope, sourceId, entityType, connection, toExecute.getLimit(),
-                        queryString, cursor );
+                        queryString, cursor, isConnecting );
                 return connectionService.searchConnection( search );
             }
         }.next();
diff --git a/stack/core/src/main/java/org/apache/usergrid/corepersistence/pipeline/builder/IdBuilder.java b/stack/core/src/main/java/org/apache/usergrid/corepersistence/pipeline/builder/IdBuilder.java
index 781d7d5..85e9069 100644
--- a/stack/core/src/main/java/org/apache/usergrid/corepersistence/pipeline/builder/IdBuilder.java
+++ b/stack/core/src/main/java/org/apache/usergrid/corepersistence/pipeline/builder/IdBuilder.java
@@ -20,9 +20,10 @@
 package org.apache.usergrid.corepersistence.pipeline.builder;
 
 
+import com.google.common.base.Optional;
+import org.apache.usergrid.corepersistence.pipeline.Pipeline;
 import org.apache.usergrid.corepersistence.pipeline.PipelineOperation;
 import org.apache.usergrid.corepersistence.pipeline.read.FilterFactory;
-import org.apache.usergrid.corepersistence.pipeline.Pipeline;
 import org.apache.usergrid.corepersistence.pipeline.read.FilterResult;
 import org.apache.usergrid.corepersistence.pipeline.read.ResultsPage;
 import org.apache.usergrid.corepersistence.pipeline.read.collect.ConnectionRefFilter;
@@ -30,13 +31,9 @@
 import org.apache.usergrid.corepersistence.pipeline.read.collect.IdResumeFilter;
 import org.apache.usergrid.corepersistence.pipeline.read.collect.ResultsPageCollector;
 import org.apache.usergrid.corepersistence.pipeline.read.search.Candidate;
-import org.apache.usergrid.corepersistence.pipeline.read.traverse.IdFilter;
 import org.apache.usergrid.persistence.ConnectionRef;
 import org.apache.usergrid.persistence.model.entity.Entity;
 import org.apache.usergrid.persistence.model.entity.Id;
-
-import com.google.common.base.Optional;
-
 import rx.Observable;
 
 
@@ -69,6 +66,19 @@
 
 
     /**
+     * Traverse all connection edges to our input Id
+     * @param connectionName The name of the connection
+     * @param entityType The optional type of the entity
+     * @return
+     */
+    public IdBuilder traverseReverseConnection( final String connectionName, final Optional<String> entityType ) {
+        final PipelineOperation<FilterResult<Id>, FilterResult<Id>> filter;
+        filter = filterFactory.readGraphReverseConnectionFilter( connectionName );
+        return new IdBuilder( pipeline.withFilter(filter ), filterFactory );
+    }
+
+
+    /**
      * Traverse all the collection edges from our input Id
      * @param collectionName
      * @return
diff --git a/stack/core/src/main/java/org/apache/usergrid/corepersistence/pipeline/read/FilterFactory.java b/stack/core/src/main/java/org/apache/usergrid/corepersistence/pipeline/read/FilterFactory.java
index 883fdc8..4b615d8 100644
--- a/stack/core/src/main/java/org/apache/usergrid/corepersistence/pipeline/read/FilterFactory.java
+++ b/stack/core/src/main/java/org/apache/usergrid/corepersistence/pipeline/read/FilterFactory.java
@@ -62,6 +62,14 @@
      */
     ReadGraphConnectionFilter readGraphConnectionFilter( final String connectionName );
 
+
+    /**
+     * Generate a new instance of the command with the specified parameters
+     *
+     * @param connectionName The connection name to use when reverse traversing the graph
+     */
+    ReadGraphReverseConnectionFilter readGraphReverseConnectionFilter( final String connectionName );
+
     /**
      * Generate a new instance of the command with the specified parameters
      *
diff --git a/stack/core/src/main/java/org/apache/usergrid/corepersistence/pipeline/read/traverse/AbstractReadReverseGraphFilter.java b/stack/core/src/main/java/org/apache/usergrid/corepersistence/pipeline/read/traverse/AbstractReadReverseGraphFilter.java
new file mode 100644
index 0000000..dcda98f
--- /dev/null
+++ b/stack/core/src/main/java/org/apache/usergrid/corepersistence/pipeline/read/traverse/AbstractReadReverseGraphFilter.java
@@ -0,0 +1,291 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.usergrid.corepersistence.pipeline.read.traverse;
+
+
+import org.apache.usergrid.corepersistence.asyncevents.AsyncEventService;
+import org.apache.usergrid.corepersistence.asyncevents.EventBuilder;
+import org.apache.usergrid.corepersistence.asyncevents.EventBuilderImpl;
+import org.apache.usergrid.persistence.core.rx.RxTaskScheduler;
+import org.apache.usergrid.persistence.index.impl.IndexOperationMessage;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.usergrid.corepersistence.pipeline.cursor.CursorSerializer;
+import org.apache.usergrid.corepersistence.pipeline.read.AbstractPathFilter;
+import org.apache.usergrid.corepersistence.pipeline.read.EdgePath;
+import org.apache.usergrid.corepersistence.pipeline.read.FilterResult;
+import org.apache.usergrid.persistence.core.scope.ApplicationScope;
+import org.apache.usergrid.persistence.graph.Edge;
+import org.apache.usergrid.persistence.graph.GraphManager;
+import org.apache.usergrid.persistence.graph.GraphManagerFactory;
+import org.apache.usergrid.persistence.graph.MarkedEdge;
+import org.apache.usergrid.persistence.graph.SearchByEdgeType;
+import org.apache.usergrid.persistence.graph.impl.SimpleSearchByEdgeType;
+import org.apache.usergrid.persistence.model.entity.Id;
+
+import com.google.common.base.Optional;
+
+import rx.Observable;
+import rx.functions.Func1;
+
+
+/**
+ * Command for reading graph edges in reverse order.
+ */
+public abstract class AbstractReadReverseGraphFilter extends AbstractPathFilter<Id, Id, MarkedEdge> {
+
+    private static final Logger logger = LoggerFactory.getLogger( AbstractReadGraphFilter.class );
+
+    private final GraphManagerFactory graphManagerFactory;
+    private final RxTaskScheduler rxTaskScheduler;
+    private final EventBuilder eventBuilder;
+    private final AsyncEventService asyncEventService;
+
+
+    /**
+     * Create a new instance of our command
+     */
+    public AbstractReadReverseGraphFilter( final GraphManagerFactory graphManagerFactory,
+                                    final RxTaskScheduler rxTaskScheduler,
+                                    final EventBuilder eventBuilder,
+                                    final AsyncEventService asyncEventService ) {
+        this.graphManagerFactory = graphManagerFactory;
+        this.rxTaskScheduler = rxTaskScheduler;
+        this.eventBuilder = eventBuilder;
+        this.asyncEventService = asyncEventService;
+    }
+
+
+    @Override
+    public Observable<FilterResult<Id>> call( final Observable<FilterResult<Id>> previousIds ) {
+
+
+        final ApplicationScope applicationScope = pipelineContext.getApplicationScope();
+
+        //get the graph manager
+        final GraphManager graphManager =
+            graphManagerFactory.createEdgeManager( applicationScope );
+
+
+        final String edgeName = getEdgeTypeName();
+        final EdgeState edgeCursorState = new EdgeState();
+
+
+        //return all ids that are emitted from this edge
+        return previousIds.flatMap( previousFilterValue -> {
+
+            //set our our constant state
+            final Optional<MarkedEdge> startFromCursor = getSeekValue();
+            final Id id = previousFilterValue.getValue();
+
+
+            final Optional<Edge> typeWrapper = Optional.fromNullable(startFromCursor.orNull());
+
+            /**
+             * We do not want to filter.  This is intentional DO NOT REMOVE!!!
+             *
+             * We want to fire events on these edges if they exist, the delete was missed.
+             */
+            final SimpleSearchByEdgeType search =
+                new SimpleSearchByEdgeType( id, edgeName, Long.MAX_VALUE, SearchByEdgeType.Order.DESCENDING,
+                    typeWrapper, false );
+
+            /**
+             * TODO, pass a message with pointers to our cursor values to be generated later
+             */
+            return graphManager.loadEdgesToTarget( search ).filter(markedEdge -> {
+
+                final boolean isDeleted = markedEdge.isDeleted();
+                final boolean isSourceNodeDeleted = markedEdge.isSourceNodeDelete();
+                final boolean isTargetNodeDelete = markedEdge.isTargetNodeDeleted();
+
+
+                if (isDeleted) {
+
+                    logger.info("Edge {} is deleted when seeking, deleting the edge", markedEdge);
+                    final Observable<IndexOperationMessage> indexMessageObservable = eventBuilder.buildDeleteEdge(applicationScope, markedEdge);
+
+                    indexMessageObservable
+                        .compose(applyCollector())
+                        .subscribeOn(rxTaskScheduler.getAsyncIOScheduler())
+                        .subscribe();
+
+                }
+
+                if (isSourceNodeDeleted) {
+
+                    final Id sourceNodeId = markedEdge.getSourceNode();
+                    logger.info("Edge {} has a deleted source node, deleting the entity for id {}", markedEdge, sourceNodeId);
+
+                    final EventBuilderImpl.EntityDeleteResults
+                        entityDeleteResults = eventBuilder.buildEntityDelete(applicationScope, sourceNodeId);
+
+                    entityDeleteResults.getIndexObservable()
+                        .compose(applyCollector())
+                        .subscribeOn(rxTaskScheduler.getAsyncIOScheduler())
+                        .subscribe();
+
+                    Observable.merge(entityDeleteResults.getEntitiesDeleted(),
+                        entityDeleteResults.getCompactedNode())
+                        .subscribeOn(rxTaskScheduler.getAsyncIOScheduler()).
+                        subscribe();
+
+                }
+
+                if (isTargetNodeDelete) {
+
+                    final Id targetNodeId = markedEdge.getTargetNode();
+                    logger.info("Edge {} has a deleted target node, deleting the entity for id {}", markedEdge, targetNodeId);
+
+                    final EventBuilderImpl.EntityDeleteResults
+                        entityDeleteResults = eventBuilder.buildEntityDelete(applicationScope, targetNodeId);
+
+                    entityDeleteResults.getIndexObservable()
+                        .compose(applyCollector())
+                        .subscribeOn(rxTaskScheduler.getAsyncIOScheduler())
+                        .subscribe();
+
+                    Observable.merge(entityDeleteResults.getEntitiesDeleted(),
+                        entityDeleteResults.getCompactedNode())
+                        .subscribeOn(rxTaskScheduler.getAsyncIOScheduler()).
+                        subscribe();
+
+                }
+
+
+                //filter if any of them are marked
+                return !isDeleted && !isSourceNodeDeleted && !isTargetNodeDelete;
+
+
+            })  // any non-deleted edges should be de-duped here so the results are unique
+                .distinct( new EdgeDistinctKey() )
+                //set the edge state for cursors
+                .doOnNext( edge -> {
+                    if (logger.isTraceEnabled()) {
+                        logger.trace("Seeking over edge {}", edge);
+                    }
+                    edgeCursorState.update( edge );
+                } )
+
+                //map our id from the target edge  and set our cursor every edge we traverse
+                .map( edge -> createFilterResult( edge.getSourceNode(), edgeCursorState.getCursorEdge(),
+                    previousFilterValue.getPath() ) );
+        } );
+    }
+
+
+    @Override
+    protected FilterResult<Id> createFilterResult( final Id emit, final MarkedEdge cursorValue,
+                                                   final Optional<EdgePath> parent ) {
+
+        //if it's our first pass, there's no cursor to generate
+        if(cursorValue == null){
+            return new FilterResult<>( emit, parent );
+        }
+
+        return super.createFilterResult( emit, cursorValue, parent );
+    }
+
+
+    @Override
+    protected CursorSerializer<MarkedEdge> getCursorSerializer() {
+        return EdgeCursorSerializer.INSTANCE;
+    }
+
+
+    /**
+     * Get the edge type name we should use when traversing
+     */
+    protected abstract String getEdgeTypeName();
+
+
+    /**
+     * Wrapper class. Because edges seek > the last returned, we need to keep our n-1 value. This will be our cursor We
+     * always try to seek to the same position as we ended.  Since we don't deal with a persistent read result, if we
+     * seek to a value = to our last, we may skip data.
+     */
+    private final class EdgeState {
+
+        private MarkedEdge cursorEdge = null;
+        private MarkedEdge currentEdge = null;
+
+
+        /**
+         * Update the pointers
+         */
+        private void update( final MarkedEdge newEdge ) {
+            cursorEdge = currentEdge;
+            currentEdge = newEdge;
+        }
+
+
+        /**
+         * Get the edge to use in cursors for resume
+         */
+        private MarkedEdge getCursorEdge() {
+            return cursorEdge;
+        }
+    }
+
+    private Observable.Transformer<IndexOperationMessage, IndexOperationMessage> applyCollector() {
+
+        return observable -> observable
+            .collect(() -> new IndexOperationMessage(), (collector, single) -> collector.ingest(single))
+            .filter(msg -> !msg.isEmpty())
+            .doOnNext(indexOperation -> {
+                asyncEventService.queueIndexOperationMessage(indexOperation);
+            });
+
+    }
+
+    /**
+     *  Return a key that Rx can use for determining a distinct edge.  Build a string containing the UUID
+     *  of the source and target nodes, with the type to ensure uniqueness rather than the int sum of the hash codes.
+     *  Edge timestamp is specifically left out as edges with the same source,target,type but different timestamps
+     *  are considered duplicates.
+     */
+    private class EdgeDistinctKey implements Func1<Edge,String> {
+
+        @Override
+        public String call(Edge edge) {
+
+            return buildDistinctKey(edge.getSourceNode().getUuid().toString(), edge.getTargetNode().getUuid().toString(),
+                edge.getType().toLowerCase());
+        }
+    }
+
+    protected static String buildDistinctKey(final String sourceNode, final String targetNode, final String type){
+
+        final String DISTINCT_KEY_SEPARATOR = ":";
+        StringBuilder stringBuilder = new StringBuilder();
+
+        stringBuilder
+            .append(sourceNode)
+            .append(DISTINCT_KEY_SEPARATOR)
+            .append(targetNode)
+            .append(DISTINCT_KEY_SEPARATOR)
+            .append(type);
+
+        return stringBuilder.toString();
+
+    }
+
+}
diff --git a/stack/core/src/main/java/org/apache/usergrid/corepersistence/pipeline/read/traverse/ReadGraphReverseConnectionFilter.java b/stack/core/src/main/java/org/apache/usergrid/corepersistence/pipeline/read/traverse/ReadGraphReverseConnectionFilter.java
new file mode 100644
index 0000000..aa369c2
--- /dev/null
+++ b/stack/core/src/main/java/org/apache/usergrid/corepersistence/pipeline/read/traverse/ReadGraphReverseConnectionFilter.java
@@ -0,0 +1,53 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.usergrid.corepersistence.pipeline.read.traverse;
+
+import com.google.inject.Inject;
+import com.google.inject.assistedinject.Assisted;
+import org.apache.usergrid.corepersistence.asyncevents.AsyncEventService;
+import org.apache.usergrid.corepersistence.asyncevents.EventBuilder;
+import org.apache.usergrid.corepersistence.rx.impl.AsyncRepair;
+import org.apache.usergrid.persistence.core.rx.RxTaskScheduler;
+import org.apache.usergrid.persistence.graph.GraphManagerFactory;
+
+import static org.apache.usergrid.corepersistence.util.CpNamingUtils.getEdgeTypeFromConnectionType;
+
+/**
+ * Created by ayeshadastagiri on 8/9/16.
+ */
+public class ReadGraphReverseConnectionFilter extends AbstractReadReverseGraphFilter{
+    private final String connectionName;
+
+    /**
+     * Create a new instance of our command
+     */
+    @Inject
+    public ReadGraphReverseConnectionFilter( final GraphManagerFactory graphManagerFactory,
+                                      @AsyncRepair final RxTaskScheduler rxTaskScheduler,
+                                      final EventBuilder eventBuilder,
+                                      final AsyncEventService asyncEventService,
+                                      @Assisted final String connectionName ) {
+        super( graphManagerFactory, rxTaskScheduler, eventBuilder, asyncEventService );
+        this.connectionName = connectionName;
+    }
+    @Override
+    protected String getEdgeTypeName() {
+        return getEdgeTypeFromConnectionType( connectionName );    }
+}
diff --git a/stack/core/src/main/java/org/apache/usergrid/corepersistence/service/ConnectionSearch.java b/stack/core/src/main/java/org/apache/usergrid/corepersistence/service/ConnectionSearch.java
index 51f6768..8ad57fb 100644
--- a/stack/core/src/main/java/org/apache/usergrid/corepersistence/service/ConnectionSearch.java
+++ b/stack/core/src/main/java/org/apache/usergrid/corepersistence/service/ConnectionSearch.java
@@ -36,11 +36,12 @@
     private final int limit;
     private final Optional<String> query;
     private final Optional<String> cursor;
+    private final boolean isConnecting;
 
 
     public ConnectionSearch( final ApplicationScope applicationScope, final Id sourceNodeId, final Optional<String> entityType,
                              final String connectionName, final int limit, final Optional<String> query, final
-                             Optional<String> cursor ) {
+                             Optional<String> cursor, boolean isConnecting ) {
         this.applicationScope = applicationScope;
         this.sourceNodeId = sourceNodeId;
         this.entityType = entityType;
@@ -48,6 +49,7 @@
         this.limit = limit;
         this.query = query;
         this.cursor = cursor;
+        this.isConnecting = isConnecting;
     }
 
 
@@ -84,4 +86,8 @@
     public Optional<String> getEntityType() {
         return entityType;
     }
+
+    public boolean getIsConnecting(){
+        return isConnecting;
+    }
 }
diff --git a/stack/core/src/main/java/org/apache/usergrid/corepersistence/service/ConnectionServiceImpl.java b/stack/core/src/main/java/org/apache/usergrid/corepersistence/service/ConnectionServiceImpl.java
index 4b7e66c..926c676 100644
--- a/stack/core/src/main/java/org/apache/usergrid/corepersistence/service/ConnectionServiceImpl.java
+++ b/stack/core/src/main/java/org/apache/usergrid/corepersistence/service/ConnectionServiceImpl.java
@@ -94,8 +94,13 @@
 
 
         if ( !query.isPresent() ) {
-            results =
-                pipelineBuilder.traverseConnection( search.getConnectionName(), search.getEntityType() ).loadEntities();
+            if(search.getIsConnecting()){
+                results = pipelineBuilder.traverseReverseConnection(search.getConnectionName(), search.getEntityType()).loadEntities();
+            }
+            else {
+                results =
+                    pipelineBuilder.traverseConnection(search.getConnectionName(), search.getEntityType()).loadEntities();
+            }
         }
 
         else {
diff --git a/stack/core/src/main/java/org/apache/usergrid/mq/cassandra/CassandraMQUtils.java b/stack/core/src/main/java/org/apache/usergrid/mq/cassandra/CassandraMQUtils.java
index d48b6d2..388c895 100644
--- a/stack/core/src/main/java/org/apache/usergrid/mq/cassandra/CassandraMQUtils.java
+++ b/stack/core/src/main/java/org/apache/usergrid/mq/cassandra/CassandraMQUtils.java
@@ -244,7 +244,9 @@
             queuePath = "/";
         }
 
-        logger.info( "QueueManagerFactoryImpl.getFromQueue: {}", queuePath );
+        if ( logger.isDebugEnabled() ) {
+            logger.debug( "QueueManagerFactoryImpl.getFromQueue: {}", queuePath );
+        }
 
         return Queue.getQueueId( queuePath );
     }
diff --git a/stack/core/src/main/java/org/apache/usergrid/persistence/PersistenceModule.java b/stack/core/src/main/java/org/apache/usergrid/persistence/PersistenceModule.java
index 70fff90..a945462 100644
--- a/stack/core/src/main/java/org/apache/usergrid/persistence/PersistenceModule.java
+++ b/stack/core/src/main/java/org/apache/usergrid/persistence/PersistenceModule.java
@@ -20,14 +20,12 @@
 package org.apache.usergrid.persistence;
 
 
-import org.springframework.beans.factory.BeanFactory;
-import org.springframework.beans.factory.ListableBeanFactory;
-import org.springframework.context.ApplicationContext;
-
-
 import com.google.inject.AbstractModule;
 import com.google.inject.Provider;
 import com.google.inject.spring.SpringIntegration;
+import org.apache.usergrid.mq.QueueManagerFactory;
+import org.springframework.beans.factory.BeanFactory;
+import org.springframework.context.ApplicationContext;
 
 
 /**
@@ -60,6 +58,10 @@
         final Provider<EntityManagerFactory> emfProvider = SpringIntegration.fromSpring( EntityManagerFactory.class, "entityManagerFactory" );
 
         bind( EntityManagerFactory.class ).toProvider(  emfProvider );
+
+        final Provider<QueueManagerFactory> qmfProvider = SpringIntegration.fromSpring( QueueManagerFactory.class, "queueManagerFactory" );
+        bind( QueueManagerFactory.class ).toProvider(  qmfProvider );
+
     }
 
 
diff --git a/stack/core/src/main/java/org/apache/usergrid/persistence/Query.java b/stack/core/src/main/java/org/apache/usergrid/persistence/Query.java
index 150a1b0..d68c085 100644
--- a/stack/core/src/main/java/org/apache/usergrid/persistence/Query.java
+++ b/stack/core/src/main/java/org/apache/usergrid/persistence/Query.java
@@ -19,36 +19,25 @@
 package org.apache.usergrid.persistence;
 
 
-import java.io.IOException;
-import java.io.Serializable;
-import java.io.UnsupportedEncodingException;
-import java.net.URLDecoder;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.UUID;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import org.apache.commons.codec.binary.Base64;
+import com.fasterxml.jackson.annotation.JsonIgnore;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.google.common.base.Optional;
 import org.apache.commons.lang.StringUtils;
-
 import org.apache.usergrid.persistence.index.SelectFieldMapping;
 import org.apache.usergrid.persistence.index.exceptions.QueryParseException;
 import org.apache.usergrid.persistence.index.query.CounterResolution;
 import org.apache.usergrid.persistence.index.query.Identifier;
 import org.apache.usergrid.persistence.index.query.tree.Operand;
 import org.apache.usergrid.persistence.index.utils.ClassUtils;
-import org.apache.usergrid.persistence.index.utils.ConversionUtils;
 import org.apache.usergrid.persistence.index.utils.ListUtils;
 import org.apache.usergrid.persistence.index.utils.MapUtils;
 
-import com.fasterxml.jackson.annotation.JsonIgnore;
-import com.fasterxml.jackson.databind.ObjectMapper;
-import com.google.common.base.Optional;
+import java.io.IOException;
+import java.io.Serializable;
+import java.io.UnsupportedEncodingException;
+import java.net.URLDecoder;
+import java.util.*;
+import java.util.Map.Entry;
 
 
 public class Query {
@@ -82,6 +71,7 @@
     private Long startTime;
     private Long finishTime;
     private boolean pad;
+    private boolean connecting = false;
     private CounterResolution resolution = CounterResolution.ALL;
     private List<Identifier> identifiers;
     private List<CounterFilterPredicate> counterFilters;
@@ -611,6 +601,15 @@
         this.pad = pad;
     }
 
+    //set the flag to retrieve the edges in the reverse direction.
+    public void setConnecting( boolean connecting ) {
+        this.connecting = connecting;
+    }
+
+    public boolean isConnecting() {
+        return connecting;
+    }
+
 
     public void setResolution( CounterResolution resolution ) {
         this.resolution = resolution;
diff --git a/stack/core/src/test/java/org/apache/usergrid/corepersistence/index/IndexServiceTest.java b/stack/core/src/test/java/org/apache/usergrid/corepersistence/index/IndexServiceTest.java
index adecd9d..ecc2b46 100644
--- a/stack/core/src/test/java/org/apache/usergrid/corepersistence/index/IndexServiceTest.java
+++ b/stack/core/src/test/java/org/apache/usergrid/corepersistence/index/IndexServiceTest.java
@@ -106,10 +106,6 @@
         if ( startedAkka.get(port) == null ) {
 
             actorSystemManager.registerRouterProducer( uniqueValuesService );
-            actorSystemManager.registerMessageType( UniqueValueActor.Request.class, "/user/uvProxy" );
-            actorSystemManager.registerMessageType( UniqueValueActor.Reservation.class, "/user/uvProxy" );
-            actorSystemManager.registerMessageType( UniqueValueActor.Cancellation.class, "/user/uvProxy" );
-            actorSystemManager.registerMessageType( UniqueValueActor.Confirmation.class, "/user/uvProxy" );
             actorSystemManager.start( "localhost", port, "us-east" );
             actorSystemManager.waitForClientActor();
 
diff --git a/stack/core/src/test/java/org/apache/usergrid/persistence/EntityConnectionsIT.java b/stack/core/src/test/java/org/apache/usergrid/persistence/EntityConnectionsIT.java
index be2f06e..d9ee960 100644
--- a/stack/core/src/test/java/org/apache/usergrid/persistence/EntityConnectionsIT.java
+++ b/stack/core/src/test/java/org/apache/usergrid/persistence/EntityConnectionsIT.java
@@ -17,24 +17,16 @@
 package org.apache.usergrid.persistence;
 
 
-import java.util.HashMap;
-import java.util.LinkedHashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.UUID;
-
+import org.apache.usergrid.AbstractCoreIT;
+import org.apache.usergrid.persistence.Query.Level;
+import org.apache.usergrid.persistence.entities.User;
 import org.junit.Test;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import org.apache.usergrid.AbstractCoreIT;
-import org.apache.usergrid.persistence.entities.User;
-import org.apache.usergrid.persistence.Query.Level;
+import java.util.*;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.*;
 
 public class EntityConnectionsIT extends AbstractCoreIT {
     private static final Logger logger = LoggerFactory.getLogger( EntityConnectionsIT.class );
@@ -335,6 +327,53 @@
         assertEquals( "user", res.getEntity().getType() );
     }
 
+    //not required . addd tests at service layer.
+    @Test
+    public void testGetConnectingEntitiesCursor() throws Exception {
+
+        UUID applicationId = app.getId( );
+        assertNotNull( applicationId );
+
+        EntityManager em = app.getEntityManager();
+        assertNotNull( em );
+
+        User fred = new User();
+        fred.setUsername( "fred" );
+        fred.setEmail( "fred@flintstones.com" );
+        Entity fredEntity = em.create( fred );
+        assertNotNull( fredEntity );
+
+        User wilma = new User();
+        wilma.setUsername( "wilma" );
+        wilma.setEmail( "wilma@flintstones.com" );
+        Entity wilmaEntity = em.create( wilma );
+        assertNotNull( wilmaEntity );
+
+        User John = new User();
+        John.setUsername( "John" );
+        John.setEmail( "John@flintstones.com" );
+        Entity JohnEntity = em.create( John );
+        assertNotNull( JohnEntity );
+
+        em.createConnection( fredEntity, "likes", wilmaEntity );
+        em.createConnection( fredEntity, "likes", JohnEntity );
+
+
+        app.refreshIndex();
+
+        // now query via the testConnection, this should work
+
+        Query query = Query.fromQLNullSafe("" );
+        query.setConnectionType( "likes" );
+//        query.setConnecting(true);
+        query.setEntityType( "user" );
+
+        // goes through "traverseReverseConnection"
+        Results r = em.searchTargetEntities(fredEntity, query);
+
+        assertEquals( 2, r.size() );
+    }
+
 
 
 
diff --git a/stack/corepersistence/actorsystem/src/main/java/org/apache/usergrid/persistence/actorsystem/ActorSystemManager.java b/stack/corepersistence/actorsystem/src/main/java/org/apache/usergrid/persistence/actorsystem/ActorSystemManager.java
index c7322dd..17754f0 100644
--- a/stack/corepersistence/actorsystem/src/main/java/org/apache/usergrid/persistence/actorsystem/ActorSystemManager.java
+++ b/stack/corepersistence/actorsystem/src/main/java/org/apache/usergrid/persistence/actorsystem/ActorSystemManager.java
@@ -51,13 +51,6 @@
     void registerRouterProducer( RouterProducer routerProducer );
 
     /**
-     * MUST be called before start() to register any messages to be sent.
-     * @param messageType Class of message.
-     * @param routerPath Router-path to which such messages are to be sent.
-     */
-    void registerMessageType( Class messageType, String routerPath );
-
-    /**
      * Local client for ActorSystem, send all local messages here for routing.
      */
     ActorRef getClientActor();
@@ -75,7 +68,7 @@
     /**
      * Get all regions known to system.
      */
-    public Set<String> getRegions();
+    Set<String> getRegions();
 
     /**
      * Publish message to all topic subscribers in all regions.
diff --git a/stack/corepersistence/actorsystem/src/main/java/org/apache/usergrid/persistence/actorsystem/ActorSystemManagerImpl.java b/stack/corepersistence/actorsystem/src/main/java/org/apache/usergrid/persistence/actorsystem/ActorSystemManagerImpl.java
index d8d284f..8dcb550 100644
--- a/stack/corepersistence/actorsystem/src/main/java/org/apache/usergrid/persistence/actorsystem/ActorSystemManagerImpl.java
+++ b/stack/corepersistence/actorsystem/src/main/java/org/apache/usergrid/persistence/actorsystem/ActorSystemManagerImpl.java
@@ -69,7 +69,6 @@
     private ActorSystem clusterSystem = null;
 
 
-
     @Inject
     public ActorSystemManagerImpl( ActorSystemFig actorSystemFig ) {
         this.actorSystemFig = actorSystemFig;
@@ -132,12 +131,6 @@
 
 
     @Override
-    public void registerMessageType(Class messageType, String routerPath) {
-        routersByMessageType.put( messageType, routerPath );
-    }
-
-
-    @Override
     public ActorRef getClientActor() {
         return clientActor;
     }
@@ -191,16 +184,12 @@
         logger.info("Initializing Akka for hostname {} region {} regionList {} seeds {}",
             hostname, currentRegion, regionList, actorSystemFig.getSeeds() );
 
-        Config config = readClusterSystemConfig();
+        Config config = createConfiguration();
 
-        clusterSystem = createClusterSystemsFromConfigs( config );
+        clusterSystem = createClusterSystem( config );
 
         createClientActors( clusterSystem );
 
-        for ( RouterProducer routerProducer : routerProducers ) {
-            routerProducer.createLocalSystemActors( clusterSystem );
-        }
-
         mediator = DistributedPubSub.get( clusterSystem ).mediator();
     }
 
@@ -273,7 +262,7 @@
     /**
      * Read cluster config and add seed nodes to it.
      */
-    private Config readClusterSystemConfig() {
+    private Config createConfiguration() {
 
         Config config = null;
 
@@ -333,32 +322,33 @@
 
 
     /**
-     * Create actor system for this region, with cluster singleton manager & proxy.
+     * Create cluster system for this the current region
      */
-    private ActorSystem createClusterSystemsFromConfigs( Config config ) {
-
+    private ActorSystem createClusterSystem( Config config ) {
 
         // there is only 1 akka system for a Usergrid cluster
         final String clusterName = "ClusterSystem";
 
-
-        if( clusterSystem == null) {
+        if ( clusterSystem == null) {
 
             logger.info("Class: {}. ActorSystem [{}] not initialized, creating...", this, clusterName);
 
             clusterSystem = ActorSystem.create( clusterName, config );
 
             for ( RouterProducer routerProducer : routerProducers ) {
-                logger.info("Creating router producer [{}] for region [{}]", routerProducer.getName(), currentRegion );
-                routerProducer.createClusterSingletonManager( clusterSystem );
+                logger.info("Creating router [{}] for region [{}]", routerProducer.getRouterPath(), currentRegion );
+                routerProducer.produceRouter( clusterSystem, "io" );
             }
 
             for ( RouterProducer routerProducer : routerProducers ) {
-                logger.info("Creating [{}] proxy for region [{}] role 'io'", routerProducer.getName(), currentRegion);
-                routerProducer.createClusterSingletonProxy( clusterSystem, "io" );
+                Iterator<Class> messageTypes = routerProducer.getMessageTypes().iterator();
+                while ( messageTypes.hasNext() ) {
+                    Class messageType = messageTypes.next();
+                    routersByMessageType.put( messageType, routerProducer.getRouterPath() );
+                }
             }
 
-            //add a shutdown hook to clean all actor systems if the JVM exits without the servlet container knowing
+            // add a shutdown hook to clean all actor systems if the JVM exits without the servlet container knowing
             Runtime.getRuntime().addShutdownHook(new Thread() {
                 @Override
                 public void run() {
@@ -452,8 +442,6 @@
 
         logger.info("Shutting down Akka cluster: {}", clusterSystem.name());
         clusterSystem.shutdown();
-
-
     }
 
 }
diff --git a/stack/corepersistence/actorsystem/src/main/java/org/apache/usergrid/persistence/actorsystem/RouterProducer.java b/stack/corepersistence/actorsystem/src/main/java/org/apache/usergrid/persistence/actorsystem/RouterProducer.java
index d849dd9..5c14c6b 100644
--- a/stack/corepersistence/actorsystem/src/main/java/org/apache/usergrid/persistence/actorsystem/RouterProducer.java
+++ b/stack/corepersistence/actorsystem/src/main/java/org/apache/usergrid/persistence/actorsystem/RouterProducer.java
@@ -19,33 +19,35 @@
 package org.apache.usergrid.persistence.actorsystem;
 
 import akka.actor.ActorSystem;
+
+import java.util.Collection;
 import java.util.Map;
 
 
+/**
+ * Interface used by ActorSystemManager to configure and create an Akka router.
+ */
 public interface RouterProducer {
 
-    String getName();
-
     /**
-     * Create cluster single manager for current region.
-     * Will be called once per router per JVM.
+     * Path to be used to send messages to this router.
      */
-    void createClusterSingletonManager( ActorSystem system );
+    String getRouterPath();
 
     /**
-     * Create cluster singleton proxy for region.
-     * Will be called once per router per JVM per region.
+     * Returns all message types that should be sent to this router for routing.
      */
-    void createClusterSingletonProxy( ActorSystem system, String role );
+    Collection<Class> getMessageTypes();
 
     /**
-     * Create other actors needed to support the router produced by the implementation.
-     */
-    void createLocalSystemActors( ActorSystem localSystem );
-
-    /**
-     * Add configuration for the router to configuration map
+     * Add configuration for the router to existing ActorSystem configuration.
+     * Called before ActorSystem is created.
      */
     void addConfiguration(Map<String, Object> configMap );
 
+    /**
+     * Produce router and any supporting objects.
+     * Called after ActorSystem is created.
+     */
+    void produceRouter( ActorSystem system, String role );
 }
diff --git a/stack/corepersistence/actorsystem/src/test/java/org/apache/usergrid/persistence/actorsystem/ActorServiceServiceTest.java b/stack/corepersistence/actorsystem/src/test/java/org/apache/usergrid/persistence/actorsystem/ActorServiceServiceTest.java
index 7ac7b12..c20b9a1 100644
--- a/stack/corepersistence/actorsystem/src/test/java/org/apache/usergrid/persistence/actorsystem/ActorServiceServiceTest.java
+++ b/stack/corepersistence/actorsystem/src/test/java/org/apache/usergrid/persistence/actorsystem/ActorServiceServiceTest.java
@@ -57,17 +57,11 @@
         RouterProducer routerProducer = Mockito.mock( RouterProducer.class );
         actorSystemManager.registerRouterProducer( routerProducer );
 
-        actorSystemManager.registerMessageType( String.class, "/users/path" );
-        actorSystemManager.registerMessageType( Integer.class, "/users/path" );
-        actorSystemManager.registerMessageType( Long.class, "/users/path" );
-
         actorSystemManager.start( "localhost", 2770, "us-east" );
         actorSystemManager.waitForClientActor();
 
-        verify( routerProducer ).createClusterSingletonManager( any() );
-        verify( routerProducer ).createClusterSingletonProxy( any(), eq("io") );
-        verify( routerProducer ).createLocalSystemActors( any() );
         verify( routerProducer ).addConfiguration( any() );
+        verify( routerProducer ).produceRouter( any(), eq("io") );
 
     }
 
diff --git a/stack/corepersistence/collection/src/main/java/org/apache/usergrid/persistence/collection/uniquevalues/UniqueValuesFig.java b/stack/corepersistence/collection/src/main/java/org/apache/usergrid/persistence/collection/uniquevalues/UniqueValuesFig.java
index edd0cbe..a220adc 100644
--- a/stack/corepersistence/collection/src/main/java/org/apache/usergrid/persistence/collection/uniquevalues/UniqueValuesFig.java
+++ b/stack/corepersistence/collection/src/main/java/org/apache/usergrid/persistence/collection/uniquevalues/UniqueValuesFig.java
@@ -36,6 +36,10 @@
 
     String UNIQUEVALUE_AUTHORITATIVE_REGION = "collection.uniquevalues.authoritative.region";
 
+    String UNIQUEVALUE_REQUEST_TIMEOUT = "collection.uniquevalues.request.timeout";
+
+    String UNIQUEVALUE_REQUEST_RETRY_COUNT = "collection.uniquevalues.request.retrycount";
+
 
     /**
      * Unique Value cache TTL in seconds.
@@ -62,6 +66,19 @@
      * Primary authoritative region (used if none other specified).
      */
     @Key(UNIQUEVALUE_AUTHORITATIVE_REGION)
-    @Default("default")
     String getAuthoritativeRegion();
+
+    /**
+     * Number of milliseconds before timing out the unique value request to the Actor System
+     */
+    @Key(UNIQUEVALUE_REQUEST_TIMEOUT)
+    @Default("5000")
+    int getRequestTimeout();
+
+    /**
+     * Number of actor instances to create on each.
+     */
+    @Key(UNIQUEVALUE_REQUEST_RETRY_COUNT)
+    @Default("2")
+    int getRequestRetryCount();
 }
diff --git a/stack/corepersistence/collection/src/main/java/org/apache/usergrid/persistence/collection/uniquevalues/UniqueValuesServiceImpl.java b/stack/corepersistence/collection/src/main/java/org/apache/usergrid/persistence/collection/uniquevalues/UniqueValuesServiceImpl.java
index 50114be..8bdb02c 100644
--- a/stack/corepersistence/collection/src/main/java/org/apache/usergrid/persistence/collection/uniquevalues/UniqueValuesServiceImpl.java
+++ b/stack/corepersistence/collection/src/main/java/org/apache/usergrid/persistence/collection/uniquevalues/UniqueValuesServiceImpl.java
@@ -35,7 +35,6 @@
 import org.apache.commons.lang3.StringUtils;
 import org.apache.usergrid.persistence.actorsystem.ActorSystemManager;
 import org.apache.usergrid.persistence.actorsystem.GuiceActorProducer;
-import org.apache.usergrid.persistence.collection.serialization.UniqueValue;
 import org.apache.usergrid.persistence.core.scope.ApplicationScope;
 import org.apache.usergrid.persistence.model.entity.Entity;
 import org.apache.usergrid.persistence.model.entity.Id;
@@ -45,10 +44,7 @@
 import scala.concurrent.Await;
 import scala.concurrent.Future;
 
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.Map;
-import java.util.UUID;
+import java.util.*;
 import java.util.concurrent.TimeUnit;
 
 
@@ -81,8 +77,8 @@
 
 
     @Override
-    public String getName() {
-        return "UniqueValues ClusterSingleton Router";
+    public String getRouterPath() {
+        return "/user/uvProxy";
     }
 
 
@@ -151,25 +147,6 @@
     }
 
 
-    // TODO: do we need this or can we rely on UniqueCleanup + Cassandra replication?
-
-//    @Override
-//    public void releaseUniqueValues(ApplicationScope scope, Id entityId, UUID version, String region)
-//        throws UniqueValueException {
-//
-//        ready();
-//
-//        TODO: need to replicate logic from UniqueCleanup and make sure it happens in Authoritative Region
-//
-//        Iterator<UniqueValue> iterator = table.getUniqueValues( scope, entityId );
-//
-//        while ( iterator.hasNext() ) {
-//            UniqueValue uniqueValue = iterator.next();
-//            cancelUniqueField( scope, entityId, uniqueValue.getEntityVersion(), uniqueValue.getField(), region );
-//        }
-//    }
-
-
     private void reserveUniqueField(
         ApplicationScope scope, Entity entity, UUID version, Field field, String region ) throws UniqueValueException {
 
@@ -243,13 +220,13 @@
     private void sendUniqueValueRequest(
         Entity entity, String region, UniqueValueActor.Request request ) throws UniqueValueException {
 
-        int maxRetries = 5;
+        int maxRetries = uniqueValuesFig.getRequestRetryCount();
         int retries = 0;
 
         UniqueValueActor.Response response = null;
         while ( retries++ < maxRetries ) {
             try {
-                Timeout t = new Timeout( 1, TimeUnit.SECONDS );
+                Timeout t = new Timeout( uniqueValuesFig.getRequestTimeout(), TimeUnit.MILLISECONDS );
 
                 Future<Object> fut;
 
@@ -279,15 +256,15 @@
                     break;
 
                 } else if ( response != null  ) {
-                    logger.debug("ERROR status retrying {} entity {} rowkey {}",
+                    logger.warn("ERROR status retrying {} entity {} rowkey {}",
                             retries, entity.getId().getUuid(), request.getConsistentHashKey());
                 } else {
-                    logger.debug("Timed-out retrying {} entity {} rowkey",
+                    logger.warn("Timed-out retrying {} entity {} rowkey",
                             retries, entity.getId().getUuid(), request.getConsistentHashKey());
                 }
 
             } catch ( Exception e ) {
-                logger.debug("{} caused retry {} for entity {} rowkey {}",
+                logger.error("{} caused retry {} for entity {} rowkey {}",
                     e.getClass().getSimpleName(), retries, entity.getId().getUuid(), request.getConsistentHashKey());
             }
         }
@@ -309,9 +286,8 @@
 
 
     @Override
-    public void createClusterSingletonManager(ActorSystem system) {
+    public void produceRouter( ActorSystem system, String role ) {
 
-        // create cluster singleton supervisor for actor system
         ClusterSingletonManagerSettings settings =
             ClusterSingletonManagerSettings.create( system ).withRole("io");
 
@@ -319,54 +295,75 @@
             Props.create( GuiceActorProducer.class, injector, UniqueValuesRouter.class ),
             PoisonPill.getInstance(), settings ), "uvRouter" );
 
-    }
-
-
-    @Override
-    public void createClusterSingletonProxy( ActorSystem system, String role ) {
-
         ClusterSingletonProxySettings proxySettings =
             ClusterSingletonProxySettings.create( system ).withRole( role );
 
         system.actorOf( ClusterSingletonProxy.props( "/user/uvRouter", proxySettings ), "uvProxy" );
+
+        subscribeToReservations( system );
     }
 
 
     @Override
-    public void createLocalSystemActors( ActorSystem localSystem ) {
-        subscribeToReservations( localSystem );
-    }
-
-    @Override
     public void addConfiguration( Map<String, Object> configMap ) {
 
         int numInstancesPerNode = uniqueValuesFig.getUniqueValueInstancesPerNode();
 
-        Map<String, Object> akka = (Map<String, Object>)configMap.get("akka");
+        // TODO: replace this configuration stuff with equivalent Java code in the above "create" methods?
 
-        // TODO: replace this configuration stuff with equivalent Java code in the above "create" methods
+        // be careful not to overwrite configurations that other router producers may have added
 
-        akka.put( "actor", new HashMap<String, Object>() {{
-            put( "deployment", new HashMap<String, Object>() {{
-                put( "/uvRouter/singleton/router", new HashMap<String, Object>() {{
-                    put( "router", "consistent-hashing-pool" );
-                    put( "cluster", new HashMap<String, Object>() {{
-                        put( "enabled", "on" );
-                        put( "allow-local-routees", "on" );
-                        put( "use-role", "io" );
-                        put( "max-nr-of-instances-per-node", numInstancesPerNode );
-                        put( "failure-detector", new HashMap<String, Object>() {{
-                            put( "threshold", "10" );
-                            put( "acceptable-heartbeat-pause", "3 s" );
-                            put( "heartbeat-interval", "1 s" );
-                            put( "heartbeat-request", new HashMap<String, Object>() {{
-                                put( "expected-response-after", "3 s" );
-                            }} );
-                        }} );
+        Map<String, Object> akka = (Map<String, Object>) configMap.get( "akka" );
+        final Map<String, Object> deploymentMap;
+
+        if ( akka.get( "actor" ) == null ) {
+
+            // nobody has created anything under "actor" yet, so create it now
+            deploymentMap = new HashMap<>();
+            akka.put( "actor", new HashMap<String, Object>() {{
+                put( "deployment", deploymentMap );
+            }} );
+
+        } else if (((Map) akka.get( "actor" )).get( "deployment" ) == null) {
+
+            // nobody has created anything under "actor/deployment" yet, so create it now
+            deploymentMap = new HashMap<>();
+            ((Map) akka.get( "actor" )).put( "deployment", deploymentMap );
+
+        } else {
+
+            // somebody else already created "actor/deployment" config so use it
+            deploymentMap = (Map<String, Object>) ((Map) akka.get( "actor" )).get( "deployment" );
+        }
+
+        deploymentMap.put( "/uvRouter/singleton/router", new HashMap<String, Object>() {{
+            put( "router", "consistent-hashing-pool" );
+            put( "cluster", new HashMap<String, Object>() {{
+                put( "enabled", "on" );
+                put( "allow-local-routees", "on" );
+                put( "use-role", "io" );
+                put( "max-nr-of-instances-per-node", numInstancesPerNode );
+                put( "failure-detector", new HashMap<String, Object>() {{
+                    put( "threshold", "10" );
+                    put( "acceptable-heartbeat-pause", "3 s" );
+                    put( "heartbeat-interval", "1 s" );
+                    put( "heartbeat-request", new HashMap<String, Object>() {{
+                        put( "expected-response-after", "3 s" );
                     }} );
                 }} );
             }} );
         }} );
 
     }
+
+
+    @Override
+    public Collection<Class> getMessageTypes() {
+        List<Class> messageTypes = new ArrayList<>();
+        messageTypes.add( UniqueValueActor.Request.class);
+        messageTypes.add( UniqueValueActor.Reservation.class);
+        messageTypes.add( UniqueValueActor.Cancellation.class);
+        messageTypes.add( UniqueValueActor.Confirmation.class);
+        return messageTypes;
+    }
 }
diff --git a/stack/corepersistence/collection/src/test/java/org/apache/usergrid/persistence/collection/AbstractUniqueValueTest.java b/stack/corepersistence/collection/src/test/java/org/apache/usergrid/persistence/collection/AbstractUniqueValueTest.java
index 3bfc48b..cff70ee 100644
--- a/stack/corepersistence/collection/src/test/java/org/apache/usergrid/persistence/collection/AbstractUniqueValueTest.java
+++ b/stack/corepersistence/collection/src/test/java/org/apache/usergrid/persistence/collection/AbstractUniqueValueTest.java
@@ -36,10 +36,6 @@
         if ( startedAkka.get(port) == null ) {
 
             actorSystemManager.registerRouterProducer( uniqueValuesService );
-            actorSystemManager.registerMessageType( UniqueValueActor.Request.class, "/user/uvProxy" );
-            actorSystemManager.registerMessageType( UniqueValueActor.Reservation.class, "/user/uvProxy" );
-            actorSystemManager.registerMessageType( UniqueValueActor.Cancellation.class, "/user/uvProxy" );
-            actorSystemManager.registerMessageType( UniqueValueActor.Confirmation.class, "/user/uvProxy" );
             actorSystemManager.start( "localhost", port, "us-east" );
             actorSystemManager.waitForClientActor();
 
diff --git a/stack/corepersistence/queryindex/src/main/java/org/apache/usergrid/persistence/index/impl/EsEntityIndexImpl.java b/stack/corepersistence/queryindex/src/main/java/org/apache/usergrid/persistence/index/impl/EsEntityIndexImpl.java
index d2aff7e..6e04bed 100644
--- a/stack/corepersistence/queryindex/src/main/java/org/apache/usergrid/persistence/index/impl/EsEntityIndexImpl.java
+++ b/stack/corepersistence/queryindex/src/main/java/org/apache/usergrid/persistence/index/impl/EsEntityIndexImpl.java
@@ -56,12 +56,12 @@
 import org.elasticsearch.action.deletebyquery.IndexDeleteByQueryResponse;
 import org.elasticsearch.action.search.SearchRequestBuilder;
 import org.elasticsearch.action.search.SearchResponse;
-import org.elasticsearch.action.search.SearchScrollRequestBuilder;
 import org.elasticsearch.client.AdminClient;
 import org.elasticsearch.common.settings.ImmutableSettings;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.index.query.*;
+import org.elasticsearch.index.query.QueryBuilder;
 import org.elasticsearch.indices.IndexAlreadyExistsException;
 import org.elasticsearch.search.SearchHit;
 import org.elasticsearch.search.SearchHits;
@@ -110,7 +110,7 @@
     private static final String VERIFY_TYPE = "entity";
 
     private static final ImmutableMap<String, Object> DEFAULT_PAYLOAD =
-            ImmutableMap.<String, Object>builder().put(IndexingUtils.ENTITY_ID_FIELDNAME, UUIDGenerator.newTimeUUID().toString()).build();
+        ImmutableMap.<String, Object>builder().put(IndexingUtils.ENTITY_ID_FIELDNAME, UUIDGenerator.newTimeUUID().toString()).build();
 
 
     private final ApplicationScope applicationScope;
@@ -196,7 +196,7 @@
                 Settings settings = ImmutableSettings.settingsBuilder()
                     .put("index.number_of_shards", numberOfShards)
                     .put("index.number_of_replicas", numberOfReplicas)
-                        //dont' allow unmapped queries, and don't allow dynamic mapping
+                    //dont' allow unmapped queries, and don't allow dynamic mapping
                     .put("index.query.parse.allow_unmapped_fields", false)
                     .put("index.mapper.dynamic", false)
                     .put("action.write_consistency", writeConsistency)
@@ -205,9 +205,9 @@
                 //Added For Graphite Metrics
                 Timer.Context timeNewIndexCreation = addTimer.time();
                 final CreateIndexResponse cir = admin.indices().prepareCreate(indexName)
-                        .setSettings(settings)
+                    .setSettings(settings)
                     .execute()
-                        .actionGet();
+                    .actionGet();
                 timeNewIndexCreation.stop();
 
                 //create the mappings
@@ -301,11 +301,11 @@
             final String tempId = UUIDGenerator.newTimeUUID().toString();
 
             esProvider.getClient().prepareIndex( alias.getWriteAlias(), VERIFY_TYPE, tempId )
-                 .setSource(DEFAULT_PAYLOAD).get();
+                .setSource(DEFAULT_PAYLOAD).get();
 
             if (logger.isTraceEnabled()) {
                 logger.trace("Successfully created new document with docId {} in index read {} write {} and type {}",
-                        tempId, alias.getReadAlias(), alias.getWriteAlias(), VERIFY_TYPE);
+                    tempId, alias.getReadAlias(), alias.getWriteAlias(), VERIFY_TYPE);
             }
 
             // delete all types, this way if we miss one it will get cleaned up
@@ -313,7 +313,7 @@
 
             if (logger.isTraceEnabled()) {
                 logger.trace("Successfully deleted  documents in read {} write {} and type {} with id {}",
-                        alias.getReadAlias(), alias.getWriteAlias(), VERIFY_TYPE, tempId);
+                    alias.getReadAlias(), alias.getWriteAlias(), VERIFY_TYPE, tempId);
             }
 
             return true;
@@ -333,7 +333,7 @@
         //Added For Graphite Metrics
         Timer.Context timePutIndex = mappingTimer.time();
         PutMappingResponse  pitr = esProvider.getClient().admin().indices().preparePutMapping( indexName ).setType( "entity" ).setSource(
-                getMappingsContent() ).execute().actionGet();
+            getMappingsContent() ).execute().actionGet();
         timePutIndex.stop();
         if ( !pitr.isAcknowledged() ) {
             throw new IndexException( "Unable to create default mappings" );
@@ -381,7 +381,7 @@
         }
         if (logger.isTraceEnabled()) {
             logger.trace("Refreshed indexes: {},success:{} failed:{} ", StringUtils.join(indexes, ", "),
-                    successfulShards, failedShards);
+                successfulShards, failedShards);
         }
 
         IndexRefreshCommandInfo refreshResults = new IndexRefreshCommandInfo(failedShards == 0,
@@ -431,7 +431,7 @@
                 searchTypes.getTypeNames( applicationScope ), srb );
         }
 
-         //Added For Graphite Metrics
+        //Added For Graphite Metrics
         final Timer.Context timerContext = searchTimer.time();
 
         try {
@@ -493,11 +493,13 @@
                 .rangeQuery(IndexingUtils.EDGE_TIMESTAMP_FIELDNAME)
                 .gte(queryTimestamp);
 
-            QueryBuilder finalQuery = QueryBuilders
-                .boolQuery()
-                .must(entityIdQuery)
-                .must(nodeIdQuery)
-                .must(timestampQuery);
+            QueryBuilder finalQuery = QueryBuilders.constantScoreQuery(
+                QueryBuilders
+                    .boolQuery()
+                    .must(entityIdQuery)
+                    .must(nodeIdQuery)
+                    .must(timestampQuery)
+            );
 
             searchResponse = srb
                 .setQuery(finalQuery)
@@ -552,10 +554,13 @@
                 .gte(queryTimestamp)
                 .lt(markedTimestamp);
 
-            QueryBuilder finalQuery = QueryBuilders
-                .boolQuery()
-                .must(timestampQuery)
-                .must(nodeQuery);
+            QueryBuilder finalQuery = QueryBuilders.constantScoreQuery(
+                QueryBuilders
+                    .boolQuery()
+                    .must(timestampQuery)
+                    .must(nodeQuery)
+            );
+
 
             searchResponse = srb
                 .setQuery(finalQuery)
@@ -684,7 +689,7 @@
                             candidateResult.getVersion(),
                             markedVersion,
                             candidateResult.getId()
-                            );
+                        );
                     }
 
                     candidates.add(candidateResult);
@@ -745,7 +750,7 @@
 
         try {
             ClusterHealthResponse chr = esProvider.getClient().admin()
-                    .cluster().health(new ClusterHealthRequest()).get();
+                .cluster().health(new ClusterHealthRequest()).get();
             return Health.valueOf( chr.getStatus().name() );
         }
         catch ( Exception ex ) {
@@ -765,8 +770,8 @@
 
         try {
             String[] indexNames = this.getIndexes();
-           final ActionFuture<ClusterHealthResponse> future =  esProvider.getClient().admin().cluster().health(
-               new ClusterHealthRequest( indexNames  ) );
+            final ActionFuture<ClusterHealthResponse> future =  esProvider.getClient().admin().cluster().health(
+                new ClusterHealthRequest( indexNames  ) );
 
             //only wait 2 seconds max
             ClusterHealthResponse chr = future.actionGet(2000);
diff --git a/stack/corepersistence/queryindex/src/test/java/org/apache/usergrid/persistence/index/impl/EntityIndexTest.java b/stack/corepersistence/queryindex/src/test/java/org/apache/usergrid/persistence/index/impl/EntityIndexTest.java
index c2e259e..e64db83 100644
--- a/stack/corepersistence/queryindex/src/test/java/org/apache/usergrid/persistence/index/impl/EntityIndexTest.java
+++ b/stack/corepersistence/queryindex/src/test/java/org/apache/usergrid/persistence/index/impl/EntityIndexTest.java
@@ -390,7 +390,7 @@
 
         StopWatch timer = new StopWatch();
         timer.start();
-        CandidateResults candidateResults  = entityIndex.search( scope, searchTypes, queryString, num == 0 ?  1 : num  , 0 );
+        CandidateResults candidateResults  = entityIndex.search( scope, searchTypes, queryString, 1000, 0 );
 
         timer.stop();
 
@@ -402,6 +402,16 @@
 
     private void testQueries( final SearchEdge scope, SearchTypes searchTypes) {
 
+        testQuery( scope, searchTypes, "age > 35", 29 );
+
+        testQuery( scope, searchTypes, "age <= 35", 73 );
+
+        testQuery( scope, searchTypes, "age <= 35 or age > 35", 102 );
+
+        // TODO: uncomment this test when you are ready to fix USERGRID-1314
+        // (https://issues.apache.org/jira/browse/USERGRID-1314)
+        // testQuery( scope, searchTypes, "name = 'astro*' or age > 35", 29 );
+
         testQuery( scope, searchTypes, "name = 'Morgan Pierce'", 1 );
 
         testQuery( scope, searchTypes, "name = 'morgan pierce'", 1 );
diff --git a/stack/pom.xml b/stack/pom.xml
index e65826f..5186a13 100644
--- a/stack/pom.xml
+++ b/stack/pom.xml
@@ -210,8 +210,8 @@
         <!--
         Re-enable when query-validator updated to work with Core Persistence.
         https://issues.apache.org/jira/browse/USERGRID-221
-        <module>query-validator</module>
         -->
+        <module>query-validator</module>
     </modules>
 
     <dependencyManagement>
diff --git a/stack/query-validator/pom.xml b/stack/query-validator/pom.xml
index c161afc..4355986 100644
--- a/stack/query-validator/pom.xml
+++ b/stack/query-validator/pom.xml
@@ -21,7 +21,7 @@
     <parent>
         <groupId>org.apache.usergrid</groupId>
         <artifactId>usergrid</artifactId>
-        <version>2.0.0-SNAPSHOT</version>
+        <version>2.2.0-SNAPSHOT</version>
     </parent>
     <properties>
         <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
@@ -51,12 +51,16 @@
             </testResource>
         </testResources>
         <plugins>
+
             <plugin>
                 <groupId>org.apache.maven.plugins</groupId>
                 <artifactId>maven-surefire-plugin</artifactId>
                 <configuration>
-                    <argLine>-Xmx${ug.heapmax} -Xms${ug.heapmin} -Dfile.encoding=UTF-8 -Dsun.jnu.encoding=UTF-8 ${ug.argline}</argLine>
-
+                    <argLine>
+                    -Dwebapp.directory=${basedir}/../rest/src/main/webapp
+                    -Xmx${ug.heapmax} -Xms${ug.heapmin}
+                    -Dfile.encoding=UTF-8 -Dsun.jnu.encoding=UTF-8 ${ug.argline}
+                    </argLine>
                     <includes>
                         <include>**/*Suite.java</include>
                     </includes>
@@ -71,10 +75,10 @@
             <plugin>
                 <groupId>org.apache.maven.plugins</groupId>
                 <artifactId>maven-compiler-plugin</artifactId>
-                <version>2.3.2</version>
+                <version>3.3</version>
                 <configuration>
-                    <source>1.7</source>
-                    <target>1.7</target>
+                    <source>1.8</source>
+                    <target>1.8</target>
                     <optimize>true</optimize>
                     <debug>true</debug>
                     <showDeprecation>true</showDeprecation>
@@ -246,24 +250,10 @@
             <scope>test</scope>
         </dependency>
 
-        <!--  use the external test client.  Just depend on the maven jetty plugin to launch jetty -->
-        <dependency>
-            <groupId>com.sun.jersey.jersey-test-framework</groupId>
-            <artifactId>jersey-test-framework-external</artifactId>
-            <scope>test</scope>
-        </dependency>
-
-        <dependency>
-            <groupId>com.sun.jersey.jersey-test-framework</groupId>
-            <artifactId>jersey-test-framework-core</artifactId>
-            <scope>test</scope>
-        </dependency>
-
-
-
         <dependency>
             <groupId>org.apache.usergrid</groupId>
             <artifactId>usergrid-java-client</artifactId>
+            <version>${project.version}</version>
         </dependency>
 
         <dependency>
diff --git a/stack/query-validator/src/main/java/org/apache/usergrid/query/validator/ApiServerRunner.java b/stack/query-validator/src/main/java/org/apache/usergrid/query/validator/ApiServerRunner.java
index b06e40b..922cd02 100644
--- a/stack/query-validator/src/main/java/org/apache/usergrid/query/validator/ApiServerRunner.java
+++ b/stack/query-validator/src/main/java/org/apache/usergrid/query/validator/ApiServerRunner.java
@@ -16,21 +16,16 @@
  */
 package org.apache.usergrid.query.validator;
 
-import com.fasterxml.jackson.databind.JsonNode;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.logging.Level;
+import java.util.*;
 import java.util.logging.Logger;
-import org.apache.commons.lang.StringUtils;
+import org.apache.usergrid.java.client.UsergridClient;
+import org.apache.usergrid.java.client.auth.UsergridUserAuth;
+import org.apache.usergrid.java.client.model.UsergridEntity;
+import org.apache.usergrid.java.client.query.UsergridQuery;
+import org.apache.usergrid.java.client.response.UsergridResponse;
 import org.apache.usergrid.persistence.Entity;
 import org.apache.usergrid.persistence.Schema;
-import org.springframework.http.HttpMethod;
 import org.springframework.stereotype.Component;
-import org.apache.usergrid.java.client.Client;
-import org.apache.usergrid.java.client.response.ApiResponse;
-import static org.apache.usergrid.java.client.utils.ObjectUtils.isEmpty;
 
 
 /**
@@ -40,7 +35,7 @@
 public class ApiServerRunner implements QueryRunner {
 
     private Logger logger = Logger.getLogger(SqliteRunner.class.getName());
-    private Client client;
+    private UsergridClient client;
 
     private String org;
     private String app;
@@ -52,62 +47,38 @@
 
     @Override
     public boolean setup() {
-        client = new Client(getOrg(), getApp()).withApiUrl(getBaseUri());
-        String accessToken = authorize(email, password);
-        if(!StringUtils.isEmpty(accessToken))
-            client.setAccessToken(accessToken);
+
+        client = new UsergridClient(getOrg(), getApp(), getBaseUri());
+        UsergridUserAuth usergridUserAuth = new UsergridUserAuth(email, password, true);
+        client.authenticateUser(usergridUserAuth);
+
         return insertDatas();
     }
 
-    public String authorize(String email, String password) {
-        String accessToken = null;
-        Map<String, Object> formData = new HashMap<String, Object>();
-        formData.put("grant_type", "password");
-        formData.put("username", email);
-        formData.put("password", password);
-        ApiResponse response = client.apiRequest(HttpMethod.POST, null, formData,
-                "management", "token");
-        if (!isEmpty(response.getAccessToken())) {
-            accessToken = response.getAccessToken();
-            logger.info("Access token: " + accessToken);
-        } else {
-            logger.info("Response: " + response);
-        }
-        return accessToken;
-    }
-
     public boolean insertDatas() {
-       List<org.apache.usergrid.java.client.entities.Entity> clientEntities = getEntitiesForClient(getEntities());
-       for(org.apache.usergrid.java.client.entities.Entity entity : clientEntities) {
-           ApiResponse response = client.createEntity(entity);
-           if( response == null || !StringUtils.isEmpty(response.getError()) ) {
-               logger.log(Level.SEVERE, response.getErrorDescription());
-               //throw new RuntimeException(response.getErrorDescription());
-           } else {
-               logger.log(Level.INFO, response.toString());
-           }
-       }
+       List<UsergridEntity> clientEntities = getEntitiesForClient(getEntities());
+       client.POST(clientEntities);
        return true;
     }
 
-    private List<org.apache.usergrid.java.client.entities.Entity> getEntitiesForClient(List<Entity> entities) {
-        List<org.apache.usergrid.java.client.entities.Entity> clientEntities = new ArrayList<org.apache.usergrid.java.client.entities.Entity>();
+    private List<UsergridEntity> getEntitiesForClient(List<Entity> entities) {
+        List<UsergridEntity> clientEntities = new ArrayList<>();
         for(Entity entity : entities) {
-            org.apache.usergrid.java.client.entities.Entity clientEntity = new org.apache.usergrid.java.client.entities.Entity();
-            clientEntity.setType(entity.getType());
+            UsergridEntity clientEntity = new UsergridEntity(entity.getType());
+
             Map<String, Object> properties = Schema.getDefaultSchema().getEntityProperties(entity);
             for(String key : properties.keySet()) {
                 Object value = entity.getProperty(key);
                 if( value instanceof String )
-                    clientEntity.setProperty(key,(String)value );
+                    clientEntity.putProperty(key,(String)value );
                 else if( value instanceof Long )
-                    clientEntity.setProperty(key,(Long)value );
+                    clientEntity.putProperty(key,(Long)value );
                 else if( value instanceof Integer )
-                    clientEntity.setProperty(key,(Integer)value );
+                    clientEntity.putProperty(key,(Integer)value );
                 else if( value instanceof Float )
-                    clientEntity.setProperty(key,(Float)value );
+                    clientEntity.putProperty(key,(Float)value );
                 else if( value instanceof Boolean )
-                    clientEntity.setProperty(key,(Boolean)value );
+                    clientEntity.putProperty(key,(Boolean)value );
             }
             clientEntities.add(clientEntity);
         }
@@ -121,32 +92,20 @@
 
     @Override
     public List<Entity> execute(String query, int limit) {
-        Map<String, Object> params = new HashMap<String, Object>();
-        params.put("ql", query);
-        params.put("limit", limit);
-        ApiResponse response = client.apiRequest(HttpMethod.GET, params, null, getOrg(), getApp(), getCollection());
+        UsergridQuery usergridQuery = new UsergridQuery().ql(query).limit(limit).type(getCollection());
+        UsergridResponse response = client.GET(usergridQuery);
         List<Entity> entities = new ArrayList<Entity>();
         if( response.getEntities() == null )
             return entities;
 
-        for(org.apache.usergrid.java.client.entities.Entity clientEntitity : response.getEntities()) {
+        for(UsergridEntity clientEntity : response.getEntities()) {
             Entity entity = new QueryEntity();
-            entity.setUuid(clientEntitity.getUuid());
-            entity.setType(clientEntitity.getType());
-            Map<String, JsonNode> values = clientEntitity.getProperties();
+            entity.setUuid(UUID.fromString(clientEntity.getUuid()));
+            entity.setType(clientEntity.getType());
+            Map<String, ?> values = clientEntity.toMapValue();
             for( String key : values.keySet() ) {
-                JsonNode node = values.get(key);
-                if( node.isBoolean() ) {
-                    entity.setProperty(key, node.asBoolean());
-                } else if( node.isInt() ) {
-                    entity.setProperty(key, node.asInt());
-                } else if( node.isLong() ) {
-                    entity.setProperty(key, node.asLong());
-                } else if( node.isDouble() ) {
-                    entity.setProperty(key, node.asDouble());
-                } else {
-                    entity.setProperty(key, node.asText());
-                }
+                Object node = values.get(key);
+                entity.setProperty(key, node);
             }
             entities.add(entity);
         }
diff --git a/stack/query-validator/src/test/java/org/apache/usergrid/query/validator/AbstractQueryIT.java b/stack/query-validator/src/test/java/org/apache/usergrid/query/validator/AbstractQueryIT.java
index 212527d..b751621 100644
--- a/stack/query-validator/src/test/java/org/apache/usergrid/query/validator/AbstractQueryIT.java
+++ b/stack/query-validator/src/test/java/org/apache/usergrid/query/validator/AbstractQueryIT.java
@@ -18,13 +18,13 @@
 
 import org.apache.commons.io.FileUtils;
 import org.apache.commons.lang.StringUtils;
-import org.junit.BeforeClass;
 import org.apache.usergrid.management.ApplicationInfo;
 import org.apache.usergrid.management.ManagementService;
-import org.apache.usergrid.management.OrganizationInfo;
-import org.apache.usergrid.management.UserInfo;
+import org.apache.usergrid.management.OrganizationOwnerInfo;
 import org.apache.usergrid.persistence.Entity;
+import org.apache.usergrid.rest.TomcatRuntime;
 import org.apache.usergrid.utils.JsonUtils;
+import org.junit.BeforeClass;
 
 import java.io.IOException;
 import java.net.URL;
@@ -33,11 +33,14 @@
 
 import static org.junit.Assert.assertNotNull;
 
+
 /**
  * @author Sungju Jin
  */
 public class AbstractQueryIT {
 
+    public static TomcatRuntime tomcatRuntime = TomcatRuntime.getInstance();
+
     protected static QueryValidator validator;
     private static Properties properties;
     private static String fullEndpoint;
@@ -49,8 +52,8 @@
 
     @BeforeClass
     public static void tearsup() throws Exception {
-        validator = QueryITSuite.cassandraResource.getBean(QueryValidator.class);
-        properties = QueryITSuite.cassandraResource.getBean("properties",Properties.class);
+        validator = QueryITSuite.serverResource.getSpringResource().getBean(QueryValidator.class);
+        properties = QueryITSuite.serverResource.getSpringResource().getBean("properties",Properties.class);
         if( isDisableLocalServer()) {
             return;
         }
@@ -64,15 +67,15 @@
         appName = appName + uuid;
         email = orgName + "@usergrid.com";
         ManagementService managementService = QueryITSuite.serverResource.getMgmtSvc();
-        UserInfo user = managementService.createAdminUser(orgName, "Query Test", email, password, false, false);
-        OrganizationInfo org = managementService.createOrganization(orgName, user, false );
-        assertNotNull( org );
-        ApplicationInfo app = managementService.createApplication( org.getUuid(), appName);
+        OrganizationOwnerInfo ownerInfo = managementService.createOwnerAndOrganization(
+            orgName, orgName, orgName, email, password, false, false );
+        assertNotNull( ownerInfo );
+        ApplicationInfo app = managementService.createApplication( ownerInfo.getOrganization().getUuid(), appName);
         assertNotNull( app );
     }
 
     private static void setProperties() {
-        port = QueryITSuite.serverResource.getTomcatPort();
+        port = tomcatRuntime.getPort();
         fullEndpoint = (String)properties.get("usergrid.query.validator.api.endpoint") + ":" + port;
         orgName = (String)properties.get("usergrid.query.validator.api.organization");
         appName = (String)properties.get("usergrid.query.validator.api.app");
@@ -80,7 +83,7 @@
         password = (String)properties.get("usergrid.query.validator.api.authorize.password");
     }
 
-    protected static void createInitializationDatas(String collection) {
+    protected static void createInitializationDatas(String collection) throws InterruptedException{
         List<Entity> entities = loadEntitiesFromResource(collection);
         QueryValidationConfiguration configuration = new QueryValidationConfiguration();
         configuration.setEndpointUri(fullEndpoint);
@@ -92,6 +95,7 @@
         configuration.setEntities(entities);
         validator.setConfiguration(configuration);
         validator.setup();
+        Thread.sleep(1000);
     }
 
     private static List<Entity> loadEntitiesFromResource(String collection) {
diff --git a/stack/query-validator/src/test/java/org/apache/usergrid/query/validator/QueryITSuite.java b/stack/query-validator/src/test/java/org/apache/usergrid/query/validator/QueryITSuite.java
index d763113..c44528f 100644
--- a/stack/query-validator/src/test/java/org/apache/usergrid/query/validator/QueryITSuite.java
+++ b/stack/query-validator/src/test/java/org/apache/usergrid/query/validator/QueryITSuite.java
@@ -23,16 +23,14 @@
 import org.apache.usergrid.query.validator.users.UserQueryIT;
 import org.apache.usergrid.rest.ITSetup;
 
-@RunWith(Suite.class)
-@Suite.SuiteClasses(
-        {
-                UserQueryIT.class
-        })
-public class QueryITSuite {
-    @ClassRule
-    public static CassandraResource cassandraResource = CassandraResource.newWithAvailablePorts();
 
-    //TODO Detecting current path
+@RunWith(Suite.class)
+@Suite.SuiteClasses( { UserQueryIT.class })
+public class QueryITSuite {
+
     @ClassRule
-    public static ITSetup serverResource = new ITSetup( cassandraResource, "../rest/src/main/webapp" );
+    public static CassandraResource cassandraResource = new CassandraResource();
+
+    @ClassRule
+    public static ITSetup serverResource = ITSetup.getInstance();
 }
diff --git a/stack/query-validator/src/test/java/org/apache/usergrid/query/validator/users/UserQueryIT.java b/stack/query-validator/src/test/java/org/apache/usergrid/query/validator/users/UserQueryIT.java
index a87a6be..445d153 100644
--- a/stack/query-validator/src/test/java/org/apache/usergrid/query/validator/users/UserQueryIT.java
+++ b/stack/query-validator/src/test/java/org/apache/usergrid/query/validator/users/UserQueryIT.java
@@ -16,8 +16,10 @@
  */
 package org.apache.usergrid.query.validator.users;
 
+import net.jcip.annotations.NotThreadSafe;
 import org.junit.Assert;
 import org.junit.BeforeClass;
+import org.junit.Ignore;
 import org.junit.Test;
 import org.apache.usergrid.persistence.Entity;
 import org.apache.usergrid.query.validator.AbstractQueryIT;
@@ -28,13 +30,15 @@
 
 import java.util.List;
 
+
 /**
  * @author Sungju Jin
  */
+@NotThreadSafe
 public class UserQueryIT extends AbstractQueryIT {
 
     @BeforeClass
-    public static void setDatas() {
+    public static void setDatas() throws InterruptedException{
         createInitializationDatas("user");
     }
 
@@ -90,8 +94,8 @@
 
     @Test
     public void sexEqualOrNameEqual() {
-        String sqlite = "SELECT * FROM users WHERE sex = 'female' or name = 'curioe' LIMIT 10";
-        String api = "select * where sex = 'female' or name = 'curioe'";
+        String sqlite = "SELECT * FROM users WHERE sex = 'female' or name = 'curioe' ORDER BY created DESC LIMIT 10";
+        String api = "select * where sex = 'female' or name = 'curioe' order by created desc";
 
         QueryRequest request = new QueryRequest();
         request.setDbQuery(sqlite);
@@ -101,9 +105,12 @@
     }
 
     @Test
+    @Ignore("TODO: uncomment this test when you are ready to fix USERGRID-1314")
     public void nameBeginswithAndSexEqualAndAgeGreaterthanequalOrSexEqual_sortNameDesc() {
-        String sqlite = "SELECT * FROM users WHERE name LIKE 'a%' and sex = 'male' and age >= 35 or sex = 'female' ORDER BY name desc LIMIT 10";
-        String api = "select * where name = 'a*' and sex = 'male' and age >= 35 or sex = 'female' order by name desc";
+        String sqlite = "SELECT * FROM users WHERE name LIKE 'a%' and sex = 'male' and age >= 35 or sex = 'female' " +
+            "ORDER BY name desc LIMIT 10";
+        String api = "select * where name = 'a*' and sex = 'male' and age >= 35 or sex = 'female' " +
+            "order by name desc";
 
         QueryRequest request = new QueryRequest();
         request.setDbQuery(sqlite);
@@ -113,9 +120,12 @@
     }
 
     @Test
+    @Ignore("TODO: uncomment this test when you are ready to fix USERGRID-1314")
     public void nameBeginswithAndSexEqualAndAgeGreaterthanequalOrSexEqual_sortAddressAscNameDesc() {
-        String sqlite = "SELECT * FROM users WHERE name LIKE 'a%' and sex = 'male' and age >= 35 or sex = 'female' ORDER BY address asc, name desc LIMIT 4";
-        String api = "select * where name = 'a*' and sex = 'male' and age >= 35 or sex = 'female' order by address asc, name desc";
+        String sqlite = "SELECT * FROM users WHERE name LIKE 'a%' and sex = 'male' and age >= 35 or sex = 'female' " +
+            "ORDER BY address asc, name desc LIMIT 4";
+        String api = "select * where name = 'a*' and sex = 'male' and age >= 35 or sex = 'female' " +
+            "order by address asc, name desc";
 
         QueryRequest request = new QueryRequest();
         request.setDbQuery(sqlite);
@@ -126,9 +136,12 @@
     }
 
     @Test
+    @Ignore("TODO: uncomment this test when you are ready to fix USERGRID-1314")
     public void nameBeginswithAndSexEqualAndAgeGreaterthanequalOrSexEqual_sortAddressAscNameDesc_limitL4() {
-        String sqlite = "SELECT * FROM users WHERE name LIKE 'a%' and sex = 'male' and age >= 35 or sex = 'female' ORDER BY address asc, name desc LIMIT 4";
-        String api = "select * where name = 'a*' and sex = 'male' and age >= 35 or sex = 'female' order by address asc, name desc";
+        String sqlite = "SELECT * FROM users WHERE name LIKE 'a%' and sex = 'male' and age >= 35 or sex = 'female' " +
+            "ORDER BY address asc, name desc LIMIT 4";
+        String api = "select * where name = 'a*' and sex = 'male' and age >= 35 or sex = 'female' " +
+            "order by address asc, name desc";
 
         QueryRequest request = new QueryRequest();
         request.setDbQuery(sqlite);
@@ -180,8 +193,8 @@
 
     @Test
     public void sexEqualAndAgeGreaterthanequal() {
-        String sqlite = " SELECT * FROM users WHERE sex = 'male' and age >= 35 LIMIT 10";
-        String api = "select * where sex = 'male' and age >= 35";
+        String sqlite = " SELECT * FROM users WHERE sex = 'male' and age >= 35 ORDER BY created DESC LIMIT 10";
+        String api = "select * where sex = 'male' and age >= 35 order by created desc";
 
         QueryRequest request = new QueryRequest();
         request.setDbQuery(sqlite);
@@ -273,7 +286,8 @@
 
     @Test
     public void sexEqualAndAgeGreaterthanequalOrSexEqual_sortAgeDesc() {
-        String sqlite = "SELECT * FROM users WHERE sex = 'male' and age >= 35 or sex = 'female' ORDER BY age desc LIMIT 10";
+        String sqlite = "SELECT * FROM users WHERE sex = 'male' and age >= 35 or sex = 'female' " +
+            "ORDER BY age desc LIMIT 10";
         String api = "select * where sex = 'male' and age >= 35 or sex = 'female' order by age desc";
 
         QueryRequest request = new QueryRequest();
@@ -285,7 +299,7 @@
 
     @Test
     public void limitL12() {
-        String sqlite = "SELECT * FROM users LIMIT 12";
+        String sqlite = "SELECT * FROM users order by created desc LIMIT 12";
         String api = null;
         int limit = 12;
 
@@ -299,7 +313,8 @@
 
     @Test
     public void sexEqualAndAgeGreaterthanequalOrSexEqual_sortNameDesc() {
-        String sqlite = "SELECT * FROM users WHERE sex = 'male' and age >= 35 or sex = 'female' ORDER BY name desc LIMIT 10";
+        String sqlite = "SELECT * FROM users WHERE sex = 'male' and age >= 35 or sex = 'female' " +
+            "ORDER BY name desc LIMIT 10";
         String api = "select * where sex = 'male' and age >= 35 or sex = 'female' order by name desc";
 
         QueryRequest request = new QueryRequest();
@@ -311,7 +326,8 @@
 
     @Test
     public void sexEqualAndAgeGreaterthanequalOrSexEqual_sortNameDesc_limitL20() {
-        String sqlite = "SELECT * FROM users WHERE sex = 'male' and age >= 35 or sex = 'female' ORDER BY name desc LIMIT 20";
+        String sqlite = "SELECT * FROM users WHERE sex = 'male' and age >= 35 or sex = 'female' " +
+            "ORDER BY name desc LIMIT 20";
         String api = "select * where sex = 'male' and age >= 35 or sex = 'female' order by name desc";
         int limit = 20;
 
@@ -325,7 +341,7 @@
 
     @Test
     public void limitL11() {
-        String sqlite = "SELECT * FROM users LIMIT 11";
+        String sqlite = "SELECT * FROM users order by created desc LIMIT 11";
         String api = null;
         int limit = 11;
 
@@ -338,8 +354,10 @@
     }
 
     @Test
+    @Ignore("TODO: uncomment this test when you are ready to fix USERGRID-1314")
     public void nameBeginswithAndSexEqualAndAgeGreaterthanequalOrSexEqual() {
-        String sqlite = "SELECT * FROM users WHERE name LIKE 'a%' and sex = 'male' and age >= 20 or sex = 'female' LIMIT 10";
+        String sqlite = "SELECT * FROM users WHERE name LIKE 'a%' and sex = 'male' and age >= 20 " +
+            "or sex = 'female' LIMIT 10";
         String api = "select * where name = 'a*' and sex = 'male' and age >= 20 or sex = 'female'";
 
         QueryRequest request = new QueryRequest();
@@ -350,8 +368,10 @@
     }
 
     @Test
+    @Ignore("TODO: uncomment this test when you are ready to fix USERGRID-1314")
     public void nameBeginswithAndSexEqualAndAgeGreaterthanequalOrSexEqual_limitL20() {
-        String sqlite = "SELECT * FROM users WHERE name LIKE 'a%' and sex = 'male' and age >= 20 or sex = 'female' LIMIT 20";
+        String sqlite = "SELECT * FROM users WHERE name LIKE 'a%' and sex = 'male' and age >= 20 " +
+            "or sex = 'female' LIMIT 20";
         String api = "select * where name = 'a*' and sex = 'male' and age >= 20 or sex = 'female'";
         int limit = 20;
 
diff --git a/stack/query-validator/src/test/resources/usergrid-test-context.xml b/stack/query-validator/src/test/resources/usergrid-test-context.xml
index 03bb2e5..4ea1482 100644
--- a/stack/query-validator/src/test/resources/usergrid-test-context.xml
+++ b/stack/query-validator/src/test/resources/usergrid-test-context.xml
@@ -46,16 +46,17 @@
 
     <bean id="binaryStore" class="org.apache.usergrid.services.assets.data.LocalFileBinaryStore"/>
 
-    <bean id="setup" class="org.apache.usergrid.corepersistence.HybridSetup">
-        <constructor-arg ref="properties"/>
+    <bean id="setup" class="org.apache.usergrid.corepersistence.CpSetup">
         <constructor-arg ref="entityManagerFactory"/>
         <constructor-arg ref="cassandraService"/>
+        <constructor-arg ref="injector"/>
     </bean>
 
     <!-- refer to a named schemaManager from the DataControl annotation thusly -->
     <bean id="coreManager" class="org.apache.usergrid.persistence.CoreSchemaManager">
         <constructor-arg ref="setup"/>
         <constructor-arg ref="cassandraCluster"/>
+        <constructor-arg ref="injector"/>
     </bean>
 
     <import resource="usergrid-query-validator-context.xml"/>
diff --git a/stack/rest/pom.xml b/stack/rest/pom.xml
index 89d14b4..580814c 100644
--- a/stack/rest/pom.xml
+++ b/stack/rest/pom.xml
@@ -306,6 +306,14 @@
             <version>${jackson-2-version}</version>
         </dependency>
 
+        <!-- for mocking the Apigee SSO Service -->
+        <dependency>
+            <groupId>org.mockito</groupId>
+            <artifactId>mockito-core</artifactId>
+            <version>${mockito.version}</version>
+            <scope>test</scope>
+        </dependency>
+
         <!-- databinding; ObjectMapper, JsonNode and related classes are here -->
         <dependency>
             <groupId>com.fasterxml.jackson.core</groupId>
diff --git a/stack/rest/src/main/java/org/apache/usergrid/rest/AbstractContextResource.java b/stack/rest/src/main/java/org/apache/usergrid/rest/AbstractContextResource.java
index 84ebe49..8f5d549 100644
--- a/stack/rest/src/main/java/org/apache/usergrid/rest/AbstractContextResource.java
+++ b/stack/rest/src/main/java/org/apache/usergrid/rest/AbstractContextResource.java
@@ -55,6 +55,9 @@
     };
     protected static final ObjectMapper mapper = new ObjectMapper();
 
+    public final static String ROLE_SERVICE_ADMIN = "service-admin";
+    public static final String USERGRID_SYSADMIN_LOGIN_NAME = "usergrid.sysadmin.login.name";
+
 
     protected AbstractContextResource parent;
 
@@ -259,4 +262,17 @@
         }
         return jsonObject;
     }
+
+
+    /**
+     * check if its a system admin
+     * @return
+     */
+    public Boolean userServiceAdmin(String username) {
+
+        if (sc.isUserInRole(ROLE_SERVICE_ADMIN) || (username != null && username.equals(properties.getProperty(USERGRID_SYSADMIN_LOGIN_NAME)))) {
+            return true;
+        }
+        return false;
+    }
 }
diff --git a/stack/rest/src/main/java/org/apache/usergrid/rest/applications/events/EventsResource.java b/stack/rest/src/main/java/org/apache/usergrid/rest/applications/events/EventsResource.java
index d5709d7..0b5eeb7 100644
--- a/stack/rest/src/main/java/org/apache/usergrid/rest/applications/events/EventsResource.java
+++ b/stack/rest/src/main/java/org/apache/usergrid/rest/applications/events/EventsResource.java
@@ -29,10 +29,7 @@
 import org.springframework.context.annotation.Scope;
 import org.springframework.stereotype.Component;
 
-import javax.ws.rs.DefaultValue;
-import javax.ws.rs.GET;
-import javax.ws.rs.Produces;
-import javax.ws.rs.QueryParam;
+import javax.ws.rs.*;
 import javax.ws.rs.core.Context;
 import javax.ws.rs.core.MediaType;
 import javax.ws.rs.core.UriInfo;
@@ -55,6 +52,7 @@
 
     @GET
     @JSONP
+    @Consumes(MediaType.APPLICATION_JSON)
     @Produces({MediaType.APPLICATION_JSON, "application/javascript"})
     public QueueResults executeQueueGet(
         @Context UriInfo ui, @QueryParam("callback") @DefaultValue("callback") String callback ) throws Exception {
diff --git a/stack/rest/src/main/java/org/apache/usergrid/rest/exceptions/AuthErrorInfo.java b/stack/rest/src/main/java/org/apache/usergrid/rest/exceptions/AuthErrorInfo.java
index 5aff66d..c9149e5 100644
--- a/stack/rest/src/main/java/org/apache/usergrid/rest/exceptions/AuthErrorInfo.java
+++ b/stack/rest/src/main/java/org/apache/usergrid/rest/exceptions/AuthErrorInfo.java
@@ -43,6 +43,7 @@
     INVALID_USERNAME_OR_PASSWORD_ERROR( "auth_invalid_username_or_password",
             "Unable to authenticate due to username or password being incorrect" ), //
     UNVERIFIED_OAUTH_ERROR( "auth_unverified_oath", "Unable to authenticate OAuth credentials" ), //
+    EXTERNALSSOPROVIDER_UNACTIVATED_ADMINUSER("externalssoprovider_unactivated_adminuser","Admin user not found or does not have access to any organizations."),
     NO_DOMAIN_ERROR( "auth_no_application", "Unable to authenticate due to application not found" ), //
     NOT_DOMAIN_OWNER_ERROR( "auth_not_application_owner", "" ), //
     EXPIRED_ACCESS_TOKEN_ERROR( "expired_token", "Unable to authenticate due to expired access token" ), //
diff --git a/stack/rest/src/main/java/org/apache/usergrid/rest/exceptions/RuntimeExceptionMapper.java b/stack/rest/src/main/java/org/apache/usergrid/rest/exceptions/RuntimeExceptionMapper.java
new file mode 100644
index 0000000..0e9b759
--- /dev/null
+++ b/stack/rest/src/main/java/org/apache/usergrid/rest/exceptions/RuntimeExceptionMapper.java
@@ -0,0 +1,43 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.usergrid.rest.exceptions;
+
+
+import javax.ws.rs.core.Response;
+import javax.ws.rs.ext.Provider;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import static javax.ws.rs.core.Response.Status.INTERNAL_SERVER_ERROR;
+
+
+@Provider
+public class RuntimeExceptionMapper extends AbstractExceptionMapper<RuntimeException> {
+
+    private static final Logger logger = LoggerFactory.getLogger(RuntimeExceptionMapper.class);
+
+    @Override
+    public Response toResponse( RuntimeException e ) {
+
+        if(logger.isTraceEnabled()) {
+            logger.trace("Error during runtime processing", e.getMessage());
+        }
+
+        return toResponse( INTERNAL_SERVER_ERROR, e );
+    }
+}
diff --git a/stack/rest/src/main/java/org/apache/usergrid/rest/management/ManagementResource.java b/stack/rest/src/main/java/org/apache/usergrid/rest/management/ManagementResource.java
index 1aa75ee..9ef67c9 100644
--- a/stack/rest/src/main/java/org/apache/usergrid/rest/management/ManagementResource.java
+++ b/stack/rest/src/main/java/org/apache/usergrid/rest/management/ManagementResource.java
@@ -19,9 +19,11 @@
 
 import org.apache.amber.oauth2.common.error.OAuthError;
 import org.apache.amber.oauth2.common.exception.OAuthProblemException;
+import org.apache.amber.oauth2.common.exception.OAuthSystemException;
 import org.apache.amber.oauth2.common.message.OAuthResponse;
 import org.apache.amber.oauth2.common.message.types.GrantType;
 import org.apache.commons.lang.StringUtils;
+import org.apache.shiro.SecurityUtils;
 import org.apache.shiro.codec.Base64;
 import org.apache.usergrid.management.ApplicationCreator;
 import org.apache.usergrid.management.UserInfo;
@@ -34,7 +36,13 @@
 import org.apache.usergrid.rest.management.organizations.OrganizationsResource;
 import org.apache.usergrid.rest.management.users.UsersResource;
 import org.apache.usergrid.security.oauth.AccessInfo;
+import org.apache.usergrid.security.shiro.principals.PrincipalIdentifier;
 import org.apache.usergrid.security.shiro.utils.SubjectUtils;
+import org.apache.usergrid.security.sso.ApigeeSSO2Provider;
+import org.apache.usergrid.security.sso.ExternalSSOProvider;
+import org.apache.usergrid.security.sso.SSOProviderFactory;
+import org.apache.usergrid.security.tokens.cassandra.TokenServiceImpl;
+import org.apache.usergrid.utils.JsonUtils;
 import org.glassfish.jersey.server.mvc.Viewable;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -53,7 +61,7 @@
 import static javax.servlet.http.HttpServletResponse.*;
 import static javax.ws.rs.core.MediaType.*;
 import static org.apache.commons.lang.StringUtils.isNotBlank;
-import static org.apache.usergrid.security.tokens.cassandra.TokenServiceImpl.USERGRID_CENTRAL_URL;
+import static org.apache.usergrid.security.tokens.cassandra.TokenServiceImpl.USERGRID_EXTERNAL_SSO_ENABLED;
 import static org.apache.usergrid.utils.JsonUtils.mapToJsonString;
 import static org.apache.usergrid.utils.StringUtils.stringOrSubstringAfterFirst;
 import static org.apache.usergrid.utils.StringUtils.stringOrSubstringBeforeFirst;
@@ -87,12 +95,18 @@
     @Autowired
     private ApplicationCreator applicationCreator;
 
+    @Autowired
+    private SSOProviderFactory ssoProviderFactory;
+
     // usergrid configuration property names needed
     public static final String USERGRID_SYSADMIN_LOGIN_NAME = "usergrid.sysadmin.login.name";
 
     MetricsFactory metricsFactory = null;
 
 
+    String access_token = null;
+
+
     public ManagementResource() {
         if (logger.isTraceEnabled()) {
             logger.trace( "ManagementResource initialized" );
@@ -153,13 +167,113 @@
                                          @QueryParam( "client_id" ) String client_id,
                                          @QueryParam( "client_secret" ) String client_secret,
                                          @QueryParam( "ttl" ) long ttl,
-                                         @QueryParam( "access_token" ) String access_token,
                                          @QueryParam( "callback" ) @DefaultValue( "" ) String callback )
             throws Exception {
-        return getAccessTokenInternal( ui, authorization, grant_type, username, password, client_id, client_secret, ttl,
-                callback, false, true );
+
+
+        final UserInfo user = SubjectUtils.getUser();
+
+        // if user is null ( meaning no token was provided and previously validated in OAuth2AccessTokenSecurityFilter)
+        // then assume it's a token request
+        if( user == null) {
+            return getAccessTokenInternal(ui, authorization, grant_type, username, password, client_id, client_secret, ttl,
+                callback, false, true);
+        }
+
+
+
+        // if it's not a token request and we have a user, extract details from the token
+
+        final long passwordChanged = management.getLastAdminPasswordChange( user.getUuid() );
+        final boolean ssoEnabled = Boolean.parseBoolean(properties.getProperty(USERGRID_EXTERNAL_SSO_ENABLED));
+        long tokenTtl;
+
+        PrincipalIdentifier userPrincipal  = (PrincipalIdentifier) SecurityUtils.getSubject().getPrincipal();
+        if ( userPrincipal != null && userPrincipal.getAccessTokenCredentials() != null ) {
+            this.access_token = userPrincipal.getAccessTokenCredentials().getToken();
+        }
+
+        String ssoUserId = null;
+        if(ssoEnabled && !user.getUsername().equals(properties.getProperty(USERGRID_SYSADMIN_LOGIN_NAME))){
+            ExternalSSOProvider provider = ssoProviderFactory.getProvider();
+            final Map<String, String> decodedTokenDetails = provider.getDecodedTokenDetails(access_token);
+            final String expiry = decodedTokenDetails.containsKey("expiry") ? decodedTokenDetails.get("expiry") : "0";
+
+            tokenTtl =
+                Long.valueOf(expiry) - System.currentTimeMillis()/1000;
+
+            if( provider instanceof ApigeeSSO2Provider ) {
+                ssoUserId = decodedTokenDetails.get("user_id");
+            }
+
+        }else{
+            tokenTtl = tokens.getTokenInfo(access_token).getDuration();
+        }
+
+
+        final AccessInfo access_info = new AccessInfo().withExpiresIn( tokenTtl ).withAccessToken( access_token )
+            .withPasswordChanged( passwordChanged );
+
+        // if external SSO is enabled, always set the external sso user id property, even if it's null
+        if ( ssoEnabled ){
+
+            access_info.setProperty("external_sso_user_id", ssoUserId);
+        }
+
+        access_info.setProperty( "user", management.getAdminUserOrganizationData( user, true, false) );
+
+
+        return Response.status( SC_OK ).type( jsonMediaType( callback ) )
+            .entity( wrapWithCallback( access_info, callback ) ).build();
+
+
     }
 
+    /**
+     * Get token details. Specially used for external tokens.
+     * @param ui
+     * @param authorization
+     * @param token
+     * @param provider
+     * @param keyUrl
+     * @param callback
+     * @return the json with all the token details. Error message if the external SSO provider is not supported or any other error.
+     * @throws Exception
+     */
+    @GET
+    @Path( "tokendetails" )
+    public Response getTokenDetails( @Context UriInfo ui, @HeaderParam( "Authorization" ) String authorization,
+                                    @QueryParam( "token" ) String token,
+                                    @QueryParam( "provider" )  @DefaultValue( "" ) String provider,
+                                    @QueryParam( "keyurl" )  @DefaultValue( "" ) String keyUrl,
+                                    @QueryParam( "callback" ) @DefaultValue( "" ) String callback
+                                    ) throws Exception {
+
+        ExternalSSOProvider externalprovider = null;
+        Map<String, Object> jwt = null;
+
+        if (! provider.isEmpty()) {
+            //check if its in one of the external provider list.
+            if (!ssoProviderFactory.getProvidersList().contains(StringUtils.upperCase(provider))) {
+                throw new IllegalArgumentException("Unsupported provider.");
+            } else {
+                //get the specific provider.
+                externalprovider = ssoProviderFactory.getSpecificProvider(provider);
+            }
+        }
+        else{   //if the provider is not specified get the default provider enabled in the properties.
+            externalprovider = ssoProviderFactory.getProvider();
+        }
+
+        if(keyUrl.isEmpty()) {
+            keyUrl =  externalprovider.getExternalSSOUrl();
+        }
+
+        jwt = externalprovider.getAllTokenDetails(token, keyUrl);
+
+        return Response.status( SC_OK ).type( jsonMediaType( callback ) )
+            .entity( wrapWithCallback(JsonUtils.mapToJsonString(jwt) , callback ) ).build();
+    }
 
     @GET
     @Path( "token" )
@@ -170,6 +284,7 @@
                                     @QueryParam( "client_id" ) String client_id,
                                     @QueryParam( "client_secret" ) String client_secret, @QueryParam( "ttl" ) long ttl,
                                     @QueryParam( "callback" ) @DefaultValue( "" ) String callback ) throws Exception {
+
         return getAccessTokenInternal( ui, authorization, grant_type, username, password, client_id, client_secret, ttl,
                 callback, false, false);
     }
@@ -180,6 +295,8 @@
                                            String callback, boolean adminData, boolean me) throws Exception {
 
 
+
+
         UserInfo user = null;
 
         try {
@@ -195,12 +312,11 @@
 
             if ( user == null ) {
 
-                if ( !me ) { // if not lightweight-auth, i.e. /management/me then...
 
-                    // make sure authentication is allowed considering
-                    // external token validation configuration (UG Central SSO)
-                    ensureAuthenticationAllowed( username, grant_type );
-                }
+                // make sure authentication is allowed considering
+                // external token validation configuration (UG Central SSO)
+                ensureAuthenticationAllowed( username, grant_type );
+
 
                 if ( authorization != null ) {
                     String type = stringOrSubstringBeforeFirst( authorization, ' ' ).toUpperCase();
@@ -277,6 +393,18 @@
                                .entity( wrapWithCallback( response.getBody(), callback ) ).build();
             }
 
+            //moved the check for sso enabled form MangementServiceImpl since was unable to get the current user there to check if its super user.
+            if( tokens.isExternalSSOProviderEnabled()
+                && !userServiceAdmin(username) ){
+                OAuthResponse response =
+                    OAuthResponse.errorResponse( SC_BAD_REQUEST ).setError( OAuthError.TokenResponse.INVALID_GRANT )
+                        .setErrorDescription( "External SSO integration is enabled, admin users must login via provider: "+
+                            properties.getProperty(TokenServiceImpl.USERGRID_EXTERNAL_SSO_PROVIDER) ).buildJSONMessage();
+                return Response.status( response.getResponseStatus() ).type( jsonMediaType( callback ) )
+                    .entity( wrapWithCallback( response.getBody(), callback ) ).build();
+
+            }
+
             String token = management.getAccessTokenForAdminUser( user.getUuid(), ttl );
             Long passwordChanged = management.getLastAdminPasswordChange( user.getUuid() );
 
@@ -284,7 +412,7 @@
                     new AccessInfo().withExpiresIn( tokens.getMaxTokenAgeInSeconds( token ) ).withAccessToken( token )
                                     .withPasswordChanged( passwordChanged );
 
-            access_info.setProperty( "user", management.getAdminUserOrganizationData( user, me ) );
+            access_info.setProperty( "user", management.getAdminUserOrganizationData( user, true, false) );
 
             // increment counters for admin login
             management.countAdminUserAction( user, "login" );
@@ -335,6 +463,7 @@
                                              @FormParam( "access_token" ) String access_token,
                                              @FormParam( "callback" ) @DefaultValue( "" ) String callback )
             throws Exception {
+
         return getAccessTokenInternal( ui, authorization, grant_type, username, password, client_id, client_secret, ttl,
                 callback, false, true );
     }
@@ -348,6 +477,8 @@
                                             @QueryParam( "callback" ) @DefaultValue( "" ) String callback )
             throws Exception {
 
+        ValidateJson(json);
+
         String grant_type = ( String ) json.get( "grant_type" );
         String username = ( String ) json.get( "username" );
         String password = ( String ) json.get( "password" );
@@ -376,6 +507,9 @@
                                               @QueryParam( "callback" ) @DefaultValue( "" ) String callback,
                                               @HeaderParam( "Authorization" ) String authorization ) throws Exception {
 
+
+        ValidateJson(json);
+
         String grant_type = ( String ) json.get( "grant_type" );
         String username = ( String ) json.get( "username" );
         String password = ( String ) json.get( "password" );
@@ -396,6 +530,12 @@
                 callback, false, false );
     }
 
+    private void ValidateJson(Map<String, Object> json) throws OAuthSystemException {
+        if ( json == null ) {
+            throw new IllegalArgumentException("missing json post data");
+        }
+    }
+
 
     @GET
     @Path( "authorize" )
@@ -450,6 +590,8 @@
                 else {
                     redirect_uri += "&";
                 }
+
+                //todo: check if sso enabled.
                 redirect_uri += "code=" + management.getAccessTokenForAdminUser( user.getUuid(), 0 );
                 if ( isNotBlank( state ) ) {
                     redirect_uri += "&state=" + URLEncoder.encode( state, "UTF-8" );
@@ -478,28 +620,24 @@
      */
     private void ensureAuthenticationAllowed( String username, String grant_type ) {
 
+
         if ( username == null || grant_type == null || !grant_type.equalsIgnoreCase( "password" )) {
             return; // we only care about username/password auth
         }
 
-        final boolean externalTokensEnabled =
-                !StringUtils.isEmpty( properties.getProperty( USERGRID_CENTRAL_URL ) );
-
-        if ( externalTokensEnabled ) {
-
+        if ( tokens.isExternalSSOProviderEnabled() ) {
             // when external tokens enabled then only superuser can obtain an access token
-
-            final String superuserName = properties.getProperty( USERGRID_SYSADMIN_LOGIN_NAME );
-            if ( !username.equalsIgnoreCase( superuserName )) {
-
+            if ( !userServiceAdmin(username)) {
                 // this guy is not the superuser
-                throw new IllegalArgumentException( "Admin Users must login via " +
-                        properties.getProperty( USERGRID_CENTRAL_URL ) );
+                throw new IllegalArgumentException( "External SSO integration is enabled, admin users must login via provider: "+
+                    properties.getProperty(TokenServiceImpl.USERGRID_EXTERNAL_SSO_PROVIDER) );
             }
         }
     }
 
 
+
+
     String errorMsg = "";
     String responseType;
     String clientId;
diff --git a/stack/rest/src/main/java/org/apache/usergrid/rest/management/organizations/OrganizationsResource.java b/stack/rest/src/main/java/org/apache/usergrid/rest/management/organizations/OrganizationsResource.java
index 360e660..6105ce6 100644
--- a/stack/rest/src/main/java/org/apache/usergrid/rest/management/organizations/OrganizationsResource.java
+++ b/stack/rest/src/main/java/org/apache/usergrid/rest/management/organizations/OrganizationsResource.java
@@ -20,6 +20,7 @@
 import com.fasterxml.jackson.jaxrs.json.annotation.JSONP;
 import com.google.common.base.Preconditions;
 import org.apache.commons.lang.StringUtils;
+import org.apache.shiro.SecurityUtils;
 import org.apache.usergrid.management.ApplicationCreator;
 import org.apache.usergrid.management.OrganizationInfo;
 import org.apache.usergrid.management.OrganizationOwnerInfo;
@@ -28,6 +29,8 @@
 import org.apache.usergrid.rest.ApiResponse;
 import org.apache.usergrid.rest.RootResource;
 import org.apache.usergrid.rest.security.annotations.RequireSystemAccess;
+import org.apache.usergrid.security.shiro.principals.PrincipalIdentifier;
+import org.apache.usergrid.security.shiro.utils.SubjectUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.springframework.beans.factory.annotation.Autowired;
@@ -40,7 +43,7 @@
 import javax.ws.rs.core.UriInfo;
 import java.util.*;
 
-import static org.apache.usergrid.security.tokens.cassandra.TokenServiceImpl.USERGRID_CENTRAL_URL;
+import static org.apache.commons.lang.StringUtils.isBlank;
 
 
 @Component( "org.apache.usergrid.rest.management.organizations.OrganizationsResource" )
@@ -55,6 +58,8 @@
 
     public static final String ORGANIZATION_PROPERTIES = "properties";
     public static final String ORGANIZATION_CONFIGURATION = "configuration";
+    public static final String USERGRID_SYSADMIN_LOGIN_NAME = "usergrid.sysadmin.login.name";
+    public static final String USERGRID_SUPERUSER_ADDORG_ENABLED ="usergrid.superuser.addorg.enable";
 
     @Autowired
     private ApplicationCreator applicationCreator;
@@ -69,6 +74,7 @@
     public ApiResponse getAllOrganizations() throws Exception{
 
         ApiResponse response = createApiResponse();
+        //TODO this needs paging at some point
         List<OrganizationInfo> orgs = management.getOrganizations(null, 10000);
         List<Object> jsonOrgList = new ArrayList<>();
 
@@ -185,12 +191,16 @@
                                              String email, String password, Map<String, Object> userProperties,
                                              Map<String, Object> orgProperties, String callback ) throws Exception {
 
-        final boolean externalTokensEnabled =
-                !StringUtils.isEmpty( properties.getProperty( USERGRID_CENTRAL_URL ) );
+        /* Providing no password in this request signifies that an existing admin users should be associated to the
+        newly requested organization. */
 
-        if ( externalTokensEnabled ) {
-            throw new IllegalArgumentException( "Organization / Admin Users must be created via " +
-                    properties.getProperty( USERGRID_CENTRAL_URL ) );
+        // Always let the sysadmin create an org, but otherwise follow the behavior specified with
+        // the property 'usergrid.management.allow-public-registration'
+        if ( ( System.getProperty("usergrid.management.allow-public-registration") != null
+            && !Boolean.valueOf(System.getProperty("usergrid.management.allow-public-registration"))
+            && !userServiceAdmin(null) ) ) {
+
+                throw new IllegalArgumentException("Public organization registration is disabled");
         }
 
         Preconditions
@@ -217,6 +227,12 @@
 
         applicationCreator.createSampleFor( organizationOwner.getOrganization() );
 
+        // ( DO NOT REMOVE ) Execute any post processing which may be overridden by external classes using UG as
+        // a dependency
+        management.createAdminUserPostProcessing(organizationOwner.getOwner(), null);
+        management.createOrganizationPostProcessing(organizationOwner.getOrganization(), null);
+        management.addUserToOrganizationPostProcessing(organizationOwner.getOwner(), organizationName, null);
+
         response.setData( organizationOwner );
         response.setSuccess();
 
@@ -224,22 +240,4 @@
         return response;
     }
 
-    /*
-     * @POST
-     *
-     * @Consumes(MediaType.MULTIPART_FORM_DATA) public JSONWithPadding
-     * newOrganizationFromMultipart(@Context UriInfo ui,
-     *
-     * @FormDataParam("organization") String organization,
-     *
-     * @FormDataParam("username") String username,
-     *
-     * @FormDataParam("name") String name,
-     *
-     * @FormDataParam("email") String email,
-     *
-     * @FormDataParam("password") String password) throws Exception { return
-     * newOrganizationFromForm(ui, organization, username, name, email,
-     * password); }
-     */
 }
diff --git a/stack/rest/src/main/java/org/apache/usergrid/rest/management/organizations/users/UsersResource.java b/stack/rest/src/main/java/org/apache/usergrid/rest/management/organizations/users/UsersResource.java
index 40ba92e..3b70c06 100644
--- a/stack/rest/src/main/java/org/apache/usergrid/rest/management/organizations/users/UsersResource.java
+++ b/stack/rest/src/main/java/org/apache/usergrid/rest/management/organizations/users/UsersResource.java
@@ -129,12 +129,22 @@
         }
 
         if ( user == null ) {
-            user = management.createAdminUser( organization.getUuid(), username, name, email, password, false, false );
 
-            // A null may be returned if the user fails validation check
-            if ( user != null ) {
-                management.startAdminUserPasswordResetFlow( organization.getUuid(), user );
+            if ( tokens.isExternalSSOProviderEnabled() ){
+                //autoactivating user, since the activation is done via the external sso provider.
+                user = management.createAdminUser(organization.getUuid(),username,name,email,password,true,false);
             }
+            else {
+                user = management.createAdminUser(organization.getUuid(), username, name, email, password, false, false);
+                // A null may be returned if the user fails validation check
+                if (user != null) {
+                    management.startAdminUserPasswordResetFlow(organization.getUuid(), user);
+                }
+            }
+
+            // DO NOT REMOVE - used for external classes to hook into any post-processing
+            management.createAdminUserPostProcessing(user, null);
+
         }
 
         if ( user == null ) {
@@ -143,6 +153,9 @@
 
         management.addAdminUserToOrganization( user, organization, true );
 
+        // DO NOT REMOVE - used for external classes to hook into any post-processing
+        management.addUserToOrganizationPostProcessing(user, organization.getName(), null);
+
         Map<String, Object> result = new LinkedHashMap<String, Object>();
         result.put( "user", user );
         response.setData( result );
@@ -151,26 +164,6 @@
         return response;
     }
 
-	/*
-     * @RequireOrganizationAccess
-	 *
-	 * @POST
-	 *
-	 * @Consumes(MediaType.MULTIPART_FORM_DATA) public JSONWithPadding
-	 * newUserForOrganizationFromMultipart(
-	 *
-	 * @Context UriInfo ui, @FormDataParam("username") String username,
-	 *
-	 * @FormDataParam("name") String name,
-	 *
-	 * @FormDataParam("email") String email,
-	 *
-	 * @FormDataParam("password") String password) throws Exception {
-	 *
-	 * return newUserForOrganizationFromForm(ui, username, name, email,
-	 * password); }
-	 */
-
 
     @RequireOrganizationAccess
     @PUT
@@ -190,6 +183,9 @@
         }
         management.addAdminUserToOrganization( user, organization, true );
 
+        // DO NOT REMOVE - used for external classes to hook into any post-processing
+        management.addUserToOrganizationPostProcessing(user, organization.getName(), null);
+
         Map<String, Object> result = new LinkedHashMap<String, Object>();
         result.put( "user", user );
         response.setData( result );
@@ -217,6 +213,9 @@
         }
         management.addAdminUserToOrganization( user, organization, true );
 
+        // DO NOT REMOVE - used for external classes to hook into any post-processing
+        management.addUserToOrganizationPostProcessing(user, organization.getName(), null);
+
         Map<String, Object> result = new LinkedHashMap<String, Object>();
         result.put( "user", user );
         response.setData( result );
@@ -252,6 +251,9 @@
         }
         management.addAdminUserToOrganization( user, organization, true );
 
+        // DO NOT REMOVE - used for external classes to hook into any post-processing
+        management.addUserToOrganizationPostProcessing(user, organization.getName(), null);
+
         Map<String, Object> result = new LinkedHashMap<String, Object>();
         result.put( "user", user );
         response.setData( result );
@@ -278,6 +280,9 @@
         }
         management.removeAdminUserFromOrganization( user.getUuid(), organization.getUuid() );
 
+        // DO NOT REMOVE - used for external classes to hook into any post-processing
+        management.removeUserFromOrganizationPostProcessing(user, organization.getName(), null);
+
         Map<String, Object> result = new LinkedHashMap<String, Object>();
         result.put( "user", user );
         response.setData( result );
@@ -314,6 +319,9 @@
         }
         management.removeAdminUserFromOrganization( user.getUuid(), organization.getUuid() );
 
+        // DO NOT REMOVE - used for external classes to hook into any post-processing
+        management.removeUserFromOrganizationPostProcessing(user, organization.getName(), null);
+
         Map<String, Object> result = new LinkedHashMap<String, Object>();
         result.put( "user", user );
         response.setData( result );
@@ -341,6 +349,9 @@
         }
         management.removeAdminUserFromOrganization( user.getUuid(), organization.getUuid() );
 
+        // DO NOT REMOVE - used for external classes to hook into any post-processing
+        management.removeUserFromOrganizationPostProcessing(user, organization.getName(), null);
+
         Map<String, Object> result = new LinkedHashMap<String, Object>();
         result.put( "user", user );
         response.setData( result );
diff --git a/stack/rest/src/main/java/org/apache/usergrid/rest/management/users/UserResource.java b/stack/rest/src/main/java/org/apache/usergrid/rest/management/users/UserResource.java
index 28d46a6..af37cf5 100644
--- a/stack/rest/src/main/java/org/apache/usergrid/rest/management/users/UserResource.java
+++ b/stack/rest/src/main/java/org/apache/usergrid/rest/management/users/UserResource.java
@@ -20,16 +20,17 @@
 import com.fasterxml.jackson.jaxrs.json.annotation.JSONP;
 import net.tanesha.recaptcha.ReCaptchaImpl;
 import net.tanesha.recaptcha.ReCaptchaResponse;
-import org.apache.commons.lang.StringUtils;
+import org.apache.shiro.SecurityUtils;
 import org.apache.usergrid.management.ActivationState;
 import org.apache.usergrid.management.UserInfo;
 import org.apache.usergrid.rest.AbstractContextResource;
 import org.apache.usergrid.rest.ApiResponse;
 import org.apache.usergrid.rest.exceptions.RedirectionException;
-import org.apache.usergrid.rest.management.ManagementResource;
 import org.apache.usergrid.rest.management.users.organizations.OrganizationsResource;
 import org.apache.usergrid.rest.security.annotations.RequireAdminUserAccess;
+import org.apache.usergrid.security.shiro.principals.PrincipalIdentifier;
 import org.apache.usergrid.security.tokens.TokenInfo;
+import org.apache.usergrid.security.tokens.cassandra.TokenServiceImpl;
 import org.apache.usergrid.security.tokens.exceptions.TokenException;
 import org.apache.usergrid.services.ServiceResults;
 import org.glassfish.jersey.server.mvc.Viewable;
@@ -47,7 +48,6 @@
 
 import static org.apache.usergrid.security.shiro.utils.SubjectUtils.isServiceAdmin;
 import static org.apache.usergrid.utils.ConversionUtils.string;
-import static org.apache.usergrid.security.tokens.cassandra.TokenServiceImpl.USERGRID_CENTRAL_URL;
 
 
 @Component( "org.apache.usergrid.rest.management.users.UserResource" )
@@ -64,7 +64,7 @@
 
     String errorMsg;
 
-    String token;
+    String token = null;
 
 
     public UserResource() {
@@ -73,6 +73,10 @@
 
     public UserResource init( UserInfo user ) {
         this.user = user;
+        PrincipalIdentifier userPrincipal  = (PrincipalIdentifier) SecurityUtils.getSubject().getPrincipal();
+        if ( userPrincipal != null && userPrincipal.getAccessTokenCredentials() != null ) {
+            this.token = userPrincipal.getAccessTokenCredentials().getToken();
+        }
         return this;
     }
 
@@ -98,6 +102,12 @@
                                         @QueryParam( "callback" ) @DefaultValue( "callback" ) String callback )
             throws Exception {
 
+        if ( tokens.isExternalSSOProviderEnabled() && !isServiceAdmin() ) {
+            throw new IllegalArgumentException(  "External SSO integration is enabled, admin users must update" +
+                " info via provider: "+ properties.getProperty(TokenServiceImpl.USERGRID_EXTERNAL_SSO_PROVIDER) );
+        }
+
+
         if ( json == null ) {
             return null;
         }
@@ -132,6 +142,11 @@
                                                @QueryParam( "callback" ) @DefaultValue( "callback" ) String callback )
             throws Exception {
 
+        if ( tokens.isExternalSSOProviderEnabled() && !isServiceAdmin() ) {
+            throw new IllegalArgumentException( "External SSO integration is enabled, admin users must reset passwords via" +
+                " provider: "+ properties.getProperty(TokenServiceImpl.USERGRID_EXTERNAL_SSO_PROVIDER) );
+        }
+
         if ( json == null ) {
             return null;
         }
@@ -196,9 +211,11 @@
         ApiResponse response = createApiResponse();
         response.setAction( "get admin user" );
 
-        String token = management.getAccessTokenForAdminUser( user.getUuid(), ttl );
-        Map<String, Object> userOrganizationData = management.getAdminUserOrganizationData( user, !shallow );
-        userOrganizationData.put( "token", token );
+        // commenting out creation of token each time and setting the token value to the one sent in the request.
+        // String token = management.getAccessTokenForAdminUser( user.getUuid(), ttl );
+
+        Map<String, Object> userOrganizationData = management.getAdminUserOrganizationData( user, !shallow, !shallow);
+        //userOrganizationData.put( "token", token );
         response.setData( userOrganizationData );
         response.setSuccess();
 
@@ -211,12 +228,9 @@
     @Produces( MediaType.TEXT_HTML )
     public Viewable showPasswordResetForm( @Context UriInfo ui, @QueryParam( "token" ) String token ) {
 
-        final boolean externalTokensEnabled =
-                !StringUtils.isEmpty( properties.getProperty( USERGRID_CENTRAL_URL ) );
-
-        if ( externalTokensEnabled ) {
-            throw new IllegalArgumentException( "Admin Users must reset passwords via " +
-                    properties.getProperty( USERGRID_CENTRAL_URL ) );
+        if ( tokens.isExternalSSOProviderEnabled() && !isServiceAdmin() ) {
+            throw new IllegalArgumentException( "External SSO integration is enabled, admin users must reset password via" +
+                " provider: "+ properties.getProperty(TokenServiceImpl.USERGRID_EXTERNAL_SSO_PROVIDER) );
         }
 
         UUID organizationId = null;
@@ -258,12 +272,9 @@
             logger.trace("handlePasswordResetForm");
         }
 
-        final boolean externalTokensEnabled =
-                !StringUtils.isEmpty( properties.getProperty( USERGRID_CENTRAL_URL ) );
-
-        if ( externalTokensEnabled ) {
-            throw new IllegalArgumentException( "Admin Users must reset passwords via " +
-                    properties.getProperty( USERGRID_CENTRAL_URL ) );
+        if ( tokens.isExternalSSOProviderEnabled() && !isServiceAdmin() ) {
+            throw new IllegalArgumentException(  "External SSO integration is enabled, admin users must reset password via" +
+                " provider: "+ properties.getProperty(TokenServiceImpl.USERGRID_EXTERNAL_SSO_PROVIDER) );
         }
 
         UUID organizationId = null;
@@ -347,12 +358,9 @@
     @Produces( MediaType.TEXT_HTML )
     public Viewable activate( @Context UriInfo ui, @QueryParam( "token" ) String token ) {
 
-        final boolean externalTokensEnabled =
-                !StringUtils.isEmpty( properties.getProperty( USERGRID_CENTRAL_URL ) );
-
-        if ( externalTokensEnabled ) {
-            throw new IllegalArgumentException( "Admin Users must activate via " +
-                    properties.getProperty( USERGRID_CENTRAL_URL ) );
+        if ( tokens.isExternalSSOProviderEnabled() && !isServiceAdmin() ) {
+            throw new IllegalArgumentException(  "External SSO integration is enabled, admin users must activate via" +
+                " provider: "+ properties.getProperty(TokenServiceImpl.USERGRID_EXTERNAL_SSO_PROVIDER) );
         }
 
         UUID organizationId = null;
@@ -380,12 +388,9 @@
     @Produces( MediaType.TEXT_HTML )
     public Viewable confirm( @Context UriInfo ui, @QueryParam( "token" ) String token ) {
 
-        final boolean externalTokensEnabled =
-                !StringUtils.isEmpty( properties.getProperty( USERGRID_CENTRAL_URL ) );
-
-        if ( externalTokensEnabled ) {
-            throw new IllegalArgumentException( "Admin Users must confirm via " +
-                    properties.getProperty( USERGRID_CENTRAL_URL ) );
+        if ( tokens.isExternalSSOProviderEnabled() && !isServiceAdmin() ) {
+            throw new IllegalArgumentException( "External SSO integration is enabled, admin users must confirm " +
+                "via provider: "+ properties.getProperty(TokenServiceImpl.USERGRID_EXTERNAL_SSO_PROVIDER ) );
         }
 
         UUID organizationId = null;
@@ -419,12 +424,9 @@
                                        @QueryParam( "callback" ) @DefaultValue( "callback" ) String callback )
             throws Exception {
 
-        final boolean externalTokensEnabled =
-                !StringUtils.isEmpty( properties.getProperty( USERGRID_CENTRAL_URL ) );
-
-        if ( externalTokensEnabled ) {
-            throw new IllegalArgumentException( "Admin Users must reactivate via " +
-                    properties.getProperty( USERGRID_CENTRAL_URL ) );
+        if ( tokens.isExternalSSOProviderEnabled() && !isServiceAdmin() ) {
+            throw new IllegalArgumentException( "External SSO integration is enabled, admin user must re-activate " +
+                "via provider: "+ properties.getProperty(TokenServiceImpl.USERGRID_EXTERNAL_SSO_PROVIDER ) );
         }
 
         logger.info( "Send activation email for user: {}" , user.getUuid() );
@@ -446,6 +448,11 @@
                                              @QueryParam( "callback" ) @DefaultValue( "callback" ) String callback )
             throws Exception {
 
+        if ( tokens.isExternalSSOProviderEnabled() && !isServiceAdmin() ) {
+            throw new IllegalArgumentException( "External SSO integration is enabled, admin user tokens must be revoked " +
+                "via provider: "+ properties.getProperty(TokenServiceImpl.USERGRID_EXTERNAL_SSO_PROVIDER) );
+        }
+
         UUID adminId = user.getUuid();
 
         logger.info( "Revoking user tokens for {}", adminId );
@@ -478,6 +485,11 @@
                                             @QueryParam( "callback" ) @DefaultValue( "callback" ) String callback,
                                             @QueryParam( "token" ) String token ) throws Exception {
 
+        if ( tokens.isExternalSSOProviderEnabled() && !isServiceAdmin() ) {
+            throw new IllegalArgumentException( "External SSO integration is enabled, admin user token must be revoked via " +
+                "via provider: "+ properties.getProperty(TokenServiceImpl.USERGRID_EXTERNAL_SSO_PROVIDER ) );
+        }
+
         UUID adminId = user.getUuid();
         this.token = token;
 
diff --git a/stack/rest/src/main/java/org/apache/usergrid/rest/management/users/UsersResource.java b/stack/rest/src/main/java/org/apache/usergrid/rest/management/users/UsersResource.java
index ff279ef..6999841 100644
--- a/stack/rest/src/main/java/org/apache/usergrid/rest/management/users/UsersResource.java
+++ b/stack/rest/src/main/java/org/apache/usergrid/rest/management/users/UsersResource.java
@@ -29,6 +29,7 @@
 import org.apache.usergrid.rest.exceptions.AuthErrorInfo;
 import org.apache.usergrid.rest.exceptions.RedirectionException;
 import org.apache.usergrid.security.shiro.utils.SubjectUtils;
+import org.apache.usergrid.security.tokens.cassandra.TokenServiceImpl;
 import org.glassfish.jersey.server.mvc.Viewable;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -44,7 +45,8 @@
 
 import static org.apache.commons.lang.StringUtils.isBlank;
 import static org.apache.usergrid.rest.exceptions.SecurityException.mappableSecurityException;
-import static org.apache.usergrid.security.tokens.cassandra.TokenServiceImpl.USERGRID_CENTRAL_URL;
+import static org.apache.usergrid.security.shiro.utils.SubjectUtils.isServiceAdmin;
+import static org.apache.usergrid.security.tokens.cassandra.TokenServiceImpl.USERGRID_EXTERNAL_SSO_PROVIDER_URL;
 
 
 @Component( "org.apache.usergrid.rest.management.users.UsersResource" )
@@ -114,12 +116,9 @@
                                        @QueryParam( "callback" ) @DefaultValue( "callback" ) String callback )
             throws Exception {
 
-        final boolean externalTokensEnabled =
-                !StringUtils.isEmpty( properties.getProperty( USERGRID_CENTRAL_URL ) );
-
-        if ( externalTokensEnabled ) {
-            throw new IllegalArgumentException( "Admin Users must signup via " +
-                    properties.getProperty( USERGRID_CENTRAL_URL ) );
+        if ( tokens.isExternalSSOProviderEnabled() && !isServiceAdmin() ) {
+            throw new IllegalArgumentException(  "External SSO integration is enabled, admin users registering without an org" +
+                " must do so via provider: "+ properties.getProperty(TokenServiceImpl.USERGRID_EXTERNAL_SSO_PROVIDER) );
         }
 
         // email is only required parameter
@@ -137,7 +136,15 @@
         ApiResponse response = createApiResponse();
         response.setAction( "create user" );
 
-        UserInfo user = management.createAdminUser( null, username, name, email, password, false, false );
+
+        UserInfo user = null;
+        if ( tokens.isExternalSSOProviderEnabled() ){
+            //autoactivating user, since the activation is done via the external sso provider.
+            user = management.createAdminUser(null,username,name,email,password,true,false);
+        }
+        else {
+            user = management.createAdminUser(null, username, name, email, password, false, false);
+        }
         Map<String, Object> result = new LinkedHashMap<String, Object>();
         if ( user != null ) {
             result.put( "user", user );
@@ -148,31 +155,23 @@
             throw mappableSecurityException( AuthErrorInfo.BAD_CREDENTIALS_SYNTAX_ERROR );
         }
 
+        // DO NOT REMOVE - used for external classes to hook into any post-processing
+        management.createAdminUserPostProcessing(user, null);
+
         return response;
     }
 
-	/*
-     * @POST
-	 *
-	 * @Consumes(MediaType.MULTIPART_FORM_DATA) public JSONWithPadding
-	 * createUserFromMultipart(@Context UriInfo ui,
-	 *
-	 * @FormDataParam("username") String username,
-	 *
-	 * @FormDataParam("name") String name,
-	 *
-	 * @FormDataParam("email") String email,
-	 *
-	 * @FormDataParam("password") String password) throws Exception {
-	 *
-	 * return createUser(ui, username, name, email, password); }
-	 */
-
 
     @GET
     @Path( "resetpw" )
     @Produces( MediaType.TEXT_HTML )
     public Viewable showPasswordResetForm( @Context UriInfo ui ) {
+
+        if ( tokens.isExternalSSOProviderEnabled() && !isServiceAdmin() ) {
+            throw new IllegalArgumentException( "External SSO integration is enabled, admin users must reset password via" +
+                " provider: "+ properties.getProperty(TokenServiceImpl.USERGRID_EXTERNAL_SSO_PROVIDER) );
+        }
+
         return handleViewable( "resetpw_email_form", this );
     }
 
@@ -185,6 +184,11 @@
                                              @FormParam( "recaptcha_challenge_field" ) String challenge,
                                              @FormParam( "recaptcha_response_field" ) String uresponse ) {
 
+        if ( tokens.isExternalSSOProviderEnabled() && !isServiceAdmin() ) {
+            throw new IllegalArgumentException( "External SSO integration is enabled, admin users must reset password via" +
+                " provider: "+ properties.getProperty(TokenServiceImpl.USERGRID_EXTERNAL_SSO_PROVIDER) );
+        }
+
         try {
             if ( isBlank( email ) ) {
                 errorMsg = "No email provided, try again...";
diff --git a/stack/rest/src/main/java/org/apache/usergrid/rest/management/users/organizations/OrganizationsResource.java b/stack/rest/src/main/java/org/apache/usergrid/rest/management/users/organizations/OrganizationsResource.java
index dfbe7af..e9a5f53 100644
--- a/stack/rest/src/main/java/org/apache/usergrid/rest/management/users/organizations/OrganizationsResource.java
+++ b/stack/rest/src/main/java/org/apache/usergrid/rest/management/users/organizations/OrganizationsResource.java
@@ -95,6 +95,10 @@
 
         management.activateOrganization( organization );
 
+        // DO NOT REMOVE - used for external classes to hook into any post-processing
+        management.createOrganizationPostProcessing(organization, null);
+        management.addUserToOrganizationPostProcessing(user, organizationName, null);
+
         return response;
     }
 
@@ -122,6 +126,10 @@
 
         management.activateOrganization( organization );
 
+        // DO NOT REMOVE - used for external classes to hook into any post-processing
+        management.createOrganizationPostProcessing(organization, null);
+        management.addUserToOrganizationPostProcessing(user, organizationName, null);
+
         return response;
     }
 
@@ -142,6 +150,10 @@
 
         OrganizationInfo organization = management.getOrganizationByName( organizationName );
         management.addAdminUserToOrganization( user, organization, true );
+
+        // DO NOT REMOVE - used for external classes to hook into any post-processing
+        management.addUserToOrganizationPostProcessing(user, organizationName, null);
+
         response.setData( organization );
         return response;
     }
@@ -160,6 +172,10 @@
 
         OrganizationInfo organization = management.getOrganizationByUuid( UUID.fromString( organizationIdStr ) );
         management.addAdminUserToOrganization( user, organization, true );
+
+        // DO NOT REMOVE - used for external classes to hook into any post-processing
+        management.addUserToOrganizationPostProcessing(user, organization.getName(), null);
+
         response.setData( organization );
         return response;
     }
@@ -182,6 +198,10 @@
 
         OrganizationInfo organization = management.getOrganizationByUuid( UUID.fromString( organizationIdStr ) );
         management.removeAdminUserFromOrganization( user.getUuid(), organization.getUuid() );
+
+        // DO NOT REMOVE - used for external classes to hook into any post-processing
+        management.removeUserFromOrganizationPostProcessing(user, organization.getName(), null);
+
         response.setData( organization );
         return response;
     }
@@ -203,6 +223,10 @@
         response.setAction( "remove user from organization" );
         OrganizationInfo organization = management.getOrganizationByName( organizationName );
         management.removeAdminUserFromOrganization( user.getUuid(), organization.getUuid() );
+
+        // DO NOT REMOVE - used for external classes to hook into any post-processing
+        management.removeUserFromOrganizationPostProcessing(user, organizationName, null);
+
         response.setData( organization );
 
         return response;
diff --git a/stack/rest/src/main/java/org/apache/usergrid/rest/security/SecuredResourceFilterFactory.java b/stack/rest/src/main/java/org/apache/usergrid/rest/security/SecuredResourceFilterFactory.java
index 85e6210..ede6c35 100644
--- a/stack/rest/src/main/java/org/apache/usergrid/rest/security/SecuredResourceFilterFactory.java
+++ b/stack/rest/src/main/java/org/apache/usergrid/rest/security/SecuredResourceFilterFactory.java
@@ -254,7 +254,7 @@
                 logger.trace("SysadminLocalhostFilter.authorize");
             }
 
-            if (!request.getSecurityContext().isUserInRole( ROLE_SERVICE_ADMIN )) {
+            if ( !isServiceAdmin() && !isBasicAuthServiceAdmin(request)) {
                 // not a sysadmin request
                 return;
             }
@@ -303,7 +303,7 @@
                 logger.trace("OrganizationFilter.authorize");
             }
 
-            if ( !isPermittedAccessToOrganization( getOrganizationIdentifier() ) ) {
+            if ( !isPermittedAccessToOrganization( getOrganizationIdentifier() ) && !isBasicAuthServiceAdmin(request) ) {
                 if (logger.isTraceEnabled()) {
                     logger.trace("No organization access authorized");
                 }
@@ -375,7 +375,7 @@
                     throw mappableSecurityException( "unauthorized", "No application guest access authorized" );
                 }
             }
-            if ( !isPermittedAccessToApplication( getApplicationIdentifier() ) ) {
+            if ( !isPermittedAccessToApplication( getApplicationIdentifier() ) && !isBasicAuthServiceAdmin(request) ) {
                 throw mappableSecurityException( "unauthorized", "No application access authorized" );
             }
         }
@@ -397,7 +397,7 @@
                 logger.trace("SystemFilter.authorize");
             }
             try {
-                if (!request.getSecurityContext().isUserInRole( ROLE_SERVICE_ADMIN )) {
+                if (!isBasicAuthServiceAdmin(request) && !isServiceAdmin()) {
                     if (logger.isTraceEnabled()) {
                         logger.trace("You are not the system admin.");
                     }
@@ -429,7 +429,7 @@
                 if (logger.isTraceEnabled()) {
                     logger.trace("AdminUserFilter.authorize");
                 }
-                if (!isUser( getUserIdentifier() ) && !isServiceAdmin() ) {
+                if (!isUser( getUserIdentifier() ) && !isServiceAdmin() && !isBasicAuthServiceAdmin(request) ) {
                     throw mappableSecurityException( "unauthorized", "No admin user access authorized" );
                 }
             }
@@ -539,5 +539,11 @@
         }
     }
 
+    private static boolean isBasicAuthServiceAdmin(ContainerRequestContext request){
+
+        return request.getSecurityContext().isUserInRole( ROLE_SERVICE_ADMIN );
+
+    }
+
 
 }
diff --git a/stack/rest/src/main/java/org/apache/usergrid/rest/security/shiro/filters/BasicAuthSecurityFilter.java b/stack/rest/src/main/java/org/apache/usergrid/rest/security/shiro/filters/BasicAuthSecurityFilter.java
index a5d7272..5594a1c 100644
--- a/stack/rest/src/main/java/org/apache/usergrid/rest/security/shiro/filters/BasicAuthSecurityFilter.java
+++ b/stack/rest/src/main/java/org/apache/usergrid/rest/security/shiro/filters/BasicAuthSecurityFilter.java
@@ -49,6 +49,9 @@
             logger.trace("Filtering: {}", request.getUriInfo().getBaseUri());
         }
 
+        if( bypassSecurityCheck(request) ){
+            return;
+        }
 
         Map<String, String> auth_types = getAuthTypes( request );
         if ( ( auth_types == null ) || !auth_types.containsKey( AUTH_BASIC_TYPE ) ) {
diff --git a/stack/rest/src/main/java/org/apache/usergrid/rest/security/shiro/filters/ClientCredentialsSecurityFilter.java b/stack/rest/src/main/java/org/apache/usergrid/rest/security/shiro/filters/ClientCredentialsSecurityFilter.java
index 83e53c1..486d105 100644
--- a/stack/rest/src/main/java/org/apache/usergrid/rest/security/shiro/filters/ClientCredentialsSecurityFilter.java
+++ b/stack/rest/src/main/java/org/apache/usergrid/rest/security/shiro/filters/ClientCredentialsSecurityFilter.java
@@ -55,6 +55,10 @@
             logger.trace("Filtering: {}", request.getUriInfo().getBaseUri());
         }
 
+        if( bypassSecurityCheck(request) ){
+            return;
+        }
+
         String clientId = httpServletRequest.getParameter( "client_id" );
         String clientSecret = httpServletRequest.getParameter( "client_secret" );
 
diff --git a/stack/rest/src/main/java/org/apache/usergrid/rest/security/shiro/filters/OAuth2AccessTokenSecurityFilter.java b/stack/rest/src/main/java/org/apache/usergrid/rest/security/shiro/filters/OAuth2AccessTokenSecurityFilter.java
index 03da0e8..7b35df6 100644
--- a/stack/rest/src/main/java/org/apache/usergrid/rest/security/shiro/filters/OAuth2AccessTokenSecurityFilter.java
+++ b/stack/rest/src/main/java/org/apache/usergrid/rest/security/shiro/filters/OAuth2AccessTokenSecurityFilter.java
@@ -27,6 +27,7 @@
 import org.apache.usergrid.management.ApplicationInfo;
 import org.apache.usergrid.management.OrganizationInfo;
 import org.apache.usergrid.management.UserInfo;
+import org.apache.usergrid.management.exceptions.ExternalSSOProviderAdminUserNotFoundException;
 import org.apache.usergrid.management.exceptions.ManagementException;
 import org.apache.usergrid.security.AuthPrincipalInfo;
 import org.apache.usergrid.security.AuthPrincipalType;
@@ -74,6 +75,10 @@
             logger.trace("Filtering: {}", request.getUriInfo().getBaseUri());
         }
 
+        if( bypassSecurityCheck(request) ){
+            return;
+        }
+
         try {
             try {
 
@@ -104,7 +109,10 @@
                     throw mappableSecurityException( EXPIRED_ACCESS_TOKEN_ERROR );
                 } catch (InvalidTokenException ite) {
                     throw mappableSecurityException( INVALID_AUTH_ERROR );
-                } catch (IndexOutOfBoundsException ioobe) {
+                }
+                catch (ExternalSSOProviderAdminUserNotFoundException eAdminUserNotFound){
+                    throw mappableSecurityException(EXTERNALSSOPROVIDER_UNACTIVATED_ADMINUSER);
+                } catch(IndexOutOfBoundsException ioobe) {
                     // token is just some rubbish string
                     throw mappableSecurityException( BAD_ACCESS_TOKEN_ERROR );
                 } catch (Exception e) {
diff --git a/stack/rest/src/main/java/org/apache/usergrid/rest/security/shiro/filters/SecurityFilter.java b/stack/rest/src/main/java/org/apache/usergrid/rest/security/shiro/filters/SecurityFilter.java
index e0dadba..817464f 100644
--- a/stack/rest/src/main/java/org/apache/usergrid/rest/security/shiro/filters/SecurityFilter.java
+++ b/stack/rest/src/main/java/org/apache/usergrid/rest/security/shiro/filters/SecurityFilter.java
@@ -132,4 +132,16 @@
         }
         return auth_types;
     }
+
+    public static boolean bypassSecurityCheck( ContainerRequestContext request ){
+
+        // if this is a CORS Pre-Flight request, we can skip the security check
+        // OPTIONS requests do not have access into Usergrid data, Jersey default handles these requests
+        if( request.getMethod().equalsIgnoreCase("options") ){
+            return true;
+        }
+
+        return false;
+
+    }
 }
diff --git a/stack/rest/src/test/java/org/apache/usergrid/rest/ITSetup.java b/stack/rest/src/test/java/org/apache/usergrid/rest/ITSetup.java
index 510c992..642ae48 100644
--- a/stack/rest/src/test/java/org/apache/usergrid/rest/ITSetup.java
+++ b/stack/rest/src/test/java/org/apache/usergrid/rest/ITSetup.java
@@ -17,23 +17,22 @@
 package org.apache.usergrid.rest;
 
 
-import java.net.URI;
-import java.util.Properties;
-
-import javax.ws.rs.core.UriBuilder;
-
 import org.apache.usergrid.cassandra.SpringResource;
-import org.apache.usergrid.management.ApplicationCreator;
 import org.apache.usergrid.management.ManagementService;
 import org.apache.usergrid.persistence.EntityManagerFactory;
-import org.apache.usergrid.security.providers.SignInProviderFactory;
-import org.apache.usergrid.security.tokens.TokenService;
-import org.apache.usergrid.services.ServiceManagerFactory;
 import org.apache.usergrid.setup.ConcurrentProcessSingleton;
+import org.junit.rules.TestRule;
+import org.junit.runner.Description;
+import org.junit.runners.model.Statement;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.Properties;
 
 
 /** A {@link org.junit.rules.TestRule} that sets up services. */
-public class ITSetup  {
+public class ITSetup implements TestRule {
+    private static final Logger logger = LoggerFactory.getLogger( ITSetup.class );
 
     private static ITSetup instance;
 
@@ -56,7 +55,7 @@
 
     }
 
-    public static synchronized ITSetup getInstance(){
+    public static synchronized ITSetup getInstance() {
         if(instance == null){
             instance = new ITSetup();
         }
@@ -79,4 +78,39 @@
         return properties;
     }
 
+    public SpringResource getSpringResource() {
+        return springResource;
+    }
+
+
+    @Override
+    public Statement apply( Statement base, Description description ) {
+        return statement( base, description );
+    }
+
+
+    private Statement statement( final Statement base, final Description description ) {
+        return new Statement() {
+            @Override
+            public void evaluate() throws Throwable {
+                before( description );
+
+                try {
+                    base.evaluate();
+                }
+                finally {
+                    after( description );
+                }
+            }
+        };
+    }
+
+    protected void before( Description description ) throws Throwable {
+        logger.info( "Setting up for {}", description.getDisplayName() );
+    }
+
+    protected void after( Description description ) {
+        logger.info( "Tearing down for {}", description.getDisplayName() );
+    }
+
 }
diff --git a/stack/rest/src/test/java/org/apache/usergrid/rest/applications/events/EventsResourceIT.java b/stack/rest/src/test/java/org/apache/usergrid/rest/applications/events/EventsResourceIT.java
index f4f8630..965105b 100644
--- a/stack/rest/src/test/java/org/apache/usergrid/rest/applications/events/EventsResourceIT.java
+++ b/stack/rest/src/test/java/org/apache/usergrid/rest/applications/events/EventsResourceIT.java
@@ -17,26 +17,21 @@
 package org.apache.usergrid.rest.applications.events;
 
 
-import java.util.LinkedHashMap;
-import java.util.Map;
-
-import javax.ws.rs.core.MediaType;
-
-import com.fasterxml.jackson.databind.JsonNode;
-import java.io.IOException;
-
 import org.apache.usergrid.rest.test.resource.AbstractRestIT;
 import org.apache.usergrid.rest.test.resource.model.ApiResponse;
 import org.apache.usergrid.rest.test.resource.model.Collection;
-import org.apache.usergrid.rest.test.resource.model.Token;
+import org.apache.usergrid.rest.test.resource.model.QueryParameters;
 import org.junit.Test;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.LinkedHashMap;
+import java.util.Map;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
-import org.junit.Ignore;
 
 
 public class EventsResourceIT extends AbstractRestIT {
@@ -45,7 +40,6 @@
 
 
     @Test
-    @Ignore("Pending https://issues.apache.org/jira/browse/USERGRID-1118. Events not working yet")
     public void testEventPostandGet() throws IOException {
 
         Map<String, Object> payload = new LinkedHashMap<String, Object>();
@@ -108,23 +102,25 @@
             collection = this.app().collection( "events" )
                 .get();
 
-            assertEquals("Expected Advertising", advertising, ((Map<String, Object>) ((Map<String, Object>) collection.getResponse().getProperties().get("messages")).get(0)).get("uuid").toString());
+           assertEquals("Expected Advertising", advertising, ((Map)((ArrayList) collection.getResponse().getProperties().get("messages" )).get(0 ) ).get("uuid" ).toString());
             lastId = collection.getResponse().getProperties().get("last").toString();
         }
 
         // check sales event in queue
-        collection = this.app().collection( "events" )
-            .get();
+        QueryParameters queryParameters = new QueryParameters();
+        queryParameters.addParam( "last",lastId );
+
+        collection = this.app().collection( "events" ).get(queryParameters);
 
 
-        assertEquals( "Expected Sales", sales,((Map<String, Object>) ((Map<String, Object>) collection.getResponse().getProperties().get("messages")).get(0)).get("uuid").toString());
+        assertEquals( "Expected Sales", sales,((Map<String, Object>) ((ArrayList) collection.getResponse().getProperties().get("messages")).get(0)).get("uuid").toString());
         lastId = collection.getResponse().getProperties().get("last").toString();
 
 
         // check marketing event in queue
-        collection = this.app().collection( "events" )
-            .get();
+        queryParameters.addParam( "last",lastId );
+        collection = this.app().collection( "events" ).get(queryParameters);
 
-        assertEquals( "Expected Marketing", marketing, ((Map<String, Object>) ((Map<String, Object>) collection.getResponse().getProperties().get("messages")).get(0)).get("uuid").toString());
+        assertEquals( "Expected Marketing", marketing, ((Map<String, Object>) ((ArrayList) collection.getResponse().getProperties().get("messages")).get(0)).get("uuid").toString());
     }
 }
diff --git a/stack/rest/src/test/java/org/apache/usergrid/rest/management/ExternalSSOEnabledIT.java b/stack/rest/src/test/java/org/apache/usergrid/rest/management/ExternalSSOEnabledIT.java
new file mode 100644
index 0000000..cae65df
--- /dev/null
+++ b/stack/rest/src/test/java/org/apache/usergrid/rest/management/ExternalSSOEnabledIT.java
@@ -0,0 +1,175 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.usergrid.rest.management;
+
+import io.jsonwebtoken.Jwts;
+import io.jsonwebtoken.SignatureAlgorithm;
+import io.jsonwebtoken.impl.crypto.RsaProvider;
+import org.apache.commons.collections4.map.HashedMap;
+import org.apache.usergrid.cassandra.SpringResource;
+import org.apache.usergrid.persistence.index.utils.UUIDUtils;
+import org.apache.usergrid.rest.test.resource.AbstractRestIT;
+import org.apache.usergrid.rest.test.resource.RestClient;
+import org.apache.usergrid.rest.test.resource.model.ApiResponse;
+import org.apache.usergrid.rest.test.resource.model.Entity;
+import org.apache.usergrid.security.sso.ApigeeSSO2Provider;
+import org.codehaus.jackson.JsonNode;
+import org.junit.Before;
+import org.junit.Ignore;
+import org.junit.Test;
+
+import java.security.*;
+import java.util.HashMap;
+import java.util.Map;
+
+import static org.junit.Assert.*;
+
+/**
+ * Created by ayeshadastagiri on 7/20/16.
+ */
+@Ignore("Need to figure out a way to set the public key for Mock server.")
+public class ExternalSSOEnabledIT extends AbstractRestIT {
+
+    Key key;
+    PublicKey publicKey;
+    PrivateKey privateKey;
+    String compactJws;
+    String username = "SSOadminuser" + UUIDUtils.newTimeUUID();
+    ApigeeSSO2Provider apigeeSSO2ProviderTest;
+    //SSO2 implementation
+    public static final String USERGRID_EXTERNAL_SSO_ENABLED = "usergrid.external.sso.enabled";
+    public static final String USERGRID_EXTERNAL_PROVIDER =    "usergrid.external.sso.provider";
+
+    public ExternalSSOEnabledIT() throws Exception {
+
+    }
+
+    @Before
+    public void setup() throws NoSuchAlgorithmException {
+        generateKey();
+    }
+
+    private void generateKey() {
+        KeyPair kp = RsaProvider.generateKeyPair(1024);
+        publicKey = kp.getPublic();
+        privateKey = kp.getPrivate();
+    }
+
+    private String genrateToken(){
+        Map<String, Object> claims = new HashedMap<String, Object>();
+        claims.put("jti","c7df0339-3847-450b-a925-628ef237953a");
+        claims.put("sub","b6d62259-217b-4e96-8f49-e00c366e4fed");
+        claims.put("scope","size = 5");
+        claims.put("client_id", "edgecli");
+        claims.put("azp","edgecli");
+        claims.put("grant_type" ,"password");
+        claims.put("user_id","b6d62259-217b-4e96-8f49-e00c366e4fed");
+        claims.put( "origin","usergrid");
+        claims.put("user_name","AyeshaSSOUser");
+        claims.put("email", "adastagiri+ssotesting@apigee.com");
+        claims.put( "rev_sig","dfe5d0d3");
+        claims.put("iat","1466550862");
+        claims.put("exp", System.currentTimeMillis() + 1000);
+        claims.put("iss", "https://login.apigee.com/oauth/token");
+        claims.put( "zid","uaa");
+        claims.put( "aud"," size = 6");
+        claims.put("grant_type","password");
+
+        String jwt = Jwts.builder().setClaims(claims).signWith(SignatureAlgorithm.RS256, privateKey).compact();
+        return jwt;
+
+    }
+
+    @Test
+    public void SuperUserTestsFor() throws NoSuchAlgorithmException {
+
+        // create a admin user.
+        RestClient restClient = clientSetup.getRestClient();
+
+        //Create adminUser values
+        Entity adminUserPayload = new Entity();
+        adminUserPayload.put("username", "TestUser");
+        adminUserPayload.put("name", username);
+        adminUserPayload.put("email", "adastagiri+ssotesting@apigee.com");
+        adminUserPayload.put("password", username);
+
+        //create adminUser
+        ApiResponse adminUserEntityResponse = management().orgs().org(clientSetup.getOrganizationName()).users().post(ApiResponse.class, adminUserPayload);
+
+        Entity adminUserResponse = new Entity(adminUserEntityResponse);
+        //verify that the response contains the correct data
+        assertNotNull(adminUserResponse);
+        assertEquals("TestUser", adminUserResponse.get("username"));
+
+        Map<String, String> props = new HashMap<String, String>();
+
+        props.put( USERGRID_EXTERNAL_SSO_ENABLED, "true" );
+        props.put( USERGRID_EXTERNAL_PROVIDER, "apigee" );
+        pathResource( "testproperties" ).post( props );
+
+        // /management/me --> superuser and query params --> Generate a super usesr token.
+        Map<String, Object> loginInfo = new HashMap<String, Object>() {{
+            put( "username", "superuser" );
+            put( "password", "superpassword" );
+            put( "grant_type", "password" );
+        }};
+        ApiResponse postResponse2 = pathResource( "management/token" ).post( false,ApiResponse.class,loginInfo );
+        assertTrue(postResponse2.getAccessToken() != null );
+
+
+        // /orgs  create an org with superuser credentials.
+        // /management/me --> superuser and query params --> Generate a super usesr token.
+        Map<String, Object> orgDetails = new HashMap<String, Object>() {{
+            put( "email", "adastagiri+ssotesting@apigee.com" );
+            put( "name", "testuser" );
+            put( "organization", username );
+        }};
+
+        context().getToken().put("access_token",postResponse2.getAccessToken());
+        postResponse2 = pathResource( "management/orgs" ).post( true,ApiResponse.class,orgDetails);
+        assertTrue(postResponse2.getData() != null);
+
+        postResponse2 = pathResource("management/orgs").get(ApiResponse.class,true);
+        assertTrue(postResponse2 != null);
+
+
+        compactJws = genrateToken();
+
+        SpringResource.getInstance().getAppContext().getBean(ApigeeSSO2Provider.class).setPublicKey( publicKey  );
+        context().getToken().put("access_token",compactJws);
+        // /management/me --> admin user and jwt token. Return the user information and "token" should have jwt token.
+        JsonNode responseToken = management().me().get(JsonNode.class,true);
+        assertTrue(responseToken.get("access_token") != null);
+
+
+        // /management/me --> admin and query params --> Generate a super usesr token.
+        Map<String, Object> loginInfo1 = new HashMap<String, Object>() {{
+            put( "username", "TestUser" );
+            put( "password", username );
+            put( "grant_type", "password" );
+        }};
+
+        // /managment/token -> adminusername and password --> should fail.
+        ApiResponse postResponse1 = pathResource("management/token").post(false, ApiResponse.class,loginInfo1);
+//        fail( "External SSO integration is enabled, admin users must login via provider: "+ USERGRID_EXTERNAL_SSO_PROVIDER_URL);
+
+
+
+
+    }
+}
diff --git a/stack/rest/src/test/java/org/apache/usergrid/rest/management/ManagementResourceIT.java b/stack/rest/src/test/java/org/apache/usergrid/rest/management/ManagementResourceIT.java
index 6bf9117..1da00d4 100644
--- a/stack/rest/src/test/java/org/apache/usergrid/rest/management/ManagementResourceIT.java
+++ b/stack/rest/src/test/java/org/apache/usergrid/rest/management/ManagementResourceIT.java
@@ -25,6 +25,7 @@
 import org.apache.usergrid.rest.test.resource.AbstractRestIT;
 import org.apache.usergrid.rest.test.resource.model.*;
 import org.apache.usergrid.rest.test.resource.model.Collection;
+import org.apache.usergrid.security.tokens.cassandra.TokenServiceImpl;
 import org.junit.Before;
 import org.junit.Test;
 import org.slf4j.Logger;
@@ -37,7 +38,8 @@
 import java.io.IOException;
 import java.util.*;
 
-import static org.apache.usergrid.security.tokens.cassandra.TokenServiceImpl.USERGRID_CENTRAL_URL;
+import static org.apache.usergrid.security.tokens.cassandra.TokenServiceImpl.USERGRID_EXTERNAL_SSO_PROVIDER_URL;
+import static org.apache.usergrid.security.tokens.cassandra.TokenServiceImpl.USERGRID_EXTERNAL_SSO_ENABLED;
 import static org.apache.usergrid.utils.MapUtils.hashMap;
 import static org.junit.Assert.*;
 
@@ -632,7 +634,7 @@
 
         String suToken = clientSetup.getSuperuserToken().getAccessToken();
         Map<String, String> props = new HashMap<String, String>();
-        props.put( USERGRID_CENTRAL_URL, getBaseURI().toURL().toExternalForm() );
+        props.put(USERGRID_EXTERNAL_SSO_PROVIDER_URL, getBaseURI().toURL().toExternalForm() );
         pathResource( "testproperties" ).post( props );
 
 
@@ -652,7 +654,7 @@
 
         // unset the Usergrid Central SSO URL so it does not interfere with other tests
 
-        props.put( USERGRID_CENTRAL_URL, "" );
+        props.put(USERGRID_EXTERNAL_SSO_PROVIDER_URL, "" );
         pathResource( "testproperties" ).post( props );
     }
 
@@ -671,7 +673,8 @@
 
         String suToken = clientSetup.getSuperuserToken().getAccessToken();
         Map<String, String> props = new HashMap<String, String>();
-        props.put( USERGRID_CENTRAL_URL, getBaseURI().toURL().toExternalForm() );
+        props.put(USERGRID_EXTERNAL_SSO_ENABLED, "true");
+        props.put(USERGRID_EXTERNAL_SSO_PROVIDER_URL, getBaseURI().toURL().toExternalForm() );
         pathResource( "testproperties" ).post( props );
 
         try {
@@ -685,14 +688,15 @@
                     put( "grant_type", "password" );
                 }};
                 ApiResponse postResponse = pathResource( "management/token" ).post( false, ApiResponse.class, loginInfo );
-                fail( "Login as Admin User must fail when validate external tokens is enabled" );
+                fail( "External SSO integration is enabled, admin users must login via provider using configured property: "+
+                    TokenServiceImpl.USERGRID_EXTERNAL_SSO_PROVIDER );
 
             } catch (ClientErrorException actual) {
                 assertEquals( 400, actual.getResponse().getStatus() );
                 String errorMsg = actual.getResponse().readEntity( JsonNode.class )
                     .get( "error_description" ).toString();
                 logger.error( "ERROR: " + errorMsg );
-                assertTrue( errorMsg.contains( "Admin Users must login via" ) );
+                assertTrue( errorMsg.contains( "admin users must login via" ) );
 
             } catch (Exception e) {
                 fail( "We expected a ClientErrorException" );
@@ -709,11 +713,31 @@
             String accessToken = postResponse2.getAccessToken();
             assertNotNull( accessToken );
 
+            //Superuser : GET -> get tokenInfo with access_token
+            ApiResponse getResponse3 = pathResource("management/me").get(ApiResponse.class,new QueryParameters()
+                .addParam("grant_type", "password").addParam("password", "superpassword")
+                .addParam("username", "superuser"),false);
+
+            assertNotNull(getResponse3.getAccessToken());
+
+            //Superuser : POST -> Add org using super user credentials.
+            Map<String, Object> orgAdminUserInfo = new HashMap<String, Object>() {{
+                put( "username", username+"test" );
+                put("password","RandomPassword");
+                put("email",username+"@gmail.com");
+                put( "organization", username+"RandomOrgName" );
+            }};
+            ApiResponse postResponse4 = pathResource("management/orgs")
+                .post(false,orgAdminUserInfo,new QueryParameters().addParam("access_token",getResponse3.getAccessToken()));
+            assertNotNull(postResponse4.getData());
+
+
         } finally {
 
             // turn off validate external tokens by un-setting the usergrid.central.url
 
-            props.put( USERGRID_CENTRAL_URL, "" );
+            props.put(USERGRID_EXTERNAL_SSO_PROVIDER_URL, "" );
+            props.put(USERGRID_EXTERNAL_SSO_ENABLED, "");
             pathResource( "testproperties" ).post( props );
         }
     }
diff --git a/stack/services/pom.xml b/stack/services/pom.xml
index c634f1c..23dc9c7 100644
--- a/stack/services/pom.xml
+++ b/stack/services/pom.xml
@@ -175,6 +175,13 @@
             <artifactId>shiro-core</artifactId>
         </dependency>
 
+        <!-- http://mvnrepository.com/artifact/io.jsonwebtoken/jjwt -->
+        <dependency>
+            <groupId>io.jsonwebtoken</groupId>
+            <artifactId>jjwt</artifactId>
+            <version>0.6.0</version>
+        </dependency>
+
         <dependency>
             <groupId>org.apache.shiro</groupId>
             <artifactId>shiro-spring</artifactId>
diff --git a/stack/services/src/main/java/org/apache/usergrid/management/ManagementService.java b/stack/services/src/main/java/org/apache/usergrid/management/ManagementService.java
index 1d74ec3..5ac1713 100644
--- a/stack/services/src/main/java/org/apache/usergrid/management/ManagementService.java
+++ b/stack/services/src/main/java/org/apache/usergrid/management/ManagementService.java
@@ -157,7 +157,7 @@
 
 	UserInfo getAdminUserInfoFromAccessToken( String token ) throws Exception;
 
-	Map<String, Object> getAdminUserOrganizationData( UserInfo user, boolean deep ) throws Exception;
+	Map<String, Object> getAdminUserOrganizationData(UserInfo user, boolean includeApps, boolean includeOrgUsers) throws Exception;
 
 	Map<String, Object> getAdminUserOrganizationData( UUID userId ) throws Exception;
 
@@ -371,4 +371,21 @@
 	void updateOrganizationConfig( OrganizationConfig organizationConfig ) throws Exception;
 
 	Observable<Id> deleteAllEntities(final UUID applicationId,final int limit);
+
+
+    // DO NOT REMOVE BELOW METHODS, THEY ARE HERE TO ALLOW EXTERNAL CLASSES TO OVERRIDE AND HOOK INTO POST PROCESSING
+    void createOrganizationPostProcessing( final OrganizationInfo orgInfo,
+                                           final Map<String, String> properties ) throws Exception;
+
+    void createAdminUserPostProcessing( final UserInfo userInfo,
+                                        final Map<String, String> properties ) throws Exception;
+
+    void addUserToOrganizationPostProcessing( final UserInfo userInfo,
+                                              final String organizationName,
+                                              final Map<String, String> properties ) throws Exception;
+
+    void removeUserFromOrganizationPostProcessing( final UserInfo userInfo,
+                                              final String organizationName,
+                                              final Map<String, String> properties ) throws Exception;
+
 }
diff --git a/stack/services/src/main/java/org/apache/usergrid/management/cassandra/AccountCreationPropsImpl.java b/stack/services/src/main/java/org/apache/usergrid/management/cassandra/AccountCreationPropsImpl.java
index 7c6a091..552f74b 100644
--- a/stack/services/src/main/java/org/apache/usergrid/management/cassandra/AccountCreationPropsImpl.java
+++ b/stack/services/src/main/java/org/apache/usergrid/management/cassandra/AccountCreationPropsImpl.java
@@ -86,7 +86,9 @@
     public String getProperty( String name ) {
         String propertyValue = properties.getProperty( name );
         if ( isBlank( propertyValue ) ) {
-            logger.warn( "Missing value for {}", name );
+            if ( logger.isDebugEnabled() ) {
+                logger.debug("Missing value for {}", name);
+            }
             propertyValue = null;
         }
         return propertyValue;
diff --git a/stack/services/src/main/java/org/apache/usergrid/management/cassandra/ManagementServiceImpl.java b/stack/services/src/main/java/org/apache/usergrid/management/cassandra/ManagementServiceImpl.java
index b56f211..21c6983 100644
--- a/stack/services/src/main/java/org/apache/usergrid/management/cassandra/ManagementServiceImpl.java
+++ b/stack/services/src/main/java/org/apache/usergrid/management/cassandra/ManagementServiceImpl.java
@@ -548,11 +548,24 @@
             if ( !validateAdminInfo( username, name, email, password ) ) {
                 return null;
             }
-            if ( areActivationChecksDisabled() ) {
-                user = createAdminUserInternal( null, username, name, email, password, true, false, userProperties );
+
+            // sysadmin can omit password field in the request and that will try to fetch an existing admin user to
+            // associate to the requested organization
+            if((password == null || password.isEmpty()) && SubjectUtils.isServiceAdmin()){
+                user = getAdminUserByEmail(email);
+                if(user == null ){
+                    throw new IllegalArgumentException("Password should be sent in the request or should be a valid admin user email.");
+                }
             }
-            else {
-                user = createAdminUserInternal( null, username, name, email, password, activated, disabled, userProperties );
+
+
+            if(user == null) {
+                // if external SSO is enabled and we're adding a user to an org, auto activate the user
+                if (tokens.isExternalSSOProviderEnabled() || areActivationChecksDisabled()) {
+                    user = createAdminUser(null, username, name, email, password, true, false, userProperties);
+                } else {
+                    user = createAdminUser(null, username, name, email, password, activated, disabled, userProperties);
+                }
             }
 
             if(logger.isTraceEnabled()){
@@ -903,10 +916,13 @@
             user.getEmail(), user.getConfirmed(), user.getActivated(), user.getDisabled(),
             user.getDynamicProperties(), true );
 
+
         // special case for sysadmin and test account only
         if (    !user.getEmail().equals( properties.getProperty( PROPERTIES_SYSADMIN_LOGIN_EMAIL ) )
              && !user.getEmail().equals( properties .getProperty( PROPERTIES_TEST_ACCOUNT_ADMIN_USER_EMAIL ) ) ) {
-            this.startAdminUserActivationFlow( organizationId, userInfo );
+            if(!tokens.isExternalSSOProviderEnabled()) {
+                this.startAdminUserActivationFlow(organizationId, userInfo);
+            }
         }
 
         return userInfo;
@@ -951,7 +967,7 @@
     }
 
 
-    private boolean validateAdminInfo( String username, String name, String email, String password ) throws Exception {
+    protected boolean validateAdminInfo( String username, String name, String email, String password ) throws Exception {
         if ( email == null ) {
             return false;
         }
@@ -961,18 +977,18 @@
 
         EntityManager em = emf.getEntityManager( smf.getManagementAppId() );
 
-        if ( !em.isPropertyValueUniqueForEntity( "user", "username", username ) ) {
+        if ( !( tokens.isExternalSSOProviderEnabled() && SubjectUtils.isServiceAdmin()) && !em.isPropertyValueUniqueForEntity( "user", "username", username ) ) {
             throw new DuplicateUniquePropertyExistsException( "user", "username", username );
         }
 
-        if ( !em.isPropertyValueUniqueForEntity( "user", "email", email ) ) {
+        if ( !(tokens.isExternalSSOProviderEnabled()&& SubjectUtils.isServiceAdmin())  && !em.isPropertyValueUniqueForEntity( "user", "email", email ) ) {
             throw new DuplicateUniquePropertyExistsException( "user", "email", email );
         }
         return true;
     }
 
 
-    private UserInfo createAdminUserInternal( UUID organizationId, String username, String name, String email, String password,
+    protected UserInfo createAdminUserInternal( UUID organizationId, String username, String name, String email, String password,
                                               boolean activated, boolean disabled, Map<String, Object> userProperties )
             throws Exception {
         logger.info( "createAdminUserInternal: {}", username );
@@ -1525,6 +1541,7 @@
 
     @Override
     public String getAccessTokenForAdminUser( UUID userId, long duration ) throws Exception {
+
         return getTokenForPrincipal( ACCESS, null, smf.getManagementAppId(), ADMIN_USER, userId, duration );
     }
 
@@ -1618,7 +1635,7 @@
     @Override
     public Map<String, Object> getAdminUserOrganizationData( UUID userId ) throws Exception {
         UserInfo user = getAdminUserByUuid( userId );
-        return getAdminUserOrganizationData( user, true );
+        return getAdminUserOrganizationData( user, true, true);
     }
 
 
@@ -1630,7 +1647,7 @@
 
 
     @Override
-    public Map<String, Object> getAdminUserOrganizationData( UserInfo user, boolean deep ) throws Exception {
+    public Map<String, Object> getAdminUserOrganizationData(UserInfo user, boolean includeApps, boolean includeOrgUsers) throws Exception {
 
         Map<String, Object> json = new HashMap<>();
 
@@ -1659,10 +1676,11 @@
             jsonOrganization.put( PROPERTY_UUID, organization.getKey() );
             jsonOrganization.put( "properties", getOrganizationByUuid( organization.getKey() ).getProperties() );
 
-            if ( deep ) {
-                BiMap<UUID, String> applications = getApplicationsForOrganization( organization.getKey() );
-                jsonOrganization.put( "applications", applications.inverse() );
-
+            if ( includeApps ) {
+                BiMap<UUID, String> applications = getApplicationsForOrganization(organization.getKey());
+                jsonOrganization.put("applications", applications.inverse());
+            }
+            if ( includeOrgUsers ){
                 List<UserInfo> users = getAdminUsersForOrganization( organization.getKey() );
                 Map<String, Object> jsonUsers = new HashMap<>();
                 for ( UserInfo u : users ) {
@@ -1722,7 +1740,9 @@
         invalidateManagementAppAuthCache();
 
         if ( email ) {
-            sendAdminUserInvitedEmail( user, organization );
+            if(!tokens.isExternalSSOProviderEnabled()) {
+                sendAdminUserInvitedEmail(user, organization);
+            }
         }
     }
 
@@ -3473,4 +3493,29 @@
         scopedCache.invalidate();
         localShiroCache.invalidateAll();
     }
+
+    @Override
+    public void createOrganizationPostProcessing( final OrganizationInfo orgInfo,
+                                                  final Map<String,String> properties ){
+        // do nothing, this is a hook for any classes extending the ManagementServiceInterface
+
+    }
+
+    @Override
+    public void createAdminUserPostProcessing( final UserInfo userInfo, final Map<String,String> properties){
+        // do nothing, this is a hook for any classes extending the ManagementServiceInterface
+    }
+
+    @Override
+    public void addUserToOrganizationPostProcessing( final UserInfo userInfo, final String organizationName,
+                                                          final Map<String,String> properties){
+        // do nothing, this is a hook for any classes extending the ManagementServiceInterface
+    }
+
+    @Override
+    public void removeUserFromOrganizationPostProcessing( final UserInfo userInfo, final String organizationName,
+                                                     final Map<String,String> properties){
+        // do nothing, this is a hook for any classes extending the ManagementServiceInterface
+    }
+
 }
diff --git a/stack/services/src/main/java/org/apache/usergrid/management/exceptions/ExternalSSOProviderAdminUserNotFoundException.java b/stack/services/src/main/java/org/apache/usergrid/management/exceptions/ExternalSSOProviderAdminUserNotFoundException.java
new file mode 100644
index 0000000..dabe9b9
--- /dev/null
+++ b/stack/services/src/main/java/org/apache/usergrid/management/exceptions/ExternalSSOProviderAdminUserNotFoundException.java
@@ -0,0 +1,28 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.usergrid.management.exceptions;
+
+/**
+ * Created by ayeshadastagiri on 7/8/16.
+ */
+public class ExternalSSOProviderAdminUserNotFoundException extends ManagementException {
+
+    public ExternalSSOProviderAdminUserNotFoundException(){super();}
+    public ExternalSSOProviderAdminUserNotFoundException(String arg0){super(arg0);}
+
+}
diff --git a/stack/services/src/main/java/org/apache/usergrid/security/shiro/utils/SubjectUtils.java b/stack/services/src/main/java/org/apache/usergrid/security/shiro/utils/SubjectUtils.java
index 3b4b37d..822e2c0 100644
--- a/stack/services/src/main/java/org/apache/usergrid/security/shiro/utils/SubjectUtils.java
+++ b/stack/services/src/main/java/org/apache/usergrid/security/shiro/utils/SubjectUtils.java
@@ -17,34 +17,28 @@
 package org.apache.usergrid.security.shiro.utils;
 
 
-import java.util.Map;
-import java.util.Set;
-import java.util.UUID;
-
+import com.google.common.collect.BiMap;
 import com.google.common.collect.HashBiMap;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.apache.usergrid.management.ApplicationInfo;
-import org.apache.usergrid.management.OrganizationInfo;
-import org.apache.usergrid.management.UserInfo;
-import org.apache.usergrid.security.shiro.PrincipalCredentialsToken;
-import org.apache.usergrid.security.shiro.principals.UserPrincipal;
-
 import org.apache.commons.lang.StringUtils;
 import org.apache.shiro.SecurityUtils;
 import org.apache.shiro.UnavailableSecurityManagerException;
 import org.apache.shiro.session.Session;
 import org.apache.shiro.subject.Subject;
+import org.apache.usergrid.management.ApplicationInfo;
+import org.apache.usergrid.management.OrganizationInfo;
+import org.apache.usergrid.management.UserInfo;
+import org.apache.usergrid.persistence.index.query.Identifier;
+import org.apache.usergrid.security.shiro.PrincipalCredentialsToken;
+import org.apache.usergrid.security.shiro.principals.UserPrincipal;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
-import com.google.common.collect.BiMap;
+import java.util.Map;
+import java.util.Set;
+import java.util.UUID;
 
 import static org.apache.commons.lang.StringUtils.isNotBlank;
-import org.apache.usergrid.persistence.index.query.Identifier;
-import static org.apache.usergrid.security.shiro.Realm.ROLE_ADMIN_USER;
-import static org.apache.usergrid.security.shiro.Realm.ROLE_APPLICATION_ADMIN;
-import static org.apache.usergrid.security.shiro.Realm.ROLE_APPLICATION_USER;
-import static org.apache.usergrid.security.shiro.Realm.ROLE_ORGANIZATION_ADMIN;
-import static org.apache.usergrid.security.shiro.Realm.ROLE_SERVICE_ADMIN;
+import static org.apache.usergrid.security.shiro.Realm.*;
 
 
 public class SubjectUtils {
diff --git a/stack/services/src/main/java/org/apache/usergrid/security/sso/ApigeeSSO2Provider.java b/stack/services/src/main/java/org/apache/usergrid/security/sso/ApigeeSSO2Provider.java
new file mode 100644
index 0000000..8ee8e03
--- /dev/null
+++ b/stack/services/src/main/java/org/apache/usergrid/security/sso/ApigeeSSO2Provider.java
@@ -0,0 +1,220 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.usergrid.security.sso;
+
+import io.jsonwebtoken.*;
+import org.apache.usergrid.corepersistence.util.CpNamingUtils;
+import org.apache.usergrid.management.ManagementService;
+import org.apache.usergrid.management.UserInfo;
+import org.apache.usergrid.management.exceptions.ExternalSSOProviderAdminUserNotFoundException;
+import org.apache.usergrid.security.AuthPrincipalInfo;
+import org.apache.usergrid.security.AuthPrincipalType;
+import org.apache.usergrid.security.tokens.TokenInfo;
+import org.apache.usergrid.security.tokens.exceptions.BadTokenException;
+import org.apache.usergrid.security.tokens.exceptions.ExpiredTokenException;
+import org.apache.usergrid.utils.JsonUtils;
+import org.apache.usergrid.utils.UUIDUtils;
+import org.glassfish.jersey.client.ClientConfig;
+import org.glassfish.jersey.jackson.JacksonFeature;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.springframework.beans.factory.annotation.Autowired;
+
+import javax.ws.rs.client.Client;
+import javax.ws.rs.client.ClientBuilder;
+import java.security.KeyFactory;
+import java.security.NoSuchAlgorithmException;
+import java.security.PublicKey;
+import java.security.spec.InvalidKeySpecException;
+import java.security.spec.X509EncodedKeySpec;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Properties;
+
+import static org.apache.commons.codec.binary.Base64.decodeBase64;
+
+/**
+ * Created by ayeshadastagiri on 6/22/16.
+ */
+public class ApigeeSSO2Provider implements ExternalSSOProvider {
+
+    private static final Logger logger = LoggerFactory.getLogger(ApigeeSSO2Provider.class);
+    private static final String RESPONSE_PUBLICKEY_VALUE = "value";
+    protected Properties properties;
+    protected ManagementService management;
+    protected Client client;
+    protected PublicKey publicKey;
+
+    public static final String USERGRID_EXTERNAL_PUBLICKEY_URL = "usergrid.external.sso.url";
+
+    public ApigeeSSO2Provider() {
+        ClientConfig clientConfig = new ClientConfig();
+        clientConfig.register(new JacksonFeature());
+        client = ClientBuilder.newClient(clientConfig);
+    }
+
+    public PublicKey getPublicKey(String keyUrl) {
+
+        if(keyUrl != null && !keyUrl.isEmpty()) {
+            try {
+                Map<String, Object> publicKey = client.target(keyUrl).request().get(Map.class);
+                String ssoPublicKey = publicKey.get(RESPONSE_PUBLICKEY_VALUE).toString().split("----\n")[1].split("\n---")[0];
+                byte[] publicBytes = decodeBase64(ssoPublicKey);
+                X509EncodedKeySpec keySpec = new X509EncodedKeySpec(publicBytes);
+                KeyFactory keyFactory = KeyFactory.getInstance("RSA");
+                PublicKey pubKey = keyFactory.generatePublic(keySpec);
+                return pubKey;
+            }
+            catch(Exception e){
+                throw new IllegalArgumentException("error getting public key");
+            }
+        }
+
+        return null;
+    }
+
+    @Override
+    public TokenInfo validateAndReturnTokenInfo(String token, long ttl) throws Exception {
+
+
+        UserInfo userInfo = validateAndReturnUserInfo(token, ttl);
+
+        if(userInfo == null){
+            throw new ExternalSSOProviderAdminUserNotFoundException("Unable to load user from token: "+token);
+        }
+
+        return new TokenInfo(UUIDUtils.newTimeUUID(), "access", 1, 1, 1, ttl,
+                new AuthPrincipalInfo(AuthPrincipalType.ADMIN_USER, userInfo.getUuid(),
+                    CpNamingUtils.MANAGEMENT_APPLICATION_ID), null);
+
+    }
+
+    @Override
+    public UserInfo validateAndReturnUserInfo(String token, long ttl) throws Exception {
+
+        Jws<Claims> payload = getClaims(token);
+
+        // this step super important to ensure the token is a valid token
+        validateClaims(payload);
+
+        UserInfo userInfo = management.getAdminUserByEmail(payload.getBody().get("email").toString());
+
+        return userInfo;
+    }
+
+    @Override
+    public Map<String, String> getDecodedTokenDetails(String token) throws Exception {
+
+       Jws<Claims> jws = getClaims(token);
+
+        Claims claims = jws.getBody();
+        Map<String, String> tokenDetails = new HashMap<>();
+
+        tokenDetails.put("username", (String)claims.get("user_name"));
+        tokenDetails.put("email", (String)claims.get("email"));
+        tokenDetails.put("expiry", claims.get("exp").toString());
+        tokenDetails.put("user_id", claims.get("user_id").toString());
+
+
+        return tokenDetails;
+
+    }
+
+    @Override
+    public Map<String, Object> getAllTokenDetails(String token, String keyUrl) throws Exception {
+        Jws<Claims> claims = getClaimsForKeyUrl(token,getPublicKey(keyUrl));
+        return JsonUtils.toJsonMap(claims.getBody());
+
+    }
+
+    @Override
+    public String getExternalSSOUrl() {
+        return properties.getProperty(USERGRID_EXTERNAL_PUBLICKEY_URL);
+    }
+
+    public Jws<Claims> getClaimsForKeyUrl(String token, PublicKey ssoPublicKey) throws NoSuchAlgorithmException, InvalidKeySpecException, BadTokenException, ExpiredTokenException {
+        Jws<Claims> claims = null;
+
+        if(ssoPublicKey == null){
+            throw new IllegalArgumentException("Public key must be provided with Apigee " +
+                "token in order to verify signature.");
+        }
+
+        try {
+            claims = Jwts.parser().setSigningKey(ssoPublicKey).parseClaimsJws(token);
+        } catch (SignatureException se) {
+            if(logger.isDebugEnabled()) {
+                logger.debug("Signature was invalid for Apigee JWT: {} and key: {}", token, ssoPublicKey);
+            }
+            throw new BadTokenException("Invalid Apigee SSO token signature");
+        } catch (MalformedJwtException me){
+            if(logger.isDebugEnabled()) {
+                logger.debug("Beginning JSON object section of Apigee JWT invalid for token: {}", token);
+            }
+            throw new BadTokenException("Malformed Apigee JWT");
+        } catch (ArrayIndexOutOfBoundsException aio){
+            if(logger.isDebugEnabled()) {
+                logger.debug("Signature section of Apigee JWT invalid for token: {}", token);
+            }
+            throw new BadTokenException("Malformed Apigee JWT");
+        } catch ( ExpiredJwtException e ){
+            final long expiry = Long.valueOf(e.getClaims().get("exp").toString());
+            final long expirationDelta = ((System.currentTimeMillis()/1000) - expiry)*1000;
+            throw new ExpiredTokenException(String.format("Token expired %d milliseconds ago.", expirationDelta ));
+        }
+
+
+        return claims;
+    }
+
+    public Jws<Claims> getClaims(String token) throws Exception{
+
+        return getClaimsForKeyUrl(token,publicKey);
+
+    }
+
+    private void validateClaims (final Jws<Claims> claims) throws ExpiredTokenException {
+
+        final Claims body = claims.getBody();
+
+        final long expiry = Long.valueOf(body.get("exp").toString());
+
+        if(expiry - (System.currentTimeMillis()/1000) < 0 ){
+
+            final long expirationDelta = ((System.currentTimeMillis()/1000) - expiry)*1000;
+
+            throw new ExpiredTokenException(String.format("Token expired %d milliseconds ago.", expirationDelta ));
+        }
+
+    }
+
+
+    public void setPublicKey( PublicKey publicKeyArg){
+        this.publicKey = publicKeyArg;
+    }
+
+    @Autowired
+    public void setManagement(ManagementService management) {
+        this.management = management;
+    }
+
+    @Autowired
+    public void setProperties(Properties properties) {
+        this.properties = properties;
+        this.publicKey =  getPublicKey(getExternalSSOUrl());
+    }
+}
diff --git a/stack/services/src/main/java/org/apache/usergrid/security/sso/ExternalSSOProvider.java b/stack/services/src/main/java/org/apache/usergrid/security/sso/ExternalSSOProvider.java
new file mode 100644
index 0000000..ebd7ec5
--- /dev/null
+++ b/stack/services/src/main/java/org/apache/usergrid/security/sso/ExternalSSOProvider.java
@@ -0,0 +1,42 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.usergrid.security.sso;
+
+import org.apache.usergrid.management.UserInfo;
+import org.apache.usergrid.security.tokens.TokenInfo;
+
+import java.util.Map;
+
+/**
+ * Created by ayeshadastagiri on 6/22/16.
+ */
+public interface ExternalSSOProvider {
+
+    /** Authenticate a userId and external token against this provider */
+    TokenInfo validateAndReturnTokenInfo(String token, long ttl) throws Exception;
+
+    /** Authenticate a userId and external token against this provider */
+    UserInfo validateAndReturnUserInfo(String token, long ttl) throws Exception;
+
+    /** Decode the token, if supported, and return any information encoded with the token */
+    Map<String, String> getDecodedTokenDetails(String token) throws Exception;
+
+    Map<String, Object> getAllTokenDetails(String token, String keyUrl) throws Exception;
+
+    String getExternalSSOUrl() throws Exception;
+
+}
diff --git a/stack/services/src/main/java/org/apache/usergrid/security/sso/SSOProviderFactory.java b/stack/services/src/main/java/org/apache/usergrid/security/sso/SSOProviderFactory.java
new file mode 100644
index 0000000..31e085e
--- /dev/null
+++ b/stack/services/src/main/java/org/apache/usergrid/security/sso/SSOProviderFactory.java
@@ -0,0 +1,85 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.usergrid.security.sso;
+
+import org.apache.usergrid.corepersistence.CpEntityManagerFactory;
+import org.apache.usergrid.persistence.EntityManagerFactory;
+import org.apache.usergrid.security.tokens.cassandra.TokenServiceImpl;
+import org.springframework.beans.factory.annotation.Autowired;
+
+import java.util.List;
+import java.util.Properties;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+
+/**
+ * Created by russo on 6/24/16.
+ */
+public class SSOProviderFactory {
+
+    enum Provider {
+        APIGEE, USERGRID
+    }
+
+    private EntityManagerFactory emf;
+    protected Properties properties;
+
+
+    public ExternalSSOProvider getProvider(){
+
+        return getSpecificProvider(properties.getProperty(TokenServiceImpl.USERGRID_EXTERNAL_SSO_PROVIDER));
+
+    }
+
+    public ExternalSSOProvider getSpecificProvider(String providerName){
+
+        final Provider specifiedProvider ;
+        try{
+            specifiedProvider = Provider.valueOf(providerName.toUpperCase());
+        }
+        catch(IllegalArgumentException e){
+            throw new IllegalArgumentException("Unsupported provider");
+        }
+
+        switch (specifiedProvider){
+            case APIGEE:
+                return ((CpEntityManagerFactory)emf).getApplicationContext().getBean( ApigeeSSO2Provider.class );
+            case USERGRID:
+                return ((CpEntityManagerFactory)emf).getApplicationContext().getBean( UsergridExternalProvider.class );
+            default:
+                throw new RuntimeException("Unknown SSO provider");
+        }
+    }
+
+
+    @Autowired
+    public void setEntityManagerFactory( EntityManagerFactory emf ) {
+        this.emf = emf;
+    }
+
+
+    @Autowired
+    public void setProperties(Properties properties) {
+        this.properties = properties;
+    }
+
+    public List<String> getProvidersList() {
+        return Stream.of(Provider.values())
+            .map(Enum::name)
+            .collect(Collectors.toList());
+    }
+}
diff --git a/stack/services/src/main/java/org/apache/usergrid/security/sso/UsergridExternalProvider.java b/stack/services/src/main/java/org/apache/usergrid/security/sso/UsergridExternalProvider.java
new file mode 100644
index 0000000..0cfe2d0
--- /dev/null
+++ b/stack/services/src/main/java/org/apache/usergrid/security/sso/UsergridExternalProvider.java
@@ -0,0 +1,291 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.usergrid.security.sso;
+
+import com.codahale.metrics.Counter;
+import com.google.inject.Injector;
+import org.apache.commons.lang.RandomStringUtils;
+import org.apache.http.impl.conn.PoolingClientConnectionManager;
+import org.apache.usergrid.management.*;
+import org.apache.usergrid.persistence.core.metrics.MetricsFactory;
+import org.apache.usergrid.persistence.exceptions.EntityNotFoundException;
+import org.apache.usergrid.security.tokens.TokenInfo;
+import org.codehaus.jackson.JsonNode;
+import org.glassfish.jersey.apache.connector.ApacheClientProperties;
+import org.glassfish.jersey.apache.connector.ApacheConnectorProvider;
+import org.glassfish.jersey.client.ClientConfig;
+import org.glassfish.jersey.client.ClientProperties;
+import org.glassfish.jersey.jackson.JacksonFeature;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.springframework.beans.factory.annotation.Autowired;
+
+import javax.ws.rs.client.Client;
+import javax.ws.rs.client.ClientBuilder;
+import javax.ws.rs.core.MediaType;
+import java.util.*;
+
+/**
+ * Created by ayeshadastagiri on 6/23/16.
+ */
+public class UsergridExternalProvider implements ExternalSSOProvider {
+    private static final Logger logger = LoggerFactory.getLogger(ApigeeSSO2Provider.class);
+
+    private static final String SSO_PROCESSING_TIME = "sso.processing_time";
+    private static final String SSO_TOKENS_REJECTED = "sso.tokens_rejected";
+    private static final String SSO_TOKENS_VALIDATED = "sso.tokens_validated";
+    public static final String USERGRID_CENTRAL_URL = "usergrid.external.sso.url";
+    public static final String CENTRAL_CONNECTION_POOL_SIZE = "usergrid.central.connection.pool.size";
+    public static final String CENTRAL_CONNECTION_TIMEOUT = "usergrid.central.connection.timeout";
+    public static final String CENTRAL_READ_TIMEOUT = "usergrid.central.read.timeout";
+    private static final String SSO_CREATED_LOCAL_ADMINS = "sso.created_local_admins";
+
+    protected ManagementService management;
+    protected MetricsFactory metricsFactory;
+    protected Properties properties;
+
+    private static Client jerseyClient = null;
+
+    @Autowired
+    private Injector injector;
+
+    @Autowired
+    private ApplicationCreator applicationCreator;
+
+    @Autowired
+    public void setManagement(ManagementService management) {
+        this.management = management;
+    }
+
+    @Autowired
+    public void setProperties(Properties properties) {
+        this.properties = properties;
+    }
+
+    @Autowired
+    public void setMetricFactory() {
+        this.metricsFactory = injector.getInstance(MetricsFactory.class);
+    }
+
+    MetricsFactory getMetricsFactory() {
+        return metricsFactory;
+    }
+
+    @Override
+    public TokenInfo validateAndReturnTokenInfo(String token, long ttl) throws Exception {
+        throw new UnsupportedOperationException("Returning user info not supported from external Usergrid SSO tokens");
+    }
+
+    @Override
+    public Map<String, Object> getAllTokenDetails(String token, String keyUrl) throws Exception {
+        throw new UnsupportedOperationException("Returning all token details info not supported from external Usergrid SSO tokens");
+
+    }
+
+    @Override
+    public String getExternalSSOUrl() {
+        return properties.getProperty(USERGRID_CENTRAL_URL);
+    }
+
+
+    @Override
+    public UserInfo validateAndReturnUserInfo(String token, long ttl) throws Exception {
+        if (token == null) {
+            throw new IllegalArgumentException("ext_access_token must be specified");
+        }
+        if (ttl == -1) {
+            throw new IllegalArgumentException("ttl must be specified");
+        }
+
+        com.codahale.metrics.Timer processingTimer = getMetricsFactory().getTimer(
+            UsergridExternalProvider.class, SSO_PROCESSING_TIME);
+
+        com.codahale.metrics.Timer.Context timerContext = processingTimer.time();
+
+        try {
+            // look up user via UG Central's /management/me endpoint.
+
+            JsonNode accessInfoNode = getMeFromUgCentral(token);
+
+            JsonNode userNode = accessInfoNode.get("user");
+
+            String username = userNode.get("username").asText();
+
+            // if user does not exist locally then we need to fix that
+
+            UserInfo userInfo = management.getAdminUserByUsername(username);
+            UUID userId = userInfo == null ? null : userInfo.getUuid();
+
+            if (userId == null) {
+
+                // create local user and and organizations they have on the central Usergrid instance
+                logger.info("User {} does not exist locally, creating", username);
+
+                String name = userNode.get("name").asText();
+                String email = userNode.get("email").asText();
+                String dummyPassword = RandomStringUtils.randomAlphanumeric(40);
+
+                JsonNode orgsNode = userNode.get("organizations");
+                Iterator<String> fieldNames = orgsNode.getFieldNames();
+
+                if (!fieldNames.hasNext()) {
+                    // no organizations for user exist in response from central Usergrid SSO
+                    // so create user's personal organization and use username as organization name
+                    fieldNames = Collections.singletonList(username).iterator();
+                }
+
+                // create user and any organizations that user is supposed to have
+
+                while (fieldNames.hasNext()) {
+
+                    String orgName = fieldNames.next();
+
+                    if (userId == null) {
+//
+                        // haven't created user yet so do that now
+                        OrganizationOwnerInfo ownerOrgInfo = management.createOwnerAndOrganization(
+                            orgName, username, name, email, dummyPassword, true, false);
+
+                        applicationCreator.createSampleFor(ownerOrgInfo.getOrganization());
+
+                        userId = ownerOrgInfo.getOwner().getUuid();
+                        userInfo = ownerOrgInfo.getOwner();
+
+                        Counter createdAdminsCounter = getMetricsFactory().getCounter(
+                            UsergridExternalProvider.class, SSO_CREATED_LOCAL_ADMINS);
+                        createdAdminsCounter.inc();
+
+                        logger.info("Created user {} and org {}", username, orgName);
+
+                    } else {
+
+                        // already created user, so just create an org
+                        final OrganizationInfo organization =
+                            management.createOrganization(orgName, userInfo, true);
+
+                        applicationCreator.createSampleFor(organization);
+
+                        logger.info("Created user {}'s other org {}", username, orgName);
+                    }
+                }
+            }
+
+            return userInfo;
+        } catch (Exception e) {
+            timerContext.stop();
+            logger.debug("Error validating external token", e);
+            throw e;
+        }
+
+    }
+
+    @Override
+    public Map<String, String> getDecodedTokenDetails(String token) {
+
+        throw new UnsupportedOperationException("Not currently supported with Usergrid external tokens");
+
+    }
+
+    /**
+     * Look up Admin User via UG Central's /management/me endpoint.
+     *
+     * @param extAccessToken Access token issued by UG Central of Admin User
+     * @return JsonNode representation of AccessInfo object for Admin User
+     * @throws EntityNotFoundException if access_token is not valid.
+     */
+    private JsonNode getMeFromUgCentral(String extAccessToken) throws EntityNotFoundException {
+
+        // prepare to count tokens validated and rejected
+
+        Counter tokensRejectedCounter = getMetricsFactory().getCounter(
+            UsergridExternalProvider.class, SSO_TOKENS_REJECTED);
+        Counter tokensValidatedCounter = getMetricsFactory().getCounter(
+            UsergridExternalProvider.class, SSO_TOKENS_VALIDATED);
+
+        // create URL of central Usergrid's /management/me endpoint
+
+        String externalUrl = properties.getProperty(USERGRID_CENTRAL_URL).trim();
+
+        // be lenient about trailing slash
+        externalUrl = !externalUrl.endsWith("/") ? externalUrl + "/" : externalUrl;
+        String me = externalUrl + "management/me?access_token=" + extAccessToken;
+
+        // use our favorite HTTP client to GET /management/me
+
+        Client client = getJerseyClient();
+        final org.codehaus.jackson.JsonNode accessInfoNode;
+        try {
+            accessInfoNode = client.target(me).request()
+                .accept(MediaType.APPLICATION_JSON_TYPE)
+                .get(org.codehaus.jackson.JsonNode.class);
+
+            tokensValidatedCounter.inc();
+
+        } catch (Exception e) {
+            // user not found 404
+            tokensRejectedCounter.inc();
+            String msg = "Cannot find Admin User associated with " + extAccessToken;
+            throw new EntityNotFoundException(msg, e);
+        }
+
+        return accessInfoNode;
+    }
+
+    private Client getJerseyClient() {
+
+        if (jerseyClient == null) {
+
+            synchronized (this) {
+
+                // create HTTPClient and with configured connection pool
+
+                int poolSize = 100; // connections
+                final String poolSizeStr = properties.getProperty(CENTRAL_CONNECTION_POOL_SIZE);
+                if (poolSizeStr != null) {
+                    poolSize = Integer.parseInt(poolSizeStr);
+                }
+
+                PoolingClientConnectionManager connectionManager = new PoolingClientConnectionManager();
+                connectionManager.setMaxTotal(poolSize);
+
+                int timeout = 20000; // ms
+                final String timeoutStr = properties.getProperty(CENTRAL_CONNECTION_TIMEOUT);
+                if (timeoutStr != null) {
+                    timeout = Integer.parseInt(timeoutStr);
+                }
+
+                int readTimeout = 20000; // ms
+                final String readTimeoutStr = properties.getProperty(CENTRAL_READ_TIMEOUT);
+                if (readTimeoutStr != null) {
+                    readTimeout = Integer.parseInt(readTimeoutStr);
+                }
+
+                ClientConfig clientConfig = new ClientConfig();
+                clientConfig.register(new JacksonFeature());
+                clientConfig.property(ApacheClientProperties.CONNECTION_MANAGER, connectionManager);
+                clientConfig.connectorProvider(new ApacheConnectorProvider());
+
+                jerseyClient = ClientBuilder.newClient(clientConfig);
+                jerseyClient.property(ClientProperties.CONNECT_TIMEOUT, timeout);
+                jerseyClient.property(ClientProperties.READ_TIMEOUT, readTimeout);
+            }
+        }
+
+        return jerseyClient;
+
+    }
+}
diff --git a/stack/services/src/main/java/org/apache/usergrid/security/tokens/TokenService.java b/stack/services/src/main/java/org/apache/usergrid/security/tokens/TokenService.java
index 2ef5d59..308c428 100644
--- a/stack/services/src/main/java/org/apache/usergrid/security/tokens/TokenService.java
+++ b/stack/services/src/main/java/org/apache/usergrid/security/tokens/TokenService.java
@@ -17,11 +17,11 @@
 package org.apache.usergrid.security.tokens;
 
 
+import org.apache.usergrid.security.AuthPrincipalInfo;
+
 import java.util.Map;
 import java.util.UUID;
 
-import org.apache.usergrid.security.AuthPrincipalInfo;
-
 
 public interface TokenService {
 
@@ -65,4 +65,10 @@
      * given principal uuid and application uuid
      */
     public void removeTokens( AuthPrincipalInfo principal ) throws Exception;
+
+
+    /**
+     * checks if the external SSO provider is enabled.
+     */
+    public boolean isExternalSSOProviderEnabled();
 }
diff --git a/stack/services/src/main/java/org/apache/usergrid/security/tokens/cassandra/TokenServiceImpl.java b/stack/services/src/main/java/org/apache/usergrid/security/tokens/cassandra/TokenServiceImpl.java
index 5c71b1b..6ea6de0 100644
--- a/stack/services/src/main/java/org/apache/usergrid/security/tokens/cassandra/TokenServiceImpl.java
+++ b/stack/services/src/main/java/org/apache/usergrid/security/tokens/cassandra/TokenServiceImpl.java
@@ -17,40 +17,32 @@
 package org.apache.usergrid.security.tokens.cassandra;
 
 
-import com.codahale.metrics.Counter;
 import com.google.inject.Injector;
 import me.prettyprint.hector.api.Keyspace;
 import me.prettyprint.hector.api.beans.HColumn;
 import me.prettyprint.hector.api.mutation.Mutator;
-import org.apache.commons.lang.RandomStringUtils;
-import org.apache.commons.lang.StringUtils;
-import org.apache.http.impl.conn.PoolingClientConnectionManager;
 import org.apache.usergrid.corepersistence.CpEntityManagerFactory;
 import org.apache.usergrid.corepersistence.util.CpNamingUtils;
-import org.apache.usergrid.exception.NotImplementedException;
-import org.apache.usergrid.management.*;
+import org.apache.usergrid.management.ApplicationCreator;
+import org.apache.usergrid.management.ManagementService;
+import org.apache.usergrid.management.UserInfo;
 import org.apache.usergrid.persistence.EntityManagerFactory;
 import org.apache.usergrid.persistence.cassandra.CassandraService;
 import org.apache.usergrid.persistence.core.metrics.MetricsFactory;
 import org.apache.usergrid.persistence.entities.Application;
-import org.apache.usergrid.persistence.exceptions.EntityNotFoundException;
 import org.apache.usergrid.security.AuthPrincipalInfo;
 import org.apache.usergrid.security.AuthPrincipalType;
+import org.apache.usergrid.security.sso.SSOProviderFactory;
 import org.apache.usergrid.security.tokens.TokenCategory;
 import org.apache.usergrid.security.tokens.TokenInfo;
 import org.apache.usergrid.security.tokens.TokenService;
 import org.apache.usergrid.security.tokens.exceptions.BadTokenException;
 import org.apache.usergrid.security.tokens.exceptions.ExpiredTokenException;
 import org.apache.usergrid.security.tokens.exceptions.InvalidTokenException;
+import org.apache.usergrid.security.sso.ExternalSSOProvider;
 import org.apache.usergrid.utils.ConversionUtils;
 import org.apache.usergrid.utils.JsonUtils;
 import org.apache.usergrid.utils.UUIDUtils;
-import org.codehaus.jackson.JsonNode;
-import org.glassfish.jersey.apache.connector.ApacheClientProperties;
-import org.glassfish.jersey.apache.connector.ApacheConnectorProvider;
-import org.glassfish.jersey.client.ClientConfig;
-import org.glassfish.jersey.client.ClientProperties;
-import org.glassfish.jersey.jackson.JacksonFeature;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.springframework.beans.factory.annotation.Autowired;
@@ -58,8 +50,6 @@
 import org.springframework.util.Assert;
 
 import javax.ws.rs.client.Client;
-import javax.ws.rs.client.ClientBuilder;
-import javax.ws.rs.core.MediaType;
 import java.nio.ByteBuffer;
 import java.util.*;
 
@@ -326,21 +316,51 @@
     @Override
     public TokenInfo getTokenInfo( String token, boolean updateAccessTime ) throws Exception {
 
-        UUID uuid = getUUIDForToken( token );
+        UUID uuid;
 
-        long ssoTtl = 1000000L; // TODO: property for this
 
-        if ( uuid == null ) {
-            return isSSOEnabled() ? validateExternalToken( token, ssoTtl ) : null;
+        /** Pre-validation of the token string based on Usergrid's encoding scheme.
+         *
+         * If the token does not parse out a UUID, then it's not a Usergrid token.  Check if External SSO provider
+         * is configured, which is not Usergrid and immediately try to validate the token based on this parsing
+         * information.
+         */
+        try{
+            uuid = getUUIDForToken( token );
+        }
+        catch (ExpiredTokenException expiredTokenException){
+            throw new ExpiredTokenException(expiredTokenException.getMessage());
+        }
+        catch(Exception e){
+
+            // If the token doesn't parse as a Usergrid token, see if an external provider other than Usergrid is
+            // enabled.  If so, just validate the external token.
+            try{
+                if( isExternalSSOProviderEnabled() && !getExternalSSOProvider().equalsIgnoreCase("usergrid")) {
+                    return validateExternalToken(token, 1, getExternalSSOProvider());
+                }else{
+                    throw new IllegalArgumentException("invalid external provider : " + getExternalSSOProvider()); // re-throw the error
+                }
+            }
+            catch (NullPointerException npe){
+                throw new IllegalArgumentException("The SSO provider in the config is empty.");
+            }
+
         }
 
-        TokenInfo tokenInfo;
+        final TokenInfo tokenInfo;
+
+        /**
+         * Now try actual Usergrid token validations.  First try locally.  If that fails and SSO is enabled with
+         * Usergrid being a provider, validate the external token.
+         */
         try {
             tokenInfo = getTokenInfo( uuid );
         } catch (InvalidTokenException e){
-            // now try from central sso
-            if ( isSSOEnabled() ){
-                return validateExternalToken( token, maxPersistenceTokenAge );
+            // Try the request from Usergrid, conditions are specific so we don't incur perf hits for unncessary
+            // token validations that are known to not
+            if ( isExternalSSOProviderEnabled() && getExternalSSOProvider().equalsIgnoreCase("usergrid") ){
+                return validateExternalToken( token, maxPersistenceTokenAge, getExternalSSOProvider() );
             }else{
                 throw e; // re-throw the error
             }
@@ -625,7 +645,7 @@
         long expirationDelta = System.currentTimeMillis() - expires;
 
         if ( expires != Long.MAX_VALUE && expirationDelta > 0 ) {
-            throw new ExpiredTokenException( String.format( "Token expired %d millisecons ago.", expirationDelta ) );
+            throw new ExpiredTokenException( String.format( "Token expired %d milliseconds ago.", expirationDelta ) );
         }
         return uuid;
     }
@@ -743,17 +763,25 @@
     //
     // Central SSO implementation
 
-    public static final String USERGRID_CENTRAL_URL =         "usergrid.central.url";
     public static final String CENTRAL_CONNECTION_POOL_SIZE = "usergrid.central.connection.pool.size";
     public static final String CENTRAL_CONNECTION_TIMEOUT =   "usergrid.central.connection.timeout";
     public static final String CENTRAL_READ_TIMEOUT =         "usergrid.central.read.timeout";
 
+
     // names for metrics to be collected
     private static final String SSO_TOKENS_REJECTED =         "sso.tokens_rejected";
     private static final String SSO_TOKENS_VALIDATED =        "sso.tokens_validated";
     private static final String SSO_CREATED_LOCAL_ADMINS =    "sso.created_local_admins";
     private static final String SSO_PROCESSING_TIME =         "sso.processing_time";
 
+    //SSO2 implementation
+    public static final String USERGRID_EXTERNAL_SSO_ENABLED = "usergrid.external.sso.enabled";
+    public static final String USERGRID_EXTERNAL_SSO_PROVIDER =    "usergrid.external.sso.provider";
+    public static final String USERGRID_EXTERNAL_SSO_PROVIDER_URL = "usergrid.external.sso.url";
+    public static final String USERGRID_EXTERNAL_SSO_PROVIDER_USER_PROVISION_URL
+        = "usergrid.external.sso.userprovision.url";
+
+
     private static Client jerseyClient = null;
 
     @Autowired
@@ -762,14 +790,21 @@
     @Autowired
     protected ManagementService management;
 
+    @Autowired
+    private SSOProviderFactory ssoProviderFactory;
+
     MetricsFactory getMetricsFactory() {
         return metricsFactory;
     }
 
-    private boolean isSSOEnabled() {
-        return !StringUtils.isEmpty( properties.getProperty( USERGRID_CENTRAL_URL ));
+
+    public boolean isExternalSSOProviderEnabled() {
+        return Boolean.valueOf(properties.getProperty( USERGRID_EXTERNAL_SSO_ENABLED ));
     }
 
+    private String getExternalSSOProvider(){
+            return properties.getProperty(USERGRID_EXTERNAL_SSO_PROVIDER);
+    }
 
     /**
      * <p>
@@ -788,201 +823,26 @@
      * @param extAccessToken Access token from external Usergrid system.
      * @param ttl            Time to live for token.
      */
-    public TokenInfo validateExternalToken(String extAccessToken, long ttl) throws Exception {
+    public TokenInfo validateExternalToken(String extAccessToken, long ttl, String provider) throws Exception {
 
-        TokenInfo tokenInfo = null;
 
-        if (!isSSOEnabled()) {
-            throw new NotImplementedException( "External Token Validation Service not enabled" );
-        }
+        ExternalSSOProvider ssoProvider = ssoProviderFactory.getProvider();
 
-        if (extAccessToken == null) {
-            throw new IllegalArgumentException( "ext_access_token must be specified" );
-        }
+        if(provider.equalsIgnoreCase("usergrid")){
 
-        if (ttl == -1) {
-            throw new IllegalArgumentException( "ttl must be specified" );
-        }
+            UserInfo userinfo = ssoProvider.validateAndReturnUserInfo(extAccessToken,ttl);
 
-        com.codahale.metrics.Timer processingTimer = getMetricsFactory().getTimer(
-            TokenServiceImpl.class, SSO_PROCESSING_TIME );
-
-        com.codahale.metrics.Timer.Context timerContext = processingTimer.time();
-
-        try {
-            // look up user via UG Central's /management/me endpoint.
-
-            JsonNode accessInfoNode = getMeFromUgCentral( extAccessToken );
-
-            JsonNode userNode = accessInfoNode.get( "user" );
-
-            String username = userNode.get( "username" ).asText();
-
-            // if user does not exist locally then we need to fix that
-
-            UserInfo userInfo = management.getAdminUserByUsername( username );
-            UUID userId = userInfo == null ? null : userInfo.getUuid();
-
-            if (userId == null) {
-
-                // create local user and and organizations they have on the central Usergrid instance
-                logger.info( "User {} does not exist locally, creating", username );
-
-                String name = userNode.get( "name" ).asText();
-                String email = userNode.get( "email" ).asText();
-                String dummyPassword = RandomStringUtils.randomAlphanumeric( 40 );
-
-                JsonNode orgsNode = userNode.get( "organizations" );
-                Iterator<String> fieldNames = orgsNode.getFieldNames();
-
-                if (!fieldNames.hasNext()) {
-                    // no organizations for user exist in response from central Usergrid SSO
-                    // so create user's personal organization and use username as organization name
-                    fieldNames = Collections.singletonList( username ).iterator();
-                }
-
-                // create user and any organizations that user is supposed to have
-
-                while (fieldNames.hasNext()) {
-
-                    String orgName = fieldNames.next();
-
-                    if (userId == null) {
-
-                        // haven't created user yet so do that now
-                        OrganizationOwnerInfo ownerOrgInfo = management.createOwnerAndOrganization(
-                            orgName, username, name, email, dummyPassword, true, false );
-
-                        applicationCreator.createSampleFor( ownerOrgInfo.getOrganization() );
-
-                        userId = ownerOrgInfo.getOwner().getUuid();
-                        userInfo = ownerOrgInfo.getOwner();
-
-                        Counter createdAdminsCounter = getMetricsFactory().getCounter(
-                            TokenServiceImpl.class, SSO_CREATED_LOCAL_ADMINS );
-                        createdAdminsCounter.inc();
-
-                        logger.info( "Created user {} and org {}", username, orgName );
-
-                    } else {
-
-                        // already created user, so just create an org
-                        final OrganizationInfo organization =
-                            management.createOrganization( orgName, userInfo, true );
-
-                        applicationCreator.createSampleFor( organization );
-
-                        logger.info( "Created user {}'s other org {}", username, orgName );
-                    }
-                }
-            }
-
-            // store the external access_token as if it were one of our own
+            // Store the external Usergrid access_token as if it were one of our own so we don't have to make the
+            // external HTTP validation call on subsequent requests
             importToken( extAccessToken, TokenCategory.ACCESS, null, new AuthPrincipalInfo(
-                ADMIN_USER, userId, CpNamingUtils.MANAGEMENT_APPLICATION_ID), null, ttl );
+                ADMIN_USER, userinfo.getUuid(), CpNamingUtils.MANAGEMENT_APPLICATION_ID), null, ttl );
+            return getTokenInfo( extAccessToken );
 
-            tokenInfo = getTokenInfo( extAccessToken );
+        }else{
 
-        } catch (Exception e) {
-            timerContext.stop();
-            logger.debug( "Error validating external token", e );
-            throw e;
+            return ssoProvider.validateAndReturnTokenInfo(extAccessToken,ttl);
         }
 
-        return tokenInfo;
     }
 
-
-    /**
-     * Look up Admin User via UG Central's /management/me endpoint.
-     *
-     * @param extAccessToken Access token issued by UG Central of Admin User
-     * @return JsonNode representation of AccessInfo object for Admin User
-     * @throws EntityNotFoundException if access_token is not valid.
-     */
-    private JsonNode getMeFromUgCentral( String extAccessToken )  throws EntityNotFoundException {
-
-        // prepare to count tokens validated and rejected
-
-        Counter tokensRejectedCounter = getMetricsFactory().getCounter(
-            TokenServiceImpl.class, SSO_TOKENS_REJECTED );
-        Counter tokensValidatedCounter = getMetricsFactory().getCounter(
-            TokenServiceImpl.class, SSO_TOKENS_VALIDATED );
-
-        // create URL of central Usergrid's /management/me endpoint
-
-        String externalUrl = properties.getProperty( USERGRID_CENTRAL_URL ).trim();
-
-        // be lenient about trailing slash
-        externalUrl = !externalUrl.endsWith( "/" ) ? externalUrl + "/" : externalUrl;
-        String me = externalUrl + "management/me?access_token=" + extAccessToken;
-
-        // use our favorite HTTP client to GET /management/me
-
-        Client client = getJerseyClient();
-        final JsonNode accessInfoNode;
-        try {
-            accessInfoNode = client.target( me ).request()
-                .accept( MediaType.APPLICATION_JSON_TYPE )
-                .get(JsonNode.class);
-
-            tokensValidatedCounter.inc();
-
-        } catch ( Exception e ) {
-            // user not found 404
-            tokensRejectedCounter.inc();
-            String msg = "Cannot find Admin User associated with " + extAccessToken;
-            throw new EntityNotFoundException( msg, e );
-        }
-
-        return accessInfoNode;
-    }
-
-
-
-    private Client getJerseyClient() {
-
-        if ( jerseyClient == null ) {
-
-            synchronized ( this ) {
-
-                // create HTTPClient and with configured connection pool
-
-                int poolSize = 100; // connections
-                final String poolSizeStr = properties.getProperty( CENTRAL_CONNECTION_POOL_SIZE );
-                if ( poolSizeStr != null ) {
-                    poolSize = Integer.parseInt( poolSizeStr );
-                }
-
-                PoolingClientConnectionManager connectionManager = new PoolingClientConnectionManager();
-                connectionManager.setMaxTotal(poolSize);
-
-                int timeout = 20000; // ms
-                final String timeoutStr = properties.getProperty( CENTRAL_CONNECTION_TIMEOUT );
-                if ( timeoutStr != null ) {
-                    timeout = Integer.parseInt( timeoutStr );
-                }
-
-                int readTimeout = 20000; // ms
-                final String readTimeoutStr = properties.getProperty( CENTRAL_READ_TIMEOUT );
-                if ( readTimeoutStr != null ) {
-                    readTimeout = Integer.parseInt( readTimeoutStr );
-                }
-
-                ClientConfig clientConfig = new ClientConfig();
-                clientConfig.register( new JacksonFeature() );
-                clientConfig.property( ApacheClientProperties.CONNECTION_MANAGER, connectionManager );
-                clientConfig.connectorProvider( new ApacheConnectorProvider() );
-
-                jerseyClient = ClientBuilder.newClient( clientConfig );
-                jerseyClient.property( ClientProperties.CONNECT_TIMEOUT, timeout );
-                jerseyClient.property( ClientProperties.READ_TIMEOUT, readTimeout );
-            }
-        }
-
-        return jerseyClient;
-
-    }
-
-
 }
diff --git a/stack/services/src/main/java/org/apache/usergrid/services/AbstractConnectionsService.java b/stack/services/src/main/java/org/apache/usergrid/services/AbstractConnectionsService.java
index 83549dd..dee78f9 100644
--- a/stack/services/src/main/java/org/apache/usergrid/services/AbstractConnectionsService.java
+++ b/stack/services/src/main/java/org/apache/usergrid/services/AbstractConnectionsService.java
@@ -17,21 +17,8 @@
 package org.apache.usergrid.services;
 
 
-import java.util.List;
-import java.util.Set;
-import java.util.UUID;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import org.apache.usergrid.persistence.ConnectionRef;
-import org.apache.usergrid.persistence.Entity;
-import org.apache.usergrid.persistence.EntityRef;
-import org.apache.usergrid.persistence.Query;
+import org.apache.usergrid.persistence.*;
 import org.apache.usergrid.persistence.Query.Level;
-import org.apache.usergrid.persistence.Results;
-import org.apache.usergrid.persistence.Schema;
-import org.apache.usergrid.persistence.SimpleEntityRef;
 import org.apache.usergrid.persistence.index.query.Identifier;
 import org.apache.usergrid.services.ServiceParameter.IdParameter;
 import org.apache.usergrid.services.ServiceParameter.NameParameter;
@@ -39,10 +26,15 @@
 import org.apache.usergrid.services.ServiceResults.Type;
 import org.apache.usergrid.services.exceptions.ServiceResourceNotFoundException;
 import org.apache.usergrid.services.exceptions.UnsupportedServiceOperationException;
-
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import rx.Observable;
 import rx.schedulers.Schedulers;
 
+import java.util.List;
+import java.util.Set;
+import java.util.UUID;
+
 import static org.apache.usergrid.services.ServiceParameter.filter;
 import static org.apache.usergrid.services.ServiceParameter.firstParameterIsName;
 import static org.apache.usergrid.utils.ClassUtils.cast;
@@ -241,7 +233,7 @@
 
         ServiceResults results = getItemsByQuery( context, context.getQuery() );
 
-        if ( results.size() == 0 ) {
+        if ( results == null || results.size() == 0 ) {
             throw new ServiceResourceNotFoundException( context );
         }
 
@@ -307,6 +299,7 @@
         Results r = null;
 
         if ( connecting() ) {
+            query.setConnecting(true);
             if ( query.hasQueryPredicates() ) {
                 if (logger.isTraceEnabled()) {
                     logger.trace("Attempted query of backwards connections");
@@ -314,13 +307,7 @@
                 return null;
             }
             else {
-//            	r = em.getSourceEntities( context.getOwner().getUuid(), query.getConnectionType(),
-//            			query.getEntityType(), level );
-                // usergrid-2389: User defined limit in the query is ignored. Fixed it by adding
-                // the limit to the method parameter downstream.
-            	r = em.getSourceEntities(
-                    new SimpleEntityRef(context.getOwner().getType(), context.getOwner().getUuid()),
-                    query.getConnectionType(), query.getEntityType(), level, query.getLimit());
+                r = em.searchTargetEntities(context.getOwner(),query);
             }
         }
         else {
@@ -381,6 +368,10 @@
             }
             else {
                 entity = em.create( query.getEntityType(), context.getProperties() );
+                //if entity is null here it throws NPE. Fixing it to throw 404.
+                if ( entity == null ) {
+                    throw new ServiceResourceNotFoundException( context );
+                }
             }
             entity = importEntity( context, entity );
 
diff --git a/stack/services/src/main/java/org/apache/usergrid/services/AbstractPathBasedColllectionService.java b/stack/services/src/main/java/org/apache/usergrid/services/AbstractPathBasedCollectionService.java
similarity index 97%
rename from stack/services/src/main/java/org/apache/usergrid/services/AbstractPathBasedColllectionService.java
rename to stack/services/src/main/java/org/apache/usergrid/services/AbstractPathBasedCollectionService.java
index 2d5b7e1..f859c65 100644
--- a/stack/services/src/main/java/org/apache/usergrid/services/AbstractPathBasedColllectionService.java
+++ b/stack/services/src/main/java/org/apache/usergrid/services/AbstractPathBasedCollectionService.java
@@ -38,12 +38,12 @@
 import static org.apache.usergrid.utils.ListUtils.isEmpty;
 
 
-public class AbstractPathBasedColllectionService extends AbstractCollectionService {
+public class AbstractPathBasedCollectionService extends AbstractCollectionService {
 
-    private static final Logger logger = LoggerFactory.getLogger( AbstractPathBasedColllectionService.class );
+    private static final Logger logger = LoggerFactory.getLogger( AbstractPathBasedCollectionService.class );
 
 
-    public AbstractPathBasedColllectionService() {
+    public AbstractPathBasedCollectionService() {
         super();
     }
 
diff --git a/stack/services/src/main/java/org/apache/usergrid/services/assets/AssetsService.java b/stack/services/src/main/java/org/apache/usergrid/services/assets/AssetsService.java
index 41aaa82..b2f2a24 100644
--- a/stack/services/src/main/java/org/apache/usergrid/services/assets/AssetsService.java
+++ b/stack/services/src/main/java/org/apache/usergrid/services/assets/AssetsService.java
@@ -22,12 +22,12 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.usergrid.persistence.EntityRef;
-import org.apache.usergrid.services.AbstractPathBasedColllectionService;
+import org.apache.usergrid.services.AbstractPathBasedCollectionService;
 import org.apache.usergrid.services.ServiceContext;
 import org.apache.usergrid.services.ServiceResults;
 
 
-public class AssetsService extends AbstractPathBasedColllectionService {
+public class AssetsService extends AbstractPathBasedCollectionService {
 
     private static final Logger logger = LoggerFactory.getLogger( AssetsService.class );
 
diff --git a/stack/services/src/main/java/org/apache/usergrid/services/groups/GroupsService.java b/stack/services/src/main/java/org/apache/usergrid/services/groups/GroupsService.java
index aa125d4..e99a859 100644
--- a/stack/services/src/main/java/org/apache/usergrid/services/groups/GroupsService.java
+++ b/stack/services/src/main/java/org/apache/usergrid/services/groups/GroupsService.java
@@ -30,7 +30,7 @@
 import org.apache.usergrid.persistence.EntityRef;
 import org.apache.usergrid.persistence.Query;
 import org.apache.usergrid.persistence.entities.Role;
-import org.apache.usergrid.services.AbstractPathBasedColllectionService;
+import org.apache.usergrid.services.AbstractPathBasedCollectionService;
 import org.apache.usergrid.services.ServiceContext;
 import org.apache.usergrid.services.ServicePayload;
 import org.apache.usergrid.services.ServiceResults;
@@ -42,7 +42,7 @@
 import static org.apache.usergrid.services.ServiceResults.genericServiceResults;
 
 
-public class GroupsService extends AbstractPathBasedColllectionService {
+public class GroupsService extends AbstractPathBasedCollectionService {
 
     private static final Logger logger = LoggerFactory.getLogger( GroupsService.class );
 
diff --git a/stack/services/src/main/resources/usergrid-services-context.xml b/stack/services/src/main/resources/usergrid-services-context.xml
index e9b514a..268b5ec 100644
--- a/stack/services/src/main/resources/usergrid-services-context.xml
+++ b/stack/services/src/main/resources/usergrid-services-context.xml
@@ -77,7 +77,14 @@
 
     <bean id="saltProvider" class="org.apache.usergrid.security.salt.NoOpSaltProvider"/>
 
+    <!-- singletons for custom SSO providers if they are used -->
+    <bean id="apigeeSSO2Provider" class="org.apache.usergrid.security.sso.ApigeeSSO2Provider">
+        <property name="management" ref="managementService" />
+    </bean>
 
+    <bean id="usergridExternalProvider" class="org.apache.usergrid.security.sso.UsergridExternalProvider">
+        <property name="management" ref="managementService" />
+    </bean>
 
     <bean id="serviceManagerFactory" class="org.apache.usergrid.services.ServiceManagerFactory">
         <constructor-arg ref="entityManagerFactory"/>
@@ -93,11 +100,18 @@
         <constructor-arg ref="managementService"/>
     </bean>
 
+    <!-- sign in providers are for application users to have integration with FB,Ping,etc. -->
     <bean id="signInProviderFactory" class="org.apache.usergrid.security.providers.SignInProviderFactory">
         <property name="entityManagerFactory" ref="entityManagerFactory"/>
         <property name="managementService" ref="managementService"/>
     </bean>
 
+    <!-- sso providers are for admin user accounts integrated with SSO systems like Apigee or central UG -->
+    <bean id="ssoProviderFactory" class="org.apache.usergrid.security.sso.SSOProviderFactory">
+        <property name="entityManagerFactory" ref="entityManagerFactory"/>
+        <property name="properties" ref="properties"/>
+    </bean>
+
     <bean id="exportService" class="org.apache.usergrid.management.export.ExportServiceImpl">
         <property name="managementService" ref="managementService"/>
         <property name="emf" ref="entityManagerFactory"/>
diff --git a/stack/services/src/test/java/org/apache/usergrid/services/ConnectionsServiceIT.java b/stack/services/src/test/java/org/apache/usergrid/services/ConnectionsServiceIT.java
index a1f19d4..4e65f54 100644
--- a/stack/services/src/test/java/org/apache/usergrid/services/ConnectionsServiceIT.java
+++ b/stack/services/src/test/java/org/apache/usergrid/services/ConnectionsServiceIT.java
@@ -17,19 +17,17 @@
 package org.apache.usergrid.services;
 
 
-import java.util.Map;
-
+import org.apache.usergrid.persistence.Entity;
+import org.apache.usergrid.persistence.Query;
 import org.junit.Assert;
 import org.junit.Test;
-
-import org.apache.usergrid.persistence.Entity;
-
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import java.util.Map;
+
+import static org.junit.Assert.*;
+
 
 
 public class ConnectionsServiceIT extends AbstractServiceIT {
@@ -86,6 +84,66 @@
         app.testRequest( ServiceAction.POST, 1, "users", "conn-user1", "manages", "user" );
     }
 
+    @SuppressWarnings("rawtypes")
+    @Test
+    public void testUserConnectionsCursor() throws Exception {
+        app.put("username", "conn-user1");
+        app.put("email", "conn-user1@apigee.com");
+
+        Entity user1 = app.testRequest(ServiceAction.POST, 1, "users").getEntity();
+        assertNotNull(user1);
+
+        app.testRequest(ServiceAction.GET, 1, "users", "conn-user1");
+
+        app.put("username", "conn-user2");
+        app.put("email", "conn-user2@apigee.com");
+
+        Entity user2 = app.testRequest(ServiceAction.POST, 1, "users").getEntity();
+        assertNotNull(user2);
+
+
+        app.put("username", "conn-user3");
+        app.put("email", "conn-user3@apigee.com");
+
+        Entity user3 = app.testRequest(ServiceAction.POST, 1, "users").getEntity();
+        assertNotNull(user3);
+
+
+        //POST users/conn-user2/manages/user2/conn-user1
+        app.testRequest(ServiceAction.POST, 1, "users", "conn-user2", "likes", "users", "conn-user1");
+        //POST users/conn-user3/reports/users/conn-user1
+        app.testRequest(ServiceAction.POST, 1, "users", "conn-user3", "likes", "users", "conn-user1");
+
+        Query query = new Query().fromQLNullSafe("");
+        query.setLimit(1);
+
+        //the result should return a valid cursor.
+        ServiceResults result = app.testRequest(ServiceAction.GET, 1, "users", "conn-user1", "connecting", "likes",query);
+        assertNotNull(result.getCursor());
+        String enityName1 = result.getEntity().getProperty("email").toString();
+
+        Query newquery = new Query().fromQLNullSafe("");
+        query.setCursor(result.getCursor());
+        result = app.testRequest(ServiceAction.GET,1,"users","conn-user1","connecting","likes",query);
+        String enityName2 = result.getEntity().getProperty("email").toString();
+
+        //ensure the two entities returned in above requests are different.
+        assertNotEquals(enityName1,enityName2);
+
+        newquery = new Query().fromQLNullSafe("");
+        query.setCursor(result.getCursor());
+        result = app.testRequest(ServiceAction.GET,0,"users","conn-user1","connecting","likes",query);
+        //return empty cursor when no more entitites found.
+        assertNull(result.getCursor());
+
+        //DELETE users/conn-user1/manages/user2/conn-user2 (qualified by collection type on second entity)
+        app.testRequest(ServiceAction.DELETE, 1, "users", "conn-user2", "likes", "users", "conn-user1");
+
+        app.testRequest(ServiceAction.GET,1,"users","conn-user1","connecting","likes");
+
+
+    }
+
     @Test
     public void testNonExistentEntity() throws Exception {
 
diff --git a/utils/usergrid-util-python/.gitignore b/utils/usergrid-util-python/.gitignore
new file mode 100644
index 0000000..a6e3315
--- /dev/null
+++ b/utils/usergrid-util-python/.gitignore
@@ -0,0 +1,61 @@
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+env/
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+*.egg-info/
+.installed.cfg
+*.egg
+
+# PyInstaller
+#  Usually these files are written by a python script from a template
+#  before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*,cover
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+target/
+
+# custom
+sandbox
+0e4b82c5-9aad-45de-810a-ff07c281ed2d_1454177649_export.zip
diff --git a/utils/usergrid-util-python/LICENSE b/utils/usergrid-util-python/LICENSE
new file mode 100644
index 0000000..8f71f43
--- /dev/null
+++ b/utils/usergrid-util-python/LICENSE
@@ -0,0 +1,202 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "{}"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright {yyyy} {name of copyright owner}
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+
diff --git a/utils/usergrid-util-python/README.md b/utils/usergrid-util-python/README.md
new file mode 100644
index 0000000..7d3b533
--- /dev/null
+++ b/utils/usergrid-util-python/README.md
@@ -0,0 +1,15 @@
+# Usergrid Tools (in Python)
+
+## Prerequisites
+
+* Install the Usergrid Python SDK: `pip install usergrid`
+* Install Usergrid Tools: `pip install usergrid-tools`
+
+
+## Overview
+The purpose of this module is to provide tools for working with Usergrid.  The tools included as console scripts are:
+* `usergrid_data_migrator` - [README](https://github.com/jwest-apigee/usergrid-util-python/blob/master/usergrid_tools/migration/README.md) A tool for migrating data from one Usergrid installation to another (or org1->org2)
+* `parse_importer` - [README](https://github.com/jwest-apigee/usergrid-util-python/blob/master/usergrid_tools/parse_importer/README.md) A tool for importing data from a Parse.com data export into Usergrid
+* `index_test` -  [README](https://github.com/jwest-apigee/usergrid-util-python/blob/master/usergrid_tools/indexing/README.md) A tool for testing indexing latency in Usergrid
+
+For information on those tools, please see the respective README files
diff --git a/utils/usergrid-util-python/es_tools/alias_mover.py b/utils/usergrid-util-python/es_tools/alias_mover.py
new file mode 100644
index 0000000..bc2bfb7
--- /dev/null
+++ b/utils/usergrid-util-python/es_tools/alias_mover.py
@@ -0,0 +1,72 @@
+# */
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *   http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing,
+# * software distributed under the License is distributed on an
+# * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# * KIND, either express or implied.  See the License for the
+#    * specific language governing permissions and limitations
+# * under the License.
+# */
+
+import json
+import requests
+
+__author__ = 'Jeff.West@yahoo.com'
+
+cluster = 'cluster-1'
+
+work = {
+    'remove': {
+        'app-id-1': 'from-index',
+        'app-id-2': 'from-index'
+    },
+    'add': {
+        'app-id-1': 'to-index',
+        'app-id-2': 'to-index'
+    }
+}
+
+actions = []
+
+for app_id, index in work.get('remove', {}).iteritems():
+    actions.append({
+        "remove": {
+            "index": index,
+            "alias": "%s_%s_read_alias" % (cluster, app_id)
+        },
+    })
+    actions.append({
+        "remove": {
+            "index": index,
+            "alias": "%s_%s_write_alias" % (cluster, app_id)
+        },
+    })
+
+for app_id, index in work['add'].iteritems():
+    actions.append({
+        "add": {
+            "index": index,
+            "alias": "%s_%s_read_alias" % (cluster, app_id)
+        },
+    })
+    actions.append({
+        "add": {
+            "index": index,
+            "alias": "%s_%s_write_alias" % (cluster, app_id)
+        },
+    })
+
+url = 'http://localhost:9200/_aliases'
+
+r = requests.post(url, data=json.dumps({'actions': actions}))
+
+print '%s: %s' % (r.status_code, r.text)
diff --git a/utils/usergrid-util-python/es_tools/cluster_shard_allocation.py b/utils/usergrid-util-python/es_tools/cluster_shard_allocation.py
new file mode 100644
index 0000000..8f47698
--- /dev/null
+++ b/utils/usergrid-util-python/es_tools/cluster_shard_allocation.py
@@ -0,0 +1,111 @@
+# */
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *   http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing,
+# * software distributed under the License is distributed on an
+# * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# * KIND, either express or implied.  See the License for the
+#    * specific language governing permissions and limitations
+# * under the License.
+# */
+
+import json
+import time
+import requests
+
+__author__ = 'Jeff.West@yahoo.com'
+
+# The purpose of this script is to set certain nodes in an ElasticSearch cluster to be excluded from index allocation,
+# generally for the purpose of decomissioning or troubleshooting a node.
+
+# you can optionally shut down nodes as they have all the replicas removed from them
+SHUTDOWN_NODES = True
+
+# these are the nodes which will have shard allocation disabled.  The replicas will then be gradually moved off these
+# nodes.  The amount of time required depends on the size of the index, speed of network, CPU and cluster load
+
+exclude_nodes = [
+    'elasticsearch206west',
+    'elasticsearch207west',
+]
+
+base_url = 'http://localhost:9200'
+
+nodes_string = ",".join(exclude_nodes)
+
+print 'Excluding: ' + nodes_string
+
+url_template = '%s/_cluster/settings' % base_url
+
+status_code = 503
+
+# when a cluster is under load, it is possible that a 5xx will be returned.
+while status_code >= 500:
+    r = requests.put(
+        '%s/_cluster/settings' % base_url,
+        data=json.dumps({
+            "transient": {
+                "cluster.routing.allocation.exclude._host": nodes_string
+            }
+        }))
+
+    status_code = r.status_code
+
+    print '%s: %s' % (r.status_code, r.text)
+
+ready = False
+
+nodes_shut_down = []
+
+while not ready:
+    ready = True
+    nodes_left = 0
+    bytes_left = 0
+
+    for node in exclude_nodes:
+        node_url = '%s/_nodes/%s/stats' % (base_url, node)
+        r = requests.get(node_url)
+
+        if r.status_code == 200:
+            # print r.text
+
+            node_stats = r.json()
+
+            for field, data in node_stats.get('nodes').iteritems():
+                if data.get('name') == node:
+                    size = data.get('indices', {}).get('store', {}).get('size_in_bytes', 1)
+                    docs = data.get('indices', {}).get('docs', {}).get('count', 1)
+
+                    if size > 0 and docs > 0:
+                        print 'Node: %s - size %s' % (node, size)
+                        bytes_left += size
+                        ready = False and ready
+                        nodes_left += 1
+                    else:
+                        if SHUTDOWN_NODES:
+                            if not node in nodes_shut_down:
+                                nodes_shut_down.append(node)
+                                shutdown_url = '%s/_cluster/nodes/%s/_shutdown' % (base_url, node)
+
+                                print 'Shutting down node %s: %s' % (node, shutdown_url)
+
+                                r = requests.post(shutdown_url)
+
+                                if r.status_code == 200:
+                                    nodes_shut_down.append(node)
+                                    print 'Shut down node %s' % node
+                                else:
+                                    print 'Shutdown failed: %s: %s' % (r.status_code, r.text)
+    if not ready:
+        print 'NOT READY! Waiting for %s nodes and %s GB' % (nodes_left, bytes_left / 1024.0 / 1000000)
+        time.sleep(10)
+
+print 'READY TO MOVE!'
diff --git a/utils/usergrid-util-python/es_tools/command_sender.py b/utils/usergrid-util-python/es_tools/command_sender.py
new file mode 100644
index 0000000..2637ca4
--- /dev/null
+++ b/utils/usergrid-util-python/es_tools/command_sender.py
@@ -0,0 +1,52 @@
+# */
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *   http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing,
+# * software distributed under the License is distributed on an
+# * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# * KIND, either express or implied.  See the License for the
+#    * specific language governing permissions and limitations
+# * under the License.
+# */
+
+import json
+import requests
+
+__author__ = 'Jeff.West@yahoo.com'
+
+
+# Simple utility to send commands, useful to not have to recall the proper format
+
+data = {
+    "commands": [
+        {
+            "move": {
+                "index": "usergrid__APPID__application_target_final",
+                "shard": 14,
+                "from_node": "elasticsearch018",
+                "to_node": "elasticsearch021"
+            }
+        },
+        {
+            "move": {
+                "index": "usergrid__APPID__application_target_final",
+                "shard": 12,
+                "from_node": "elasticsearch018",
+                "to_node": "elasticsearch009"
+            }
+        },
+
+    ]
+}
+
+r = requests.post('http://localhost:9211/_cluster/reroute', data=json.dumps(data))
+
+print r.text
\ No newline at end of file
diff --git a/utils/usergrid-util-python/es_tools/es_index_iterator_reindexer.py b/utils/usergrid-util-python/es_tools/es_index_iterator_reindexer.py
new file mode 100644
index 0000000..101fa98
--- /dev/null
+++ b/utils/usergrid-util-python/es_tools/es_index_iterator_reindexer.py
@@ -0,0 +1,128 @@
+# */
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *   http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing,
+# * software distributed under the License is distributed on an
+# * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# * KIND, either express or implied.  See the License for the
+#    * specific language governing permissions and limitations
+# * under the License.
+# */
+
+import json
+import re
+from multiprocessing.pool import Pool
+import requests
+
+__author__ = 'Jeff.West@yahoo.com'
+
+# This script iterates an index and issues a PUT request for an empty string to force a reindex of the entity
+
+index_url_template = 'http://elasticsearch013wo:9200/{index_name}/_search?size={size}&from={from_var}'
+
+index_names = [
+    'es-index-name'
+]
+
+baas_url = 'http://localhost:8080/org/{app_id}/{collection}/{entity_id}'
+
+counter = 0
+size = 1000
+
+total_docs = 167501577
+from_var = 0
+page = 0
+
+work_items = []
+
+
+def work(item):
+    url = 'http://localhost:8080/org/{app_id}/{collection}/{entity_id}'.format(
+        app_id=item[0],
+        collection=item[1],
+        entity_id=item[2]
+    )
+
+    r_put = requests.put(url, data=json.dumps({'russo': ''}))
+
+    if r_put.status_code == 200:
+        print '[%s]: %s' % (r_put.status_code, url)
+
+    elif r_put.status_code:
+        print '[%s]: %s | %s' % (r_put.status_code, url, r.text)
+
+
+while from_var < total_docs:
+
+    from_var = page * size
+    page += 1
+
+    for index_name in index_names:
+
+        index_url = index_url_template.format(index_name=index_name, size=size, from_var=from_var)
+
+        print 'Getting URL: ' + index_url
+
+        r = requests.get(index_url)
+
+        if r.status_code != 200:
+            print r.text
+            exit()
+
+        response = r.json()
+
+        hits = response.get('hits', {}).get('hits')
+
+        re_app_id = re.compile('appId\((.+),')
+        re_ent_id = re.compile('entityId\((.+),')
+        re_type = re.compile('entityId\(.+,(.+)\)')
+
+        print 'Index: %s | hits: %s' % (index_name, len(hits))
+
+        for hit_data in hits:
+            source = hit_data.get('_source')
+
+            application_id = source.get('applicationId')
+
+            app_id_find = re_app_id.findall(application_id)
+
+            if len(app_id_find) > 0:
+                app_id = app_id_find[0]
+
+                if app_id != '5f20f423-f2a8-11e4-a478-12a5923b55dc':
+                    continue
+
+                entity_id_tmp = source.get('entityId')
+
+                entity_id_find = re_ent_id.findall(entity_id_tmp)
+                entity_type_find = re_type.findall(entity_id_tmp)
+
+                if len(entity_id_find) > 0 and len(entity_type_find) > 0:
+                    entity_id = entity_id_find[0]
+                    collection = entity_type_find[0]
+
+                    if collection in ['logs', 'log']:
+                        print 'skipping logs...'
+                        continue
+
+                    work_items.append((app_id, collection, entity_id))
+
+                    counter += 1
+
+pool = Pool(16)
+
+print 'Work Items: %s' % len(work_items)
+
+print 'Starting Work'
+
+pool.map(work, work_items)
+
+print 'done: %s' % counter
diff --git a/utils/usergrid-util-python/es_tools/es_searcher.py b/utils/usergrid-util-python/es_tools/es_searcher.py
new file mode 100644
index 0000000..5fb66dc
--- /dev/null
+++ b/utils/usergrid-util-python/es_tools/es_searcher.py
@@ -0,0 +1,45 @@
+# */
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *   http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing,
+# * software distributed under the License is distributed on an
+# * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# * KIND, either express or implied.  See the License for the
+#    * specific language governing permissions and limitations
+# * under the License.
+# */
+
+import json
+import requests
+
+# Simple example of searching for a specific entity in ES
+
+__author__ = 'Jeff.West@yahoo.com'
+
+INDEX_NAME=''
+
+url_template = 'http://localhost:9200/%s/_search' % INDEX_NAME
+
+request = {
+    "query": {
+        "term": {
+            "entityId": "entityId(1a78d0a6-bffb-11e5-bc61-0af922a4f655,superbad)"
+        }
+    }
+}
+
+# url_template = 'http://localhost:9200/_search'
+# r = requests.get(url)
+r = requests.get(url_template, data=json.dumps(request))
+
+print r.status_code
+print json.dumps(r.json(), indent=2)
+
diff --git a/utils/usergrid-util-python/es_tools/index_deleter.py b/utils/usergrid-util-python/es_tools/index_deleter.py
new file mode 100644
index 0000000..cb69f00
--- /dev/null
+++ b/utils/usergrid-util-python/es_tools/index_deleter.py
@@ -0,0 +1,98 @@
+# */
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *   http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing,
+# * software distributed under the License is distributed on an
+# * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# * KIND, either express or implied.  See the License for the
+#    * specific language governing permissions and limitations
+# * under the License.
+# */
+from multiprocessing import Pool
+
+import requests
+import logging
+
+__author__ = 'Jeff.West@yahoo.com'
+
+
+# utility for deleting indexes that are no longer needed.  Given:
+# A) a set of strings to include when evaluating the index names to delete
+# B) a set of strings to Exclude when evaluating the index names to delete
+#
+# The general logic is:
+# 1) If the include set is empty, or if the index name contains a string in the 'include' set, then delete
+# 2) If the index contains a string in the exclude list, do not delete
+
+url_base = 'http://localhost:9200'
+
+r = requests.get(url_base + "/_stats")
+
+indices = r.json()['indices']
+
+print 'retrieved %s indices' % len(indices)
+
+NUMBER_VALUE = 0
+
+includes = [
+    'cluster1',
+    # 'b6768a08-b5d5-11e3-a495-10ddb1de66c3',
+    # 'b6768a08-b5d5-11e3-a495-11ddb1de66c9',
+]
+
+excludes = [
+    # 'b6768a08-b5d5-11e3-a495-11ddb1de66c8',
+    # 'b6768a08-b5d5-11e3-a495-10ddb1de66c3',
+    # 'b6768a08-b5d5-11e3-a495-11ddb1de66c9',
+    # 'a34ad389-b626-11e4-848f-06b49118d7d0'
+]
+
+counter = 0
+process = False
+delete_counter = 0
+
+indexes_to_delete = []
+
+
+def delete_index(index_name):
+    url_template = '%s/%s' % (url_base, index_name)
+    print 'DELETING Index [%s] %s at URL %s' % (delete_counter, index_name, url_template)
+    response = requests.delete('%s/%s' % (url_base, index))
+
+for index in indices:
+    process = False
+    counter += 1
+
+    print 'index %s of %s' % (counter, len(indices))
+
+    if len(includes) == 0:
+        process = True
+    else:
+        for include in includes:
+
+            if include in index:
+                process = True
+
+    if len(excludes) > 0:
+        for exclude in excludes:
+            if exclude in index:
+                process = False
+
+    if process:
+        indexes_to_delete.append(index)
+
+print 'Found [%s] indexes to delete: %s' % (len(indexes_to_delete), indexes_to_delete)
+
+if len(indexes_to_delete) > 0:
+    pool = Pool(4)
+    pool.map(delete_index, indexes_to_delete)
+
+print 'Done!'
\ No newline at end of file
diff --git a/utils/usergrid-util-python/es_tools/index_prefix_checker.py b/utils/usergrid-util-python/es_tools/index_prefix_checker.py
new file mode 100644
index 0000000..d6a5d70
--- /dev/null
+++ b/utils/usergrid-util-python/es_tools/index_prefix_checker.py
@@ -0,0 +1,100 @@
+# */
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *   http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing,
+# * software distributed under the License is distributed on an
+# * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# * KIND, either express or implied.  See the License for the
+#    * specific language governing permissions and limitations
+# * under the License.
+# */
+
+import json
+from collections import defaultdict
+import requests
+
+
+__author__ = 'Jeff.West@yahoo.com'
+
+# This script iterates all the indexes in an ES cluster and aggregates the size by the prefix
+
+url_base = 'http://localhost:9200'
+
+r = requests.get(url_base + "/_stats")
+response = r.json()
+
+indices = r.json()['indices']
+
+print 'retrieved %s indices' % len(indices)
+
+NUMBER_VALUE = 0
+
+includes = [
+    # 'b6768a08-b5d5-11e3-a495-11ddb1de66c9',
+]
+
+excludes = [
+    # 'b6768a08-b5d5-11e3-a495-11ddb1de66c8',
+]
+
+counter = 0
+process = False
+
+counts = defaultdict(int)
+sizes = defaultdict(int)
+indexes = {}
+
+for index, index_data in indices.iteritems():
+    process = False
+    counter += 1
+
+    if 'management' in index:
+        print index
+
+    # print 'index %s of %s' % (counter, len(indices))
+
+    if len(includes) == 0:
+        process = True
+    else:
+        for include in includes:
+
+            if include in index:
+                process = True
+
+    if len(excludes) > 0:
+        for exclude in excludes:
+            if exclude in index:
+                process = False
+
+    if process:
+        # print index
+        if '__' in index:
+            index_prefix = index.split('__')[0]
+        elif '^' in index:
+            index_prefix = index.split('^')[0]
+        else:
+            index_prefix = index.split('_')[0]
+
+        if index_prefix not in indexes:
+            indexes[index_prefix] = []
+
+        indexes[index_prefix].append(index)
+
+        counts[index_prefix] += 1
+        counts['total'] += 1
+        sizes[index_prefix] += (float(index_data.get('total', {}).get('store', {}).get('size_in_bytes')) / 1e+9)
+        sizes['total'] += (float(index_data.get('total', {}).get('store', {}).get('size_in_bytes')) / 1e+9)
+
+print 'Number of indices (US-EAST):'
+print json.dumps(counts, indent=2)
+print 'Size in GB'
+print json.dumps(sizes, indent=2)
+print json.dumps(indexes, indent=2)
diff --git a/utils/usergrid-util-python/es_tools/index_replica_setter.py b/utils/usergrid-util-python/es_tools/index_replica_setter.py
new file mode 100644
index 0000000..7f27d70
--- /dev/null
+++ b/utils/usergrid-util-python/es_tools/index_replica_setter.py
@@ -0,0 +1,124 @@
+# */
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *   http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing,
+# * software distributed under the License is distributed on an
+# * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# * KIND, either express or implied.  See the License for the
+#    * specific language governing permissions and limitations
+# * under the License.
+# */
+
+from multiprocessing import Pool
+import requests
+import time
+
+__author__ = 'Jeff.West@yahoo.com'
+
+# utility for updating the replicas of a set of indexes that are no longer needed.  Given:
+# A) a set of strings to include when evaluating the index names to update
+# B) a set of strings to Exclude when evaluating the index names to update
+#
+# The general logic is:
+# 1) If the include set is empty, or if the index name contains a string in the 'include' set, then update
+# 2) If the index contains a string in the exclude list, do not update
+
+
+url_base = 'http://localhost:9200'
+
+# r = requests.get(url_base + "/_cat/indices?v")
+# print r.text
+
+r = requests.get(url_base + "/_stats")
+
+# print json.dumps(r.json(), indent=2)
+
+indices = r.json()['indices']
+
+print 'retrieved %s indices' % len(indices)
+
+NUMBER_VALUE = 1
+
+payload = {
+    "index.number_of_replicas": NUMBER_VALUE,
+}
+
+includes = [
+    # '70be096e-c2e1-11e4-8a55-12b4f5e28868',
+]
+
+excludes = [
+    # 'b6768a08-b5d5-11e3-a495-11ddb1de66c8',
+    # 'b6768a08-b5d5-11e3-a495-10ddb1de66c3',
+    # 'b6768a08-b5d5-11e3-a495-11ddb1de66c9',
+    # 'a34ad389-b626-11e4-848f-06b49118d7d0'
+]
+
+counter = 0
+update = False
+# print 'sleeping 1200s'
+# time.sleep(1200)
+
+index_names = sorted([index for index in indices])
+
+
+def update_shards(index_name):
+    update = False
+    # counter += 1
+    #
+    # print 'index %s of %s' % (counter, len(indices))
+
+    if len(includes) == 0:
+        update = True
+    else:
+        for include in includes:
+
+            if include in index_name:
+                update = True
+
+    if len(excludes) > 0:
+        for exclude in excludes:
+            if exclude in index_name:
+                update = False
+
+    if update:
+        print index_name
+
+        url = '%s/%s/_settings' % (url_base, index)
+        print url
+
+        response = requests.get('%s/%s/_settings' % (url_base, index))
+        settings = response.json()
+
+        index_settings = settings[index]['settings']['index']
+
+        current_replicas = int(index_settings.get('number_of_replicas'))
+
+        if current_replicas == NUMBER_VALUE:
+            # no action required
+            return
+
+        success = False
+
+        while not success:
+
+            response = requests.put('%s/%s/_settings' % (url_base, index_name), data=payload)
+
+            if response.status_code == 200:
+                success = True
+                print '200: %s: %s' % (index_name, response.text)
+            else:
+                print '%s: %s: %s' % (response.status_code, index_name, response.text)
+
+
+pool = Pool(8)
+
+pool.map(update_shards, index_names)
diff --git a/utils/usergrid-util-python/es_tools/index_shard_allocator.py b/utils/usergrid-util-python/es_tools/index_shard_allocator.py
new file mode 100644
index 0000000..47e05e2
--- /dev/null
+++ b/utils/usergrid-util-python/es_tools/index_shard_allocator.py
@@ -0,0 +1,149 @@
+# */
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *   http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing,
+# * software distributed under the License is distributed on an
+# * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# * KIND, either express or implied.  See the License for the
+#    * specific language governing permissions and limitations
+# * under the License.
+# */
+
+import json
+from multiprocessing import Pool
+
+import requests
+
+__author__ = 'Jeff.West@yahoo.com'
+
+# The purpose of this script is to update the shard allocation of ElasticSearch for specific indexes to be set to
+# specific nodes.  The reason for doing this is to isolate the nodes on which certain indexes run for specific
+# customers due to load, disk size or any other factors.
+
+
+nodes_c32xl = [
+    'elasticsearch000eu',
+    'elasticsearch001eu',
+    'elasticsearch002eu'
+]
+
+nodes_c34xl = [
+    'elasticsearch015eu',
+    'elasticsearch018eu',
+    'elasticsearch019eu'
+]
+
+nodes = nodes_c34xl
+
+url_base = 'http://localhost:9200'
+
+nodes_string = ",".join(nodes)
+
+payload = {
+    "index.routing.allocation.include._host": "",
+    "index.routing.allocation.exclude._host": nodes_string
+}
+
+# payload = {
+#     "index.routing.allocation.include._host": "",
+#     "index.routing.allocation.exclude._host": ""
+# }
+
+print json.dumps(payload )
+
+
+r = requests.get(url_base + "/_stats")
+indices = r.json()['indices']
+
+print 'retrieved %s indices' % len(indices)
+
+includes = [
+    # '70be096e-c2e1-11e4-8a55-12b4f5e28868',
+    # 'b0c640af-bc6c-11e4-b078-12b4f5e28868',
+    # 'e62e465e-bccc-11e4-b078-12b4f5e28868',
+    # 'd82b6413-bccc-11e4-b078-12b4f5e28868',
+    # '45914256-c27f-11e4-8a55-12b4f5e28868',
+    # '2776a776-c27f-11e4-8a55-12b4f5e28868',
+    # 'a54f878c-bc6c-11e4-b044-0e4cd56e19cd',
+    # 'ed5b47ea-bccc-11e4-b078-12b4f5e28868',
+    # 'bd4874ab-bccc-11e4-b044-0e4cd56e19cd',
+    # '3d748996-c27f-11e4-8a55-12b4f5e28868',
+    # '1daab807-c27f-11e4-8a55-12b4f5e28868',
+    # 'd0c4f0da-d961-11e4-849d-12b4f5e28868',
+    # '93e756ac-bc4e-11e4-92ae-12b4f5e28868',
+    #
+    # 'b6768a08-b5d5-11e3-a495-11ddb1de66c8',
+    # 'b6768a08-b5d5-11e3-a495-10ddb1de66c3',
+    # 'b6768a08-b5d5-11e3-a495-11ddb1de66c9',
+]
+
+excludes = [
+    #
+    # '70be096e-c2e1-11e4-8a55-12b4f5e28868',
+    # 'b0c640af-bc6c-11e4-b078-12b4f5e28868',
+    # 'e62e465e-bccc-11e4-b078-12b4f5e28868',
+    # 'd82b6413-bccc-11e4-b078-12b4f5e28868',
+    # '45914256-c27f-11e4-8a55-12b4f5e28868',
+    # '2776a776-c27f-11e4-8a55-12b4f5e28868',
+    # 'a54f878c-bc6c-11e4-b044-0e4cd56e19cd',
+    # 'ed5b47ea-bccc-11e4-b078-12b4f5e28868',
+    # 'bd4874ab-bccc-11e4-b044-0e4cd56e19cd',
+    # '3d748996-c27f-11e4-8a55-12b4f5e28868',
+    # '1daab807-c27f-11e4-8a55-12b4f5e28868',
+    # 'd0c4f0da-d961-11e4-849d-12b4f5e28868',
+    # '93e756ac-bc4e-11e4-92ae-12b4f5e28868',
+    #
+    # 'b6768a08-b5d5-11e3-a495-11ddb1de66c8',
+    # 'b6768a08-b5d5-11e3-a495-10ddb1de66c3',
+    # 'b6768a08-b5d5-11e3-a495-11ddb1de66c9',
+]
+
+counter = 0
+update = False
+
+for index_name in indices:
+    update = False
+    counter += 1
+
+    # print 'Checking index %s of %s: %s' % (counter, len(indices), index_name)
+
+    if len(includes) == 0:
+        update = True
+    else:
+        for include in includes:
+
+            if include in index_name:
+                update = True
+
+    if len(excludes) > 0:
+        for exclude in excludes:
+            if exclude in index_name:
+                update = False
+
+    if not update:
+        print 'Skipping %s of %s: %s' % (counter, len(indices), index_name)
+    else:
+        print '+++++Processing %s of %s: %s' % (counter, len(indices), index_name)
+
+        url_template = '%s/%s/_settings' % (url_base, index_name)
+        print url_template
+
+        success = False
+
+        while not success:
+
+            response = requests.put('%s/%s/_settings' % (url_base, index_name), data=json.dumps(payload))
+
+            if response.status_code == 200:
+                success = True
+                print '200: %s: %s' % (index_name, response.text)
+            else:
+                print '%s: %s: %s' % (response.status_code, index_name, response.text)
diff --git a/utils/usergrid-util-python/es_tools/mapping_deleter.py b/utils/usergrid-util-python/es_tools/mapping_deleter.py
new file mode 100644
index 0000000..a070d76
--- /dev/null
+++ b/utils/usergrid-util-python/es_tools/mapping_deleter.py
@@ -0,0 +1,53 @@
+# */
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *   http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing,
+# * software distributed under the License is distributed on an
+# * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# * KIND, either express or implied.  See the License for the
+#    * specific language governing permissions and limitations
+# * under the License.
+# */
+
+import json
+
+import requests
+
+
+__author__ = 'Jeff.West@yahoo.com'
+
+url_base = 'http://localhost:9200'
+
+SOURCE_INDEX = '5f20f423-f2a8-11e4-a478-12a5923b55dc__application_v6'
+
+url_template = '%s/{index_name}/_mapping' % url_base
+
+source_index_url = '%s/%s' % (url_base, SOURCE_INDEX)
+
+index_name = SOURCE_INDEX
+
+index_data = requests.get(url_template.format(index_name=index_name)).json()
+
+mappings = index_data.get(index_name, {}).get('mappings', {})
+
+for type_name, mapping_detail in mappings.iteritems():
+    print 'Index: %s | Type: %s: | Properties: %s' % (index_name, type_name, len(mappings[type_name]['properties']))
+
+    if type_name == '_default_':
+        continue
+
+    r = requests.delete('%s/%s/_mapping/%s' % (url_base, index_name, type_name))
+
+    print '%s: %s' % (r.status_code, r.text)
+
+    # print json.dumps(r.json(), indent=2)
+    # time.sleep(5)
+    print '---'
diff --git a/utils/usergrid-util-python/es_tools/mapping_retriever.py b/utils/usergrid-util-python/es_tools/mapping_retriever.py
new file mode 100644
index 0000000..473f278
--- /dev/null
+++ b/utils/usergrid-util-python/es_tools/mapping_retriever.py
@@ -0,0 +1,64 @@
+# */
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *   http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing,
+# * software distributed under the License is distributed on an
+# * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# * KIND, either express or implied.  See the License for the
+#    * specific language governing permissions and limitations
+# * under the License.
+# */
+
+import json
+import requests
+
+__author__ = 'Jeff.West@yahoo.com'
+
+# Utility to iterate the mappings for an index and save them locally
+
+url_base = 'http://localhost:9200'
+
+# r = requests.get(url_base + "/_stats")
+#
+# indices = r.json()['indices']
+
+url_template = '%s/{index_name}/_mapping' % url_base
+
+SOURCE_INDEX = '5f20f423-f2a8-11e4-a478-12a5923b55dc__application_v7'
+
+source_index_url = '%s/%s' % (url_base, SOURCE_INDEX)
+
+index_name = SOURCE_INDEX
+print 'Getting ' + url_template.format(index_name=index_name)
+
+r = requests.get(url_template.format(index_name=index_name))
+index_data = r.json()
+
+mappings = index_data.get(index_name, {}).get('mappings', {})
+
+for type_name, mapping_detail in mappings.iteritems():
+    print 'Index: %s | Type: %s: | Properties: %s' % (index_name, type_name, len(mappings[type_name]['properties']))
+
+    print 'Processing %s' % type_name
+
+    filename = '/tmp/%s_%s_source_mapping.json' % (
+        SOURCE_INDEX, type_name)
+
+    print filename
+
+    with open(filename, 'w') as f:
+        json.dump({type_name: mapping_detail}, f, indent=2)
+
+    # print '%s' % (r.status_code, r.text)
+
+    # print json.dumps(r.json(), indent=2)
+    # time.sleep(5)
+    print 'Done!'
diff --git a/utils/usergrid-util-python/es_tools/monitor_tasks.py b/utils/usergrid-util-python/es_tools/monitor_tasks.py
new file mode 100644
index 0000000..7ceb61d
--- /dev/null
+++ b/utils/usergrid-util-python/es_tools/monitor_tasks.py
@@ -0,0 +1,61 @@
+# */
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *   http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing,
+# * software distributed under the License is distributed on an
+# * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# * KIND, either express or implied.  See the License for the
+#    * specific language governing permissions and limitations
+# * under the License.
+# */
+
+import datetime
+import requests
+import time
+
+__author__ = 'Jeff.West@yahoo.com'
+
+# Utility for monitoring pending tasks in ElasticSearch
+
+
+def total_milliseconds(td):
+    return (td.microseconds + td.seconds * 1000000) / 1000
+
+
+url_template = "http://localhost:9200/_cat/pending_tasks?v'"
+
+x = 0
+
+SLEEP_TIME = 3
+
+while True:
+    x += 13
+    try:
+
+        r = requests.get(url=url_template)
+        lines = r.text.split('\n')
+
+        print '\n-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-'
+        print '+++++++++++++++++++++++++++++++++++++++++++++++++++++++++'
+        print datetime.datetime.utcnow()
+        if len(lines) > 1:
+            print r.text
+        else:
+            print 'None'
+
+        print '-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-'
+        print '-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-\n'
+
+    except:
+        pass
+
+    time.sleep(SLEEP_TIME)
+
diff --git a/utils/usergrid-util-python/index_test/README.md b/utils/usergrid-util-python/index_test/README.md
new file mode 100644
index 0000000..eed7f1c
--- /dev/null
+++ b/utils/usergrid-util-python/index_test/README.md
@@ -0,0 +1 @@
+This set of scripts was intended to test indexing times and sizes for the new indexing scheme
\ No newline at end of file
diff --git a/utils/usergrid-util-python/index_test/document_creator.py b/utils/usergrid-util-python/index_test/document_creator.py
new file mode 100644
index 0000000..f13ccc2
--- /dev/null
+++ b/utils/usergrid-util-python/index_test/document_creator.py
@@ -0,0 +1,276 @@
+# */
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *   http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing,
+# * software distributed under the License is distributed on an
+# * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# * KIND, either express or implied.  See the License for the
+#    * specific language governing permissions and limitations
+# * under the License.
+# */
+
+from __future__ import print_function
+from Queue import Empty
+import json
+from multiprocessing import JoinableQueue, Process
+import random
+import re
+import uuid
+import sys
+
+import argparse
+
+import loremipsum
+
+
+__author__ = 'Jeff.West@yahoo.com'
+
+
+def parse_args():
+    parser = argparse.ArgumentParser(description='ElasticSearch Index Test 1')
+
+    parser.add_argument('-w', '--workers',
+                        help='The number of worker threads',
+                        type=int,
+                        default=8)
+
+    parser.add_argument('-dc', '--document_count',
+                        help='The number of documents per index',
+                        type=long,
+                        default=100000000)
+
+    parser.add_argument('--output',
+                        help='The filename to write to',
+                        type=str,
+                        default='generated_documents.txt')
+
+    parser.add_argument('--fields_min',
+                        help='The min number of fields per document',
+                        type=long,
+                        default=10)
+
+    parser.add_argument('--fields_max',
+                        help='The max number of fields per document',
+                        type=long,
+                        default=100)
+
+    parser.add_argument('-tp', '--type_prefix',
+                        help='The Prefix to use for type names',
+                        type=str,
+                        default='type_this')
+
+    my_args = parser.parse_args(sys.argv[1:])
+
+    return vars(my_args)
+
+
+args = parse_args()
+
+sentence_list = loremipsum.get_sentences(10000)
+
+
+class Worker(Process):
+    def __init__(self, work_queue, response_queue):
+        super(Worker, self).__init__()
+        self.work_queue = work_queue
+        self.response_queue = response_queue
+        self.sentence_list = loremipsum.get_sentences(1000)
+        self.re_first_word = re.compile('([A-z]+)')
+
+    def run(self):
+        print('Starting %s ' % self.name)
+
+        while True:
+            task = self.work_queue.get(timeout=600)
+            field_count = random.randint(task['fields_min'], task['fields_max'])
+            document = self.generate_document(field_count)
+            flattened_doc = self.process_document(document,
+                                                  task['uuid'],
+                                                  task['uuid'])
+
+            self.response_queue.put(flattened_doc)
+
+            self.work_queue.task_done()
+
+    def generate_document(self, fields):
+
+        doc = {}
+
+        my_bool = True
+
+        for i in xrange(fields):
+            sentence_index = random.randint(0, max((fields / 2) - 1, 1))
+            sentence = self.sentence_list[sentence_index]
+
+            if random.random() >= .5:
+                key = self.re_first_word.findall(sentence)[1]
+            else:
+                key = self.re_first_word.findall(sentence)[1] + str(i)
+
+            field_type = random.random()
+
+            if field_type <= 0.3:
+                doc[key] = sentence
+
+            elif field_type <= 0.5:
+                doc[key] = random.randint(1, 1000000)
+
+            elif field_type <= 0.6:
+                doc[key] = random.random() * 1000000000
+
+            elif field_type == 0.7:
+                doc[key] = my_bool
+                my_bool = not my_bool
+
+            elif field_type == 0.8:
+                doc[key] = self.generate_document(max(fields / 5, 1))
+
+            elif field_type <= 1.0:
+                doc['mylocation'] = self.generate_location()
+
+        return doc
+
+    @staticmethod
+    def get_fields(document, base_name=None):
+        fields = []
+
+        for name, value in document.iteritems():
+            if base_name:
+                field_name = '%s.%s' % (base_name, name)
+            else:
+                field_name = name
+
+            if isinstance(value, dict):
+                fields += Worker.get_fields(value, field_name)
+            else:
+                value_name = None
+                if isinstance(value, basestring):
+                    value_name = 'string'
+
+                elif isinstance(value, bool):
+                    value_name = 'boolean'
+
+                elif isinstance(value, (int, long)):
+                    value_name = 'long'
+
+                elif isinstance(value, float):
+                    value_name = 'double'
+
+                if value_name:
+                    field = {
+                        'name': field_name,
+                        value_name: value
+                    }
+                else:
+                    field = {
+                        'name': field_name,
+                        'string': str(value)
+                    }
+
+                fields.append(field)
+
+        return fields
+
+
+    @staticmethod
+    def process_document(document, application_id, uuid):
+        response = {
+            'entityId': uuid,
+            'entityVersion': '1',
+            'applicationId': application_id,
+            'fields': Worker.get_fields(document)
+        }
+
+        return response
+
+    def generate_location(self):
+        response = {}
+
+        lat = random.random() * 90.0
+        lon = random.random() * 180.0
+
+        lat_neg_true = True if lon > .5 else False
+        lon_neg_true = True if lat > .5 else False
+
+        lat = lat * -1.0 if lat_neg_true else lat
+        lon = lon * -1.0 if lon_neg_true else lon
+
+        response['location'] = {
+            'lat': lat,
+            'lon': lon
+        }
+
+        return response
+
+
+class Writer(Process):
+    def __init__(self, document_queue):
+        super(Writer, self).__init__()
+        self.document_queue = document_queue
+
+    def run(self):
+        keep_going = True
+
+        with open(args['output'], 'w') as f:
+            while keep_going:
+                try:
+                    document = self.document_queue.get(timeout=300)
+                    print(json.dumps(document), file=f)
+
+                except Empty:
+                    print('done!')
+                    keep_going = False
+
+
+def total_milliseconds(td):
+    return (td.microseconds + td.seconds * 1000000) / 1000
+
+
+def main():
+    work_queue = JoinableQueue()
+    response_queue = JoinableQueue()
+
+    workers = [Worker(work_queue, response_queue) for x in xrange(args.get('workers'))]
+
+    writer = Writer(response_queue)
+    writer.start()
+
+    [worker.start() for worker in workers]
+
+    try:
+        total_messages = args.get('document_count')
+        batch_size = 100000
+        message_counter = 0
+
+        for doc_number in xrange(total_messages):
+            message_counter += 1
+
+            for count in xrange(batch_size):
+                doc_id = str(uuid.uuid1())
+
+                task = {
+                    'fields_min': args['fields_min'],
+                    'fields_max': args['fields_max'],
+                    'uuid': doc_id
+                }
+
+                work_queue.put(task)
+
+        print('Joining queues counter=[%s]...' % message_counter)
+        work_queue.join()
+        response_queue.join()
+        print('Done queue counter=[%s]...' % message_counter)
+
+    except KeyboardInterrupt:
+        [worker.terminate() for worker in workers]
+
+
+main()
\ No newline at end of file
diff --git a/utils/usergrid-util-python/index_test/index_test_mixed_batch.py b/utils/usergrid-util-python/index_test/index_test_mixed_batch.py
new file mode 100644
index 0000000..99db41f
--- /dev/null
+++ b/utils/usergrid-util-python/index_test/index_test_mixed_batch.py
@@ -0,0 +1,552 @@
+# */
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *   http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing,
+# * software distributed under the License is distributed on an
+# * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# * KIND, either express or implied.  See the License for the
+#    * specific language governing permissions and limitations
+# * under the License.
+# */
+
+import json
+from multiprocessing import JoinableQueue, Process
+import random
+import re
+import traceback
+import uuid
+import time
+import sys
+import argparse
+import loremipsum
+import requests
+from elasticsearch import Elasticsearch
+
+__author__ = 'Jeff.West@yahoo.com'
+
+
+es_hosts = [
+    {'host': 'elasticsearch000west', 'port': 9200},
+    {'host': 'elasticsearch001west', 'port': 9200},
+    {'host': 'elasticsearch002west', 'port': 9200},
+    {'host': 'elasticsearch003west', 'port': 9200}
+]
+
+
+def parse_args():
+    parser = argparse.ArgumentParser(description='ElasticSearch Index Test 1')
+
+    parser.add_argument('-t', '--type_count',
+                        help='The number of types to produce',
+                        type=int,
+                        default=100)
+
+    parser.add_argument('-ic', '--index_count',
+                        help='The number of indices to create',
+                        type=int,
+                        default=10)
+
+    parser.add_argument('-sc', '--shard_count',
+                        help='The number of indices to create',
+                        type=int,
+                        default=18)
+
+    parser.add_argument('-rc', '--replica_count',
+                        help='The number of indices to create',
+                        type=int,
+                        default=1)
+
+    parser.add_argument('-w', '--workers',
+                        help='The number of worker threads',
+                        type=int,
+                        default=8)
+
+    parser.add_argument('-dc', '--document_count',
+                        help='The number of documents per index',
+                        type=long,
+                        default=100000000)
+
+    parser.add_argument('-bs', '--batch_size',
+                        help='The size of batches to send to ES',
+                        type=long,
+                        default=25)
+
+    parser.add_argument('-ip', '--index_prefix',
+                        help='The Prefix to use for index names',
+                        type=str,
+                        default='apigee_ftw')
+
+    parser.add_argument('-tp', '--type_prefix',
+                        help='The Prefix to use for type names',
+                        type=str,
+                        default='type_this')
+
+    parser.add_argument('-s', '--setup',
+                        help='The Prefix to use for type names',
+                        action='store_true')
+
+    my_args = parser.parse_args(sys.argv[1:])
+
+    return vars(my_args)
+
+
+args = parse_args()
+
+
+class APIClient():
+    def __init__(self, base_url):
+        self.base_url = base_url
+
+    def put(self, path='/', data=None):
+        if not data:
+            data = {}
+
+        url = '%s%s' % (self.base_url, path)
+        r = requests.put(url, json.dumps(data))
+
+        if r.status_code == 200:
+            print 'PUT (%s) in %sms' % (r.status_code, total_milliseconds(r.elapsed))
+            return r.json()
+
+        raise Exception('HTTP %s calling PUT on URL=[%s]: %s' % (r.status_code, url, r.text))
+
+    def index_batch(self, batch):
+
+        data = ''
+
+        for element in batch:
+            index_tuple = element[0]
+            doc = element[1]
+            data += '{ "index" : { "_index" : "%s", "_type" : "%s", "_id" : "%s" } }\n' % (
+                index_tuple[0], index_tuple[1], doc['entityId'])
+            data += json.dumps(doc)
+            data += '\n'
+
+        url = '%s/_bulk' % self.base_url
+
+        # print data
+
+        r = requests.post(url, data)
+
+        # print json.dumps(r.json(), indent=2)
+
+        if r.status_code == 200:
+            print 'PUT (%s) in %sms' % (r.status_code, total_milliseconds(r.elapsed))
+            return r.json()
+
+        raise Exception('HTTP %s calling POST URL=[%s]: %s' % (r.status_code, url, r.text))
+
+    def delete(self, index):
+        url = '%s%s' % (self.base_url, index)
+        r = requests.delete(url)
+
+        if r.status_code == 200:
+            print 'DELETE (%s) in %sms' % (r.status_code, total_milliseconds(r.elapsed))
+            return r.json()
+
+        raise Exception('HTTP %s calling DELETE URL=[%s]: %s' % (r.status_code, url, r.text))
+
+    def create_index(self, name=None, shards=18 * 3, replicas=1):
+        data = {
+            "settings": {
+                "index": {
+                    "action": {
+                        "write_consistency": "one"
+                    },
+                    "number_of_shards": shards,
+                    "number_of_replicas": replicas
+                }
+            }
+        }
+
+        try:
+            print 'Creating index %s' % name
+            response = self.put('/%s/' % name.lower(), data)
+
+            print response
+
+        except Exception, e:
+            print traceback.format_exc()
+
+    def delete_index(self, name):
+        try:
+            response = self.delete('/%s/' % name.lower())
+
+            print response
+
+        except Exception, e:
+            print traceback.format_exc()
+
+    def define_type_mapping(self, index_name, type_name):
+        try:
+            url = '/%s/_mapping/%s' % (index_name, type_name)
+            print url
+
+            response = self.put(url, get_type_mapping(type_name))
+
+            print response
+
+        except Exception, e:
+            print traceback.format_exc()
+
+
+class Worker(Process):
+    def __init__(self, work_queue):
+        super(Worker, self).__init__()
+        self.api_client = APIClient('http://%s:9200' % es_hosts[random.randint(0, len(es_hosts) - 1)].get('host'))
+        self.work_queue = work_queue
+        self.es = Elasticsearch(es_hosts)
+        self.sentence_list = loremipsum.get_sentences(1000)
+        self.re_first_word = re.compile('([A-z]+)')
+
+    def run(self):
+        print 'Starting %s ' % self.name
+        counter = 0
+
+        batch = []
+
+        while True:
+            index_batch_size = args.get('batch_size')
+            task = self.work_queue.get(timeout=600)
+            counter += 1
+
+            document = self.generate_document(task['field_count'])
+            flattened_doc = self.process_document(document,
+                                                  task['type'],
+                                                  task['uuid'],
+                                                  task['uuid'])
+
+            index_type_tuple = (task['index'], task['type'])
+
+            # self.handle_document(task['index'], task['type'], task['uuid'], flattened_doc)
+
+            batch.append((index_type_tuple, flattened_doc))
+
+            if len(batch) >= index_batch_size:
+                self.handle_batch(batch)
+                batch = []
+
+            self.work_queue.task_done()
+
+    def generate_document(self, fields):
+
+        doc = {}
+
+        my_bool = True
+
+        for i in xrange(fields):
+            sentence_index = random.randint(0, max((fields / 2) - 1, 1))
+            sentence = self.sentence_list[sentence_index]
+
+            if random.random() >= .5:
+                key = self.re_first_word.findall(sentence)[1]
+            else:
+                key = self.re_first_word.findall(sentence)[1] + str(i)
+
+            field_type = random.random()
+
+            if field_type <= 0.3:
+                doc[key] = sentence
+
+            elif field_type <= 0.5:
+                doc[key] = random.randint(1, 1000000)
+
+            elif field_type <= 0.6:
+                doc[key] = random.random() * 1000000000
+
+            elif field_type == 0.7:
+                doc[key] = my_bool
+                my_bool = not my_bool
+
+            elif field_type == 0.8:
+                doc[key] = self.generate_document(max(fields / 5, 1))
+
+            elif field_type <= 1.0:
+                doc['mylocation'] = self.generate_location()
+
+        return doc
+
+    @staticmethod
+    def get_fields(document, base_name=None):
+        fields = []
+
+        for name, value in document.iteritems():
+            if base_name:
+                field_name = '%s.%s' % (base_name, name)
+            else:
+                field_name = name
+
+            if isinstance(value, dict):
+                fields += Worker.get_fields(value, field_name)
+            else:
+                value_name = None
+                if isinstance(value, basestring):
+                    value_name = 'string'
+
+                elif isinstance(value, bool):
+                    value_name = 'boolean'
+
+                elif isinstance(value, (int, long)):
+                    value_name = 'long'
+
+                elif isinstance(value, float):
+                    value_name = 'double'
+
+                if value_name:
+                    field = {
+                        'name': field_name,
+                        value_name: value
+                    }
+                else:
+                    field = {
+                        'name': field_name,
+                        'string': str(value)
+                    }
+
+                fields.append(field)
+
+        return fields
+
+
+    @staticmethod
+    def process_document(document, doc_type, application_id, uuid):
+        response = {
+            'entityId': uuid,
+            'entityVersion': '1',
+            'entityType': doc_type,
+            'applicationId': application_id,
+            'fields': Worker.get_fields(document)
+        }
+
+        return response
+
+    def handle_document(self, index, doc_type, uuid, document):
+
+        res = self.es.create(index=index,
+                             doc_type=doc_type,
+                             id=uuid,
+                             body=document)
+
+        print res
+
+    def generate_location(self):
+        response = {}
+
+        lat = random.random() * 90.0
+        lon = random.random() * 180.0
+
+        lat_neg_true = True if lon > .5 else False
+        lon_neg_true = True if lat > .5 else False
+
+        lat = lat * -1.0 if lat_neg_true else lat
+        lon = lon * -1.0 if lon_neg_true else lon
+
+        response['location'] = {
+            'lat': lat,
+            'lon': lon
+        }
+
+        return response
+
+    def handle_batch(self, batch):
+        print 'HANDLE BATCH size=%s' % len(batch)
+        # self.api_client.define_type_mapping(index, doc_type)
+        self.api_client.index_batch(batch)
+
+
+def total_milliseconds(td):
+    return (td.microseconds + td.seconds * 1000000) / 1000
+
+
+def get_type_mapping(type_name):
+    return {
+        type_name: {
+            "_routing": {
+                "path": "entityId",
+                "required": True
+            },
+            "properties": {
+                "entityId": {
+                    "type": "string",
+                    "index": "not_analyzed",
+                    "doc_values": True
+                },
+                "entityVersion": {
+                    "type": "string",
+                    "index": "not_analyzed",
+                    "doc_values": True
+                },
+                "entityType": {
+                    "type": "string",
+                    "index": "not_analyzed",
+                    "doc_values": True
+                },
+                "applicationId": {
+                    "type": "string",
+                    "index": "not_analyzed",
+                    "doc_values": True
+                },
+                "nodeId": {
+                    "type": "string",
+                    "index": "not_analyzed",
+                    "doc_values": True
+                },
+                "edgeName": {
+                    "type": "string",
+                    "index": "not_analyzed",
+                    "doc_values": True
+                },
+                "entityNodeType": {
+                    "type": "string",
+                    "index": "not_analyzed",
+                    "doc_values": True
+                },
+                "edgeTimestamp": {
+                    "type": "long",
+                    "doc_values": True
+                },
+                "edgeSearch": {
+                    "type": "string",
+                    "index": "not_analyzed",
+                    "doc_values": True
+                },
+                "fields": {
+                    "type": "nested",
+                    "properties": {
+                        "name": {
+                            "type": "string",
+                            "index": "not_analyzed",
+                            "doc_values": True
+                        },
+                        "boolean": {
+                            "type": "boolean",
+                            "doc_values": True
+                        },
+                        "long": {
+                            "type": "long",
+                            "doc_values": True
+                        },
+                        "double": {
+                            "type": "double",
+                            "doc_values": True
+                        },
+                        "location": {
+                            "type": "geo_point",
+                            "lat_lon": True,
+                            "geohash": True,
+                            "doc_values": True
+                        },
+                        "string": {
+                            "type": "string",
+                            "norms": {
+                                "enabled": False
+                            },
+                            "fields": {
+                                "exact": {
+                                    "type": "string",
+                                    "index": "not_analyzed",
+                                    "doc_values": True
+                                }
+                            }
+                        },
+                        "uuid": {
+                            "type": "string",
+                            "index": "not_analyzed",
+                            "doc_values": True
+                        }
+                    }
+                }
+            },
+            "_all": {
+                "enabled": False
+            }
+
+        }
+    }
+
+
+def main():
+    INDEX_COUNT = args.get('index_count')
+    TYPE_COUNT = args.get('type_count')
+    SETUP = args.get('setup')
+
+    indices = []
+    types = []
+    work_queue = JoinableQueue()
+
+    apiclient = APIClient('http://%s:9200' % es_hosts[random.randint(0, len(es_hosts) - 1)].get('host'))
+
+    workers = [Worker(work_queue) for x in xrange(args.get('workers'))]
+    [worker.start() for worker in workers]
+
+    try:
+        #
+        for x in xrange(TYPE_COUNT):
+            type_name = '%s_%s' % (args.get('type_prefix'), x)
+            types.append(type_name)
+
+        for x in xrange(INDEX_COUNT):
+            index_name = '%s_%s' % (args.get('index_prefix'), x)
+            indices.append(index_name)
+
+        if SETUP:
+            print 'Running setup...'
+
+            for index_name in indices:
+                apiclient.delete_index(index_name)
+
+            time.sleep(1)
+
+            for index_name in indices:
+                apiclient.create_index(
+                    index_name,
+                    shards=args['shard_count'],
+                    replicas=args['replica_count'])
+
+                # time.sleep(5)
+
+                # for index_name in indices:
+                # for type_name in types:
+                # apiclient.define_type_mapping(index_name, type_name)
+
+                # time.sleep(5)
+
+        total_messages = args.get('document_count')
+        batch_size = 100000
+        message_counter = 0
+        fields = random.randint(50, 100)
+
+        while message_counter < total_messages:
+
+            for count in xrange(batch_size):
+
+                for index_name in indices:
+                    doc_id = str(uuid.uuid1())
+
+                    task = {
+                        'field_count': fields,
+                        'uuid': doc_id,
+                        'index': index_name,
+                        'type': types[random.randint(0, len(types) - 1)]
+                    }
+
+                    work_queue.put(task)
+
+            print 'Joining queue counter=[%s]...' % message_counter
+            work_queue.join()
+            print 'Done queue counter=[%s]...' % message_counter
+            message_counter += batch_size
+
+    except KeyboardInterrupt:
+        [worker.terminate() for worker in workers]
+
+
+main()
\ No newline at end of file
diff --git a/utils/usergrid-util-python/index_test/index_test_single_type_batch.py b/utils/usergrid-util-python/index_test/index_test_single_type_batch.py
new file mode 100644
index 0000000..f5ee9d6
--- /dev/null
+++ b/utils/usergrid-util-python/index_test/index_test_single_type_batch.py
@@ -0,0 +1,555 @@
+# */
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *   http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing,
+# * software distributed under the License is distributed on an
+# * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# * KIND, either express or implied.  See the License for the
+#    * specific language governing permissions and limitations
+# * under the License.
+# */
+
+import json
+from multiprocessing import JoinableQueue, Process
+import random
+import re
+import traceback
+import uuid
+import time
+import sys
+
+import argparse
+import loremipsum
+import requests
+from elasticsearch import Elasticsearch
+
+__author__ = 'Jeff.West@yahoo.com'
+
+
+es_hosts = [
+    {'host': 'elasticsearch000west', 'port': 9200},
+    {'host': 'elasticsearch001west', 'port': 9200},
+    {'host': 'elasticsearch002west', 'port': 9200},
+    {'host': 'elasticsearch003west', 'port': 9200}
+]
+
+
+def parse_args():
+    parser = argparse.ArgumentParser(description='ElasticSearch Index Test 1')
+
+    parser.add_argument('-t', '--type_count',
+                        help='The number of types to produce',
+                        type=int,
+                        default=50)
+
+    parser.add_argument('-ic', '--index_count',
+                        help='The number of indices to create',
+                        type=int,
+                        default=50)
+
+    parser.add_argument('-sc', '--shard_count',
+                        help='The number of indices to create',
+                        type=int,
+                        default=50)
+
+    parser.add_argument('-rc', '--replica_count',
+                        help='The number of indices to create',
+                        type=int,
+                        default=1)
+
+    parser.add_argument('-w', '--workers',
+                        help='The number of worker threads',
+                        type=int,
+                        default=8)
+
+    parser.add_argument('-dc', '--document_count',
+                        help='The number of documents per index',
+                        type=long,
+                        default=100000000)
+
+    parser.add_argument('-bs', '--batch_size',
+                        help='The size of batches to send to ES',
+                        type=long,
+                        default=25)
+
+    parser.add_argument('-ip', '--index_prefix',
+                        help='The Prefix to use for index names',
+                        type=str,
+                        default='apigee_ftw')
+
+    parser.add_argument('-tp', '--type_prefix',
+                        help='The Prefix to use for type names',
+                        type=str,
+                        default='type_this')
+
+    parser.add_argument('-s', '--setup',
+                        help='The Prefix to use for type names',
+                        action='store_true')
+
+    my_args = parser.parse_args(sys.argv[1:])
+
+    return vars(my_args)
+
+
+args = parse_args()
+
+
+class APIClient():
+    def __init__(self, base_url):
+        self.base_url = base_url
+
+    def put(self, path='/', data=None):
+        if not data:
+            data = {}
+
+        url = '%s%s' % (self.base_url, path)
+        r = requests.put(url, json.dumps(data))
+
+        if r.status_code == 200:
+            print 'PUT (%s) in %sms' % (r.status_code, total_milliseconds(r.elapsed))
+            return r.json()
+
+        raise Exception('HTTP %s calling PUT on URL=[%s]: %s' % (r.status_code, url, r.text))
+
+    def index_docs(self, index, documents, type):
+
+        data = ''
+
+        for doc in documents:
+            data += '{ "index" : { "_index" : "%s", "_type" : "%s", "_id" : "%s" } }\n' % (index, type, doc['entityId'])
+            data += json.dumps(doc)
+            data += '\n'
+
+        url = '%s/_bulk' % self.base_url
+
+        # print data
+
+        r = requests.post(url, data)
+
+        # print json.dumps(r.json(), indent=2)
+
+        if r.status_code == 200:
+            print 'PUT (%s) in %sms' % (r.status_code, total_milliseconds(r.elapsed))
+            return r.json()
+
+        raise Exception('HTTP %s calling POST URL=[%s]: %s' % (r.status_code, url, r.text))
+
+    def delete(self, index):
+        url = '%s%s' % (self.base_url, index)
+        r = requests.delete(url)
+
+        if r.status_code == 200:
+            print 'DELETE (%s) in %sms' % (r.status_code, total_milliseconds(r.elapsed))
+            return r.json()
+
+        raise Exception('HTTP %s calling DELETE URL=[%s]: %s' % (r.status_code, url, r.text))
+
+    def create_index(self, name=None, shards=18 * 3, replicas=1):
+        data = {
+            "settings": {
+                "index": {
+                    "action": {
+                        "write_consistency": "one"
+                    },
+                    "number_of_shards": shards,
+                    "number_of_replicas": replicas
+                }
+            }
+        }
+
+        try:
+            print 'Creating index %s' % name
+            response = self.put('/%s/' % name.lower(), data)
+
+            print response
+
+        except Exception, e:
+            print traceback.format_exc()
+
+    def delete_index(self, name):
+        try:
+            response = self.delete('/%s/' % name.lower())
+
+            print response
+
+        except Exception, e:
+            print traceback.format_exc()
+
+    def define_type_mapping(self, index_name, type_name):
+        try:
+            url = '/%s/_mapping/%s' % (index_name, type_name)
+            print url
+
+            response = self.put(url, get_type_mapping(type_name))
+
+            print response
+
+        except Exception, e:
+            print traceback.format_exc()
+
+
+class Worker(Process):
+    def __init__(self, work_queue):
+        super(Worker, self).__init__()
+        self.api_client = APIClient('http://%s:9200' % es_hosts[random.randint(0, len(es_hosts) - 1)].get('host'))
+        self.work_queue = work_queue
+        self.es = Elasticsearch(es_hosts)
+        self.sentence_list = loremipsum.get_sentences(1000)
+        self.re_first_word = re.compile('([A-z]+)')
+
+    def run(self):
+        print 'Starting %s ' % self.name
+        counter = 0
+
+        docs = {}
+
+        while True:
+            index_batch_size = args.get('batch_size')
+            task = self.work_queue.get(timeout=600)
+            counter += 1
+
+            document = self.generate_document(task['field_count'])
+            flattened_doc = self.process_document(document,
+                                                  task['type'],
+                                                  task['uuid'],
+                                                  task['uuid'])
+
+            index_type_tuple = (task['index'], task['type'])
+
+            # self.handle_document(task['index'], task['type'], task['uuid'], flattened_doc)
+
+            doc_array = docs.get(index_type_tuple)
+
+            if doc_array is None:
+                doc_array = []
+                docs[index_type_tuple] = doc_array
+
+            doc_array.append(flattened_doc)
+
+            if len(doc_array) >= index_batch_size:
+                self.handle_batch(task['index'], task['type'], doc_array)
+                doc_array = []
+
+            self.work_queue.task_done()
+
+    def generate_document(self, fields):
+
+        doc = {}
+
+        my_bool = True
+
+        for i in xrange(fields):
+            sentence_index = random.randint(0, max((fields / 2) - 1, 1))
+            sentence = self.sentence_list[sentence_index]
+
+            if random.random() >= .5:
+                key = self.re_first_word.findall(sentence)[1]
+            else:
+                key = self.re_first_word.findall(sentence)[1] + str(i)
+
+            field_type = random.random()
+
+            if field_type <= 0.3:
+                doc[key] = sentence
+
+            elif field_type <= 0.5:
+                doc[key] = random.randint(1, 1000000)
+
+            elif field_type <= 0.6:
+                doc[key] = random.random() * 1000000000
+
+            elif field_type == 0.7:
+                doc[key] = my_bool
+                my_bool = not my_bool
+
+            elif field_type == 0.8:
+                doc[key] = self.generate_document(max(fields / 5, 1))
+
+            elif field_type <= 1.0:
+                doc['mylocation'] = self.generate_location()
+
+        return doc
+
+    @staticmethod
+    def get_fields(document, base_name=None):
+        fields = []
+
+        for name, value in document.iteritems():
+            if base_name:
+                field_name = '%s.%s' % (base_name, name)
+            else:
+                field_name = name
+
+            if isinstance(value, dict):
+                fields += Worker.get_fields(value, field_name)
+            else:
+                value_name = None
+                if isinstance(value, basestring):
+                    value_name = 'string'
+
+                elif isinstance(value, bool):
+                    value_name = 'boolean'
+
+                elif isinstance(value, (int, long)):
+                    value_name = 'long'
+
+                elif isinstance(value, float):
+                    value_name = 'double'
+
+                if value_name:
+                    field = {
+                        'name': field_name,
+                        value_name: value
+                    }
+                else:
+                    field = {
+                        'name': field_name,
+                        'string': str(value)
+                    }
+
+                fields.append(field)
+
+        return fields
+
+    @staticmethod
+    def process_document(document, doc_type, application_id, uuid):
+        response = {
+            'entityId': uuid,
+            'entityVersion': '1',
+            'entityType': doc_type,
+            'applicationId': application_id,
+            'fields': Worker.get_fields(document)
+        }
+
+        return response
+
+    def handle_document(self, index, doc_type, uuid, document):
+
+        res = self.es.create(index=index,
+                             doc_type=doc_type,
+                             id=uuid,
+                             body=document)
+
+        print res
+
+    def generate_location(self):
+        response = {}
+
+        lat = random.random() * 90.0
+        lon = random.random() * 180.0
+
+        lat_neg_true = True if lon > .5 else False
+        lon_neg_true = True if lat > .5 else False
+
+        lat = lat * -1.0 if lat_neg_true else lat
+        lon = lon * -1.0 if lon_neg_true else lon
+
+        response['location'] = {
+            'lat': lat,
+            'lon': lon
+        }
+
+        return response
+
+    def handle_batch(self, index, doc_type, docs):
+        print 'HANDLE BATCH'
+        self.api_client.define_type_mapping(index, doc_type)
+        self.api_client.index_docs(index, docs, doc_type)
+
+
+def total_milliseconds(td):
+    return (td.microseconds + td.seconds * 1000000) / 1000
+
+
+def get_type_mapping(type_name):
+    return {
+        type_name: {
+            "_routing": {
+                "path": "entityId",
+                "required": True
+            },
+            "properties": {
+                "entityId": {
+                    "type": "string",
+                    "index": "not_analyzed",
+                    "doc_values": True
+                },
+                "entityVersion": {
+                    "type": "string",
+                    "index": "not_analyzed",
+                    "doc_values": True
+                },
+                "entityType": {
+                    "type": "string",
+                    "index": "not_analyzed",
+                    "doc_values": True
+                },
+                "applicationId": {
+                    "type": "string",
+                    "index": "not_analyzed",
+                    "doc_values": True
+                },
+                "nodeId": {
+                    "type": "string",
+                    "index": "not_analyzed",
+                    "doc_values": True
+                },
+                "edgeName": {
+                    "type": "string",
+                    "index": "not_analyzed",
+                    "doc_values": True
+                },
+                "entityNodeType": {
+                    "type": "string",
+                    "index": "not_analyzed",
+                    "doc_values": True
+                },
+                "edgeTimestamp": {
+                    "type": "long",
+                    "doc_values": True
+                },
+                "edgeSearch": {
+                    "type": "string",
+                    "index": "not_analyzed",
+                    "doc_values": True
+                },
+                "fields": {
+                    "type": "nested",
+                    "properties": {
+                        "name": {
+                            "type": "string",
+                            "index": "not_analyzed",
+                            "doc_values": True
+                        },
+                        "boolean": {
+                            "type": "boolean",
+                            "doc_values": True
+                        },
+                        "long": {
+                            "type": "long",
+                            "doc_values": True
+                        },
+                        "double": {
+                            "type": "double",
+                            "doc_values": True
+                        },
+                        "location": {
+                            "type": "geo_point",
+                            "lat_lon": True,
+                            "geohash": True,
+                            "doc_values": True
+                        },
+                        "string": {
+                            "type": "string",
+                            "norms": {
+                                "enabled": False
+                            },
+                            "fields": {
+                                "exact": {
+                                    "type": "string",
+                                    "index": "not_analyzed",
+                                    "doc_values": True
+                                }
+                            }
+                        },
+                        "uuid": {
+                            "type": "string",
+                            "index": "not_analyzed",
+                            "doc_values": True
+                        }
+                    }
+                }
+            },
+            "_all": {
+                "enabled": False
+            }
+
+        }
+    }
+
+
+def main():
+    INDEX_COUNT = args.get('index_count')
+    TYPE_COUNT = args.get('type_count')
+    SETUP = args.get('setup')
+
+    indices = []
+    types = []
+    work_queue = JoinableQueue()
+
+    apiclient = APIClient('http://%s:9200' % es_hosts[random.randint(1, len(es_hosts) - 1)].get('host'))
+
+    workers = [Worker(work_queue) for x in xrange(args.get('workers'))]
+    [worker.start() for worker in workers]
+
+    try:
+        #
+        for x in xrange(TYPE_COUNT):
+            type_name = '%s_%s' % (args.get('type_prefix'), x)
+            types.append(type_name)
+
+        for x in xrange(INDEX_COUNT):
+            index_name = '%s_%s' % (args.get('index_prefix'), x)
+            indices.append(index_name)
+
+        if SETUP:
+            print 'Running setup...'
+
+            for index_name in indices:
+                apiclient.delete_index(index_name)
+
+            time.sleep(5)
+
+            for index_name in indices:
+                apiclient.create_index(
+                    index_name,
+                    shards=args['shard_count'],
+                    replicas=args['replica_count'])
+
+                # time.sleep(5)
+
+                # for index_name in indices:
+                # for type_name in types:
+                # apiclient.define_type_mapping(index_name, type_name)
+
+                # time.sleep(5)
+
+        total_messages = args.get('document_count')
+        batch_size = 100000
+        message_counter = 0
+        fields = random.randint(50, 100)
+
+        while message_counter < total_messages:
+
+            for count in xrange(batch_size):
+
+                for index_name in indices:
+                    doc_id = str(uuid.uuid1())
+
+                    task = {
+                        'field_count': fields,
+                        'uuid': doc_id,
+                        'index': index_name,
+                        'type': types[random.randint(0, len(types) - 1)]
+                    }
+
+                    work_queue.put(task)
+
+            print 'Joining queue counter=[%s]...' % message_counter
+            work_queue.join()
+            print 'Done queue counter=[%s]...' % message_counter
+            message_counter += batch_size
+
+    except KeyboardInterrupt:
+        [worker.terminate() for worker in workers]
+
+
+main()
diff --git a/utils/usergrid-util-python/requirements.txt b/utils/usergrid-util-python/requirements.txt
new file mode 100644
index 0000000..d15d7be
--- /dev/null
+++ b/utils/usergrid-util-python/requirements.txt
@@ -0,0 +1,4 @@
+urllib3
+usergrid
+requests
+redis
diff --git a/utils/usergrid-util-python/samples/activity_streams/activity_streams.py b/utils/usergrid-util-python/samples/activity_streams/activity_streams.py
new file mode 100644
index 0000000..b838485
--- /dev/null
+++ b/utils/usergrid-util-python/samples/activity_streams/activity_streams.py
@@ -0,0 +1,154 @@
+# */
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *   http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing,
+# * software distributed under the License is distributed on an
+# * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# * KIND, either express or implied.  See the License for the
+#    * specific language governing permissions and limitations
+# * under the License.
+# */
+
+__author__ = 'Jeff.West@yahoo.com'
+
+
+# docs page: http://docs.apigee.com/api-baas/content/creating-activity
+
+# create user 1
+# post event for user 1
+# check feed for user 1
+
+# create user 2
+# user 2 follows user 1
+# post event for user 1
+
+# check feed for user 1
+# check feed for user 2
+import json
+
+import requests
+
+collection_url_template = "{api_url}/{org}/{app}/{collection}"
+entity_url_template = "{api_url}/{org}/{app}/{collection}/{entity_id}"
+connection_query_url_template = "{api_url}/{org}/{app}/{collection}/{uuid}/{verb}"
+connection_create_url_template = "{api_url}/{org}/{app}/{collection}/{uuid}/{verb}/{target_uuid}"
+
+user_url_template = "{api_url}/{org}/{app}/users/{username}"
+user_feed_url_template = "{api_url}/{org}/{app}/users/{username}/feed"
+user_activity_url_template = "{api_url}/{org}/{app}/users/{username}/activities"
+user_follows_url_template = "{api_url}/{org}/{app}/users/{user2}/following/users/{user1}"
+
+url_data = {
+    'api_url': 'https://amer-apibaas-prod.apigee.net/appservices',
+    'org': 'jwest-samples',
+    'app': 'feed-example'
+}
+
+session = requests.Session()
+
+
+def create_user(user):
+    data = {
+        'username': user,
+        'email': '%s@example.com' % user
+    }
+
+    url = collection_url_template.format(collection='users', **url_data)
+
+    r = session.post(url, json.dumps(data))
+
+    if r.status_code != 200:
+        print 'Error creating user [%s] at URL=[%s]: %s' % (user, url, r.text)
+
+
+def post_activity(user, text):
+    activity = {
+        "actor": {
+            "displayName": user,
+            "username": user,
+            "image": {
+                "duration": 0,
+                "height": 80,
+                "url": "http://www.gravatar.com/avatar/", "width": 80},
+            "email": "%s@example.com" % user
+        },
+        "verb": "post",
+        "content": text
+    }
+
+    url = user_activity_url_template.format(username=user, **url_data)
+
+    r = session.post(url, json.dumps(activity))
+
+    if r.status_code != 200:
+        print 'Error creating activity for user [%s] at URL=[%s]: %s' % (user, url, r.text)
+
+
+def get_feed(user):
+    url = user_feed_url_template.format(username=user, **url_data)
+
+    r = session.get(url)
+
+    if r.status_code != 200:
+        print 'Error getting feed for user [%s] at URL=[%s]: %s' % (user, url, r.text)
+
+    else:
+        print '----- START'
+        print json.dumps(r.json(), indent=2)
+        print '----- END'
+
+
+def create_follows(user, user_to_follow):
+    url = user_follows_url_template.format(user1=user, user2=user_to_follow, **url_data)
+
+    r = session.post(url)
+
+    print r.text
+
+    if r.status_code != 200:
+        print 'Error getting creating follows from user [%s] to user [%s] at URL=[%s]: %s' % (
+            user, user_to_follow, url, r.text)
+
+
+def delete_user(username):
+    url = user_url_template.format(username=username, **url_data)
+
+    r = session.post(url)
+
+    # print r.text
+
+    if r.status_code != 200:
+        print 'Error deleting user [%s] at URL=[%s]: %s' % (username, url, r.text)
+
+
+user_base = 'natgeo'
+
+user1 = '%s_%s' % (user_base, 1)
+user2 = '%s_%s' % (user_base, 2)
+
+create_user(user1)
+post_activity(user1, 'Hello World!')
+
+get_feed(user1)
+
+create_user(user2)
+create_follows(user2, user1)
+post_activity(user2, "I'm here!")
+get_feed(user2)
+
+post_activity(user1, 'SEE YA!!')
+
+get_feed(user2)
+
+get_feed(user1)
+
+delete_user(user1)
+delete_user(user2)
diff --git a/utils/usergrid-util-python/samples/beacon-event-example.py b/utils/usergrid-util-python/samples/beacon-event-example.py
new file mode 100644
index 0000000..e1c3efe
--- /dev/null
+++ b/utils/usergrid-util-python/samples/beacon-event-example.py
@@ -0,0 +1,238 @@
+# */
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *   http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing,
+# * software distributed under the License is distributed on an
+# * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# * KIND, either express or implied.  See the License for the
+#    * specific language governing permissions and limitations
+# * under the License.
+# */
+
+# URL Templates for Usergrid
+# 
+# Get all events for a user:
+#     https://usergrid.net/beacon-sample/event-example/users/jeff/events
+# 
+# Get only enterStore events:
+# https://usergrid.net/beacon-sample/event-example/users/jeff/events?ql=select * where eventtype=‘enterStore'
+# 
+# Get/filter beacon events for a user:
+#     https://usergrid.net/beacon-sample/event-example/users/jeff/events?ql=select * where eventtype=‘beacon'
+# 
+# Get latest beacon event for user:
+#     https://usergrid.net/beacon-sample/event-example/users/jeff/events?ql=select * where eventtype=‘beacon’&limit=1
+# 
+# Beacon events for store:
+#     https://usergrid.net/beacon-sample/event-example/users/jeff/events?ql=select * where eventtype=‘beacon'
+# 
+# All events for store:
+#     https://usergrid.net/beacon-sample/event-example/stores/store_123/events
+# 
+# All events for a beacon:
+#     https://usergrid.net/beacon-sample/event-example/beacons/store_456-b2/events
+# 
+# Get Users who passed a specific beacon:
+# https://usergrid.net/beacon-sample/event-example/beacons/3fd4fccb-d43b-11e5-978a-123320acb31f/events;ql=select%20* where profile=1/connecting/events/users
+
+__author__ = 'Jeff.West@yahoo.com'
+
+
+import json
+import random
+
+import requests
+from multiprocessing import Process, Pool
+
+import time
+
+collection_url_template = "{api_url}/{org}/{app}/{collection}"
+entity_url_template = "{api_url}/{org}/{app}/{collection}/{entity_id}"
+connection_query_url_template = "{api_url}/{org}/{app}/{collection}/{uuid}/{verb}"
+connection_create_url_template = "{api_url}/{org}/{app}/{collection}/{uuid}/{verb}/{target_uuid}"
+
+url_data = {
+    'api_url': 'https://usergridhost/basepath',
+    'org': 'samples',
+    'app': 'event-example'
+}
+
+
+session = requests.Session()
+
+
+class EventGenerator(Process):
+    def __init__(self, store_id, event_count, user_array, beacons):
+        super(EventGenerator, self).__init__()
+
+        self.store_id = store_id
+        self.user_array = user_array
+        self.event_count = event_count
+        self.beacons = beacons
+        self.session = requests.Session()
+        self.create_store(self.store_id)
+        self.create_users(self.user_array)
+
+    def create_store(self, store_id):
+        url = entity_url_template.format(collection='stores', entity_id=store_id, **url_data)
+
+        r = self.session.put(url, data=json.dumps({"name": store_id}))
+
+        if r.status_code != 200:
+            print 'Error creating store [%s] at URL=[%s]: %s' % (store_id, url, r.text)
+
+    def create_event(self, user, event):
+        print 'creating event: %s' % json.dumps(event)
+
+        url = collection_url_template.format(collection='general-events', **url_data)
+
+        r = self.session.post(url, data=json.dumps(event))
+
+        if r.status_code == 200:
+            res = r.json()
+            entity = res.get('entities')[0]
+            event_uuid = entity.get('uuid')
+
+            # link to user
+            create_connection_url = connection_create_url_template.format(collection='users',
+                                                                          uuid=user,
+                                                                          verb='events',
+                                                                          target_uuid=event_uuid,
+                                                                          **url_data)
+
+            r_connect = self.session.post(create_connection_url)
+
+            if r_connect.status_code == 200:
+                print 'created connection: %s' % create_connection_url
+
+            # link to store
+            create_connection_url = connection_create_url_template.format(collection='stores',
+                                                                          uuid=event.get('storeId'),
+                                                                          verb='events',
+                                                                          target_uuid=event_uuid,
+                                                                          **url_data)
+
+            r_connect = self.session.post(create_connection_url)
+
+            if r_connect.status_code == 200:
+                print 'created connection: %s' % create_connection_url
+
+            if event.get('eventType') == 'beacon':
+                # link to beacon
+                create_connection_url = connection_create_url_template.format(collection='beacons',
+                                                                              uuid=event.get('beaconId'),
+                                                                              verb='events',
+                                                                              target_uuid=event_uuid,
+                                                                              **url_data)
+
+                r_connect = self.session.post(create_connection_url)
+
+                if r_connect.status_code == 200:
+                    print 'created connection: %s' % create_connection_url
+                else:
+                    print 'Error creating connection at URL=[%s]: %s' % (create_connection_url, r.text)
+
+    def run(self):
+
+        for user in self.user_array:
+
+            # store 123
+            self.create_event(user, {
+                'storeId': self.store_id,
+                'eventType': 'enterStore'
+            })
+
+            for x in xrange(0, self.event_count):
+                beacon_number = random.randint(0, len(self.beacons) - 1)
+                beacon_name = self.beacons[beacon_number]
+
+                event = {
+                    'beaconId': '%s-%s' % (self.store_id, beacon_name),
+                    'storeId': self.store_id,
+                    'eventType': 'beacon'
+                }
+
+                self.create_event(user, event)
+
+            self.create_event(user, {
+                'storeId': self.store_id,
+                'eventType': 'exitStore'
+            })
+
+    def create_users(self, user_array):
+        for user in user_array:
+            self.create_user(user)
+
+    def create_user(self, user):
+        data = {
+            'username': user,
+            'email': '%s@example.com' % user
+        }
+
+        url = collection_url_template.format(collection='users', **url_data)
+
+        r = self.session.post(url, json.dumps(data))
+
+        if r.status_code != 200:
+            print 'Error creating user [%s] at URL=[%s]: %s' % (user, url, r.text)
+
+
+def create_entity(entity_type, entity_name):
+    url = entity_url_template.format(collection=entity_type, entity_id=entity_name, **url_data)
+    r = session.put(url, data=json.dumps({'name': entity_name}))
+
+    if r.status_code != 200:
+        print 'Error creating %s [%s] at URL=[%s]: %s' % (entity_type, entity_name, url, r.text)
+
+
+def create_beacon(beacon_name):
+    create_entity('beacons', beacon_name)
+
+
+def create_store(store_name):
+    create_entity('stores', store_name)
+
+
+def main():
+    beacons = ["b1", "b2", "b3", "b4", "b5", "b6"]
+
+    stores = ['store_123', 'store_456', 'store_789', 'store_901']
+
+    beacon_names = []
+
+    for store in stores:
+        for beacon in beacons:
+            beacon_names.append('%s-%s' % (store, beacon))
+
+    pool = Pool(16)
+
+    pool.map(create_beacon, beacon_names)
+    pool.map(create_store, stores)
+
+    processes = [
+        EventGenerator(stores[0], 100, ['jeff', 'julie'], beacons=beacons),
+        EventGenerator(stores[0], 100, ['russo', 'dunker'], beacons=beacons),
+        EventGenerator(stores[2], 100, ['jeff', 'julie'], beacons=beacons),
+        EventGenerator(stores[2], 100, ['russo', 'dunker'], beacons=beacons),
+        EventGenerator(stores[3], 100, ['jeff', 'julie'], beacons=beacons),
+        EventGenerator(stores[3], 100, ['russo', 'dunker'], beacons=beacons),
+        EventGenerator(stores[1], 100, ['bala', 'shankar'], beacons=beacons),
+        EventGenerator(stores[1], 100, ['chet', 'anant'], beacons=beacons)
+    ]
+
+    [p.start() for p in processes]
+
+    while len([p for p in processes if p.is_alive()]) > 0:
+        print 'Processors active, waiting'
+        time.sleep(1)
+
+
+main()
diff --git a/utils/usergrid-util-python/samples/counter_test.py b/utils/usergrid-util-python/samples/counter_test.py
new file mode 100644
index 0000000..f5276c4
--- /dev/null
+++ b/utils/usergrid-util-python/samples/counter_test.py
@@ -0,0 +1,52 @@
+# */
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *   http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing,
+# * software distributed under the License is distributed on an
+# * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# * KIND, either express or implied.  See the License for the
+#    * specific language governing permissions and limitations
+# * under the License.
+# */
+
+import time
+import json
+
+import requests
+
+__author__ = 'Jeff.West@yahoo.com'
+
+
+tstamp = time.gmtime() * 1000
+
+s = requests.Session()
+
+s.headers.update({'authorization': 'Bearer TOKEN'})
+s.headers.update({'content-type': 'application/json'})
+
+url = 'https://host/appservices-new/usergrid/pushtest/events'
+
+body = {
+    "timestamp": tstamp,
+    "counters": {
+        "counters.jeff.west": 1
+    }
+}
+
+r = s.post(url, data=json.dumps(body))
+
+print r.status_code
+
+time.sleep(30)
+
+r = s.get('https://host/appservices-new/usergrid/pushtest/counters?counter=counters.jeff.west')
+
+print r.text
diff --git a/utils/usergrid-util-python/setup.py b/utils/usergrid-util-python/setup.py
new file mode 100755
index 0000000..337c914
--- /dev/null
+++ b/utils/usergrid-util-python/setup.py
@@ -0,0 +1,59 @@
+# */
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *   http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing,
+# * software distributed under the License is distributed on an
+# * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# * KIND, either express or implied.  See the License for the
+#    * specific language governing permissions and limitations
+# * under the License.
+# */
+
+from setuptools import setup, find_packages
+
+__author__ = 'Jeff.West@yahoo.com'
+
+VERSION = '0.5.13'
+
+setup(
+        name='usergrid-tools',
+        version=VERSION,
+        description='Tools for working with Apache Usergrid',
+        url='http://usergrid.apache.org',
+        download_url="https://codeload.github.com/jwest-apigee/usergrid-util-python/zip/%s" % VERSION,
+        author='Jeff West',
+        author_email='jwest@apigee.com',
+
+        # packages=['usergrid_tools', 'es_tools'],
+        packages=find_packages(exclude=["*.tests", "*.tests.*", "tests.*", "tests", "sandbox"]),
+
+        install_requires=[
+            'requests',
+            'usergrid>=0.1.3',
+            'time_uuid',
+            'argparse',
+            'redis',
+            'ConcurrentLogHandler',
+        ],
+
+        entry_points={
+            'console_scripts': [
+                'usergrid_iterator = usergrid_tools.iterators.simple_iterator:main',
+                'usergrid_data_migrator = usergrid_tools.migration.usergrid_data_migrator:main',
+                'usergrid_data_exporter = usergrid_tools.migration.usergrid_data_exporter:main',
+                'usergrid_entity_index_test = usergrid_tools.indexing.entity_index_test:main',
+                'usergrid_batch_index_test = usergrid_tools.indexing.batch_index_test:main',
+                'usergrid_parse_importer = usergrid_tools.parse_importer.parse_importer:main',
+                'usergrid_deleter = usergrid_tools.parse_importer.parse_importer:main',
+                'usergrid_library_check = usergrid_tools.library_check:main',
+            ]
+        }
+)
diff --git a/utils/usergrid-util-python/usergrid_tools/__init__.py b/utils/usergrid-util-python/usergrid_tools/__init__.py
new file mode 100644
index 0000000..beed654
--- /dev/null
+++ b/utils/usergrid-util-python/usergrid_tools/__init__.py
@@ -0,0 +1,4 @@
+import migration
+import iterators
+import indexing
+import general
diff --git a/utils/usergrid-util-python/usergrid_tools/general/__init__.py b/utils/usergrid-util-python/usergrid_tools/general/__init__.py
new file mode 100644
index 0000000..3b2a4e0
--- /dev/null
+++ b/utils/usergrid-util-python/usergrid_tools/general/__init__.py
@@ -0,0 +1,21 @@
+# */
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *   http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing,
+# * software distributed under the License is distributed on an
+# * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# * KIND, either express or implied.  See the License for the
+#    * specific language governing permissions and limitations
+# * under the License.
+# */
+
+__author__ = 'Jeff.West@yahoo.com'
+
diff --git a/utils/usergrid-util-python/usergrid_tools/general/deleter.py b/utils/usergrid-util-python/usergrid_tools/general/deleter.py
new file mode 100644
index 0000000..a62b6e3
--- /dev/null
+++ b/utils/usergrid-util-python/usergrid_tools/general/deleter.py
@@ -0,0 +1,170 @@
+# */
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *   http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing,
+# * software distributed under the License is distributed on an
+# * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# * KIND, either express or implied.  See the License for the
+#    * specific language governing permissions and limitations
+# * under the License.
+# */
+
+import json
+import traceback
+import requests
+
+__author__ = 'Jeff.West@yahoo.com'
+
+
+def total_milliseconds(td):
+    return (td.microseconds + td.seconds * 1000000) / 1000
+
+
+# for Apigee Developer, leave this as is.  For paid BaaS instances change this to https://{your_api_url}/[appservices]
+api_url = 'https://api.usergrid.com'
+
+# specify the org[] / app[] / collection[] to delete
+# Org and App level are required.  If no collections are specified, all collections will be deleted
+# you also need to specify the client_id and secret of each org
+
+data_map = {
+    "orgs":
+        {
+            "myOrg": {
+                "apps": {
+                    "myApp": {
+                        "collections": [
+                            'examples'
+                        ]
+                    }
+                },
+                "credentials": {
+                    "client_id": "foo",
+                    "client_secret": "bar"
+                }
+            }
+        }
+}
+# it is generally not a good idea to delete more than 100 at a time due to latency and resource utilization
+url_template = '{api_url}/{org}/{app}/{collection}?limit=250'
+
+session = requests.Session()
+
+
+def check_response_status(response, message='', exit_on_error=True):
+    if response.status_code != 200:
+        print 'ERROR: ' + message
+        print response.text
+
+        if exit_on_error:
+            exit()
+
+
+def delete_all_collections(org, app, token):
+    url = '{api_url}/{org}/{app}'.format(api_url=api_url, org=org, app=app)
+
+    print 'Listing collections at URL: %s' % url
+
+    r = session.get(url)
+
+    if r.status_code != 200:
+        print r.text
+
+    collections = []
+
+    delete_collections(org, app, collections, token)
+
+
+def delete_collections(org, app, collections, token):
+    print 'Deleting [%s] collections: %s' % (len(collections), collections)
+
+    for collection in collections:
+        print 'Deleting collection [%s]...' % collection
+
+        keep_going = True
+
+        count_with_zero = 0
+
+        while keep_going:
+
+            url = url_template.format(api_url=api_url, org=org, app=app, collection=collection)
+
+            try:
+                response = session.get(url)
+                check_response_status(response, message='Unable to GET URL: %s' % url)
+
+                count = len(response.json().get('entities'))
+                total_ms = total_milliseconds(response.elapsed)
+
+                print 'GET %s from collection %s in %s' % (count, collection, total_ms)
+                print 'Deleting...'
+
+                response = session.delete(url)
+
+                check_response_status(response, message='UNABLE TO DELETE on URL: %s' % url)
+
+                try:
+                    count = len(response.json().get('entities'))
+                    total_ms = total_milliseconds(response.elapsed)
+
+                    print 'Deleted %s from collection %s in %s' % (count, collection, total_ms)
+
+                    if count == 0:
+                        count_with_zero += 1
+                        print 'Count with ZERO: %s' % count_with_zero
+
+                        # if there are 10 in a row with zero entities returned, we're done
+                        if count_with_zero >= 10:
+                            keep_going = False
+                    else:
+                        count_with_zero = 0
+                except:
+                    print 'Error! HTTP Status: %s response: %s' % (response.status_code, response.text)
+
+            except KeyboardInterrupt:
+                exit()
+
+            except:
+                print traceback.format_exc()
+
+
+# iterate the orgs specified in the configuration above
+for org, org_data in data_map.get('orgs', {}).iteritems():
+
+    credentials = org_data.get('credentials', {})
+
+    token_request = {
+        'grant_type': 'client_credentials',
+        'client_id': credentials.get('client_id'),
+        'client_secret': credentials.get('client_secret'),
+    }
+
+    token_url = '{api_url}/management/token'.format(api_url=api_url)
+
+    r = session.post(token_url, data=json.dumps(token_request))
+
+    check_response_status(r, message='Unable to get Token at URL %s' % token_url)
+
+    token = r.json().get('access_token')
+    session.headers.update({'Authorization': 'Bearer ' + token})
+
+    # iterate the apps specified in the config above
+    for app, app_data in org_data.get('apps', {}).iteritems():
+
+        collections = app_data.get('collections', [])
+
+        # if the list of collections is empty, delete all collections
+        if len(collections) == 0:
+            delete_all_collections(org, app, token)
+
+        # Otherwise, delete the specified collections
+        else:
+            delete_collections(org, app, collections, token)
diff --git a/utils/usergrid-util-python/usergrid_tools/general/duplicate_name_checker.py b/utils/usergrid-util-python/usergrid_tools/general/duplicate_name_checker.py
new file mode 100644
index 0000000..6957290
--- /dev/null
+++ b/utils/usergrid-util-python/usergrid_tools/general/duplicate_name_checker.py
@@ -0,0 +1,47 @@
+# */
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *   http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing,
+# * software distributed under the License is distributed on an
+# * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# * KIND, either express or implied.  See the License for the
+#    * specific language governing permissions and limitations
+# * under the License.
+# */
+
+from usergrid import UsergridQueryIterator
+
+__author__ = 'Jeff.West@yahoo.com'
+
+
+### This iterates a collection using GRAPH and checks whether there are more than on entity with the same name
+
+url = 'https://host/org/app/collection?access_token=foo&limit=1000'
+
+q = UsergridQueryIterator(url)
+
+name_tracker = {}
+counter = 0
+for e in q:
+    counter += 1
+
+    if counter % 1000 == 1:
+        print 'Count: %s' % counter
+
+    name = e.get('name')
+
+    if name in name_tracker:
+        name_tracker[name].append(e.get('uuid'))
+
+        print 'duplicates for name=[%s]: %s' % (name, name_tracker[name])
+
+    else:
+        name_tracker[name] = [e.get('uuid')]
diff --git a/utils/usergrid-util-python/usergrid_tools/general/queue_monitor.py b/utils/usergrid-util-python/usergrid_tools/general/queue_monitor.py
new file mode 100644
index 0000000..98420fb
--- /dev/null
+++ b/utils/usergrid-util-python/usergrid_tools/general/queue_monitor.py
@@ -0,0 +1,138 @@
+# */
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *   http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing,
+# * software distributed under the License is distributed on an
+# * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# * KIND, either express or implied.  See the License for the
+#    * specific language governing permissions and limitations
+# * under the License.
+# */
+
+import argparse
+import json
+import datetime
+import os
+import time
+import sys
+
+import boto
+from boto import sqs
+
+### This monitors an SQS queue and measures the delta message count between polling intervals to infer the amount of time
+### remaining to fully drain the queue
+
+__author__ = 'Jeff.West@yahoo.com'
+
+
+def total_seconds(td):
+    return (td.microseconds + (td.seconds + td.days * 24.0 * 3600) * 10.0 ** 6) / 10.0 ** 6
+
+
+def total_milliseconds(td):
+    return (td.microseconds + td.seconds * 1000000) / 1000
+
+
+def get_time_remaining(count, rate):
+    if rate == 0:
+        return 'NaN'
+
+    seconds = count * 1.0 / rate
+
+    m, s = divmod(seconds, 60)
+    h, m = divmod(m, 60)
+
+    return "%d:%02d:%02d" % (h, m, s)
+
+
+def parse_args():
+    parser = argparse.ArgumentParser(description='Usergrid Loader - Queue Monitor')
+
+    parser.add_argument('-c', '--config',
+                        help='The queue to load into',
+                        type=str,
+                        default='%s/.usergrid/queue_monitor.json' % os.getenv("HOME"))
+
+    parser.add_argument('-q', '--queue_name',
+                        help='The queue name to send messages to.  If not specified the filename is used',
+                        default='entities',
+                        type=str)
+
+    my_args = parser.parse_args(sys.argv[1:])
+
+    print str(my_args)
+
+    return vars(my_args)
+
+
+def main():
+
+    args = parse_args()
+
+    queue_name = args.get('queue_name')
+
+    print 'queue_name=%s' % queue_name
+
+    start_time = datetime.datetime.utcnow()
+    first_start_time = start_time
+
+    print "first start: %s" % first_start_time
+
+    with open(args.get('config'), 'r') as f:
+        config = json.load(f)
+
+    sqs_config = config.get('sqs')
+    last_time = datetime.datetime.utcnow()
+
+    sqs_conn = boto.sqs.connect_to_region(**sqs_config)
+
+    queue = sqs_conn.get_queue(queue_name)
+
+    last_size = queue.count()
+    first_size = last_size
+
+    print 'Starting Size: %s' % last_size
+
+    sleep = 10
+    time.sleep(sleep)
+    rate_sum = 0
+    rate_count = 0
+
+    while True:
+        size = queue.count()
+        time_stop = datetime.datetime.utcnow()
+
+        time_delta = total_seconds(time_stop - last_time)
+        agg_time_delta = total_seconds(time_stop - first_start_time)
+        agg_size_delta = first_size - size
+        agg_messages_rate = 1.0 * agg_size_delta / agg_time_delta
+
+        size_delta = last_size - size
+        messages_rate = 1.0 * size_delta / time_delta
+        rate_sum += messages_rate
+        rate_count += 1
+
+        print '%s | %s | Size: %s | Processed: %s | Last: %s | Avg: %s | Count: %s | agg rate: %s | Remaining: %s' % (
+            datetime.datetime.utcnow(),
+            queue_name,
+            size, size_delta, round(messages_rate, 2),
+            round(rate_sum / rate_count, 2), rate_count,
+            round(agg_messages_rate, 2),
+            get_time_remaining(size, agg_messages_rate))
+
+        last_size = size
+        last_time = time_stop
+
+        time.sleep(sleep)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/utils/usergrid-util-python/usergrid_tools/general/url_tester.py b/utils/usergrid-util-python/usergrid_tools/general/url_tester.py
new file mode 100644
index 0000000..6e3bef8
--- /dev/null
+++ b/utils/usergrid-util-python/usergrid_tools/general/url_tester.py
@@ -0,0 +1,108 @@
+# */
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *   http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing,
+# * software distributed under the License is distributed on an
+# * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# * KIND, either express or implied.  See the License for the
+#    * specific language governing permissions and limitations
+# * under the License.
+# */
+
+import datetime
+import time
+import numpy
+import requests
+
+__author__ = 'Jeff.West@yahoo.com'
+
+# This will call a URL over and over to check the latency of the call
+
+
+def total_milliseconds(td):
+    return (td.microseconds + td.seconds * 1000000) / 1000
+
+url_template = "{protocol}://{host}:{port}/{org}/{app}/{collection}?ql={ql}&client_id={client_id}&client_secret={client_secret}"
+
+environments = {
+
+    'local': {
+        'protocol': 'http',
+        'host': 'localhost',
+        'port': 8080,
+        'org': 'myOrg',
+        'app': 'myApp',
+        'collection': 'myEntities',
+        'ql': 'select *',
+        'client_id': '<<client_id>>',
+        'client_secret': '<<client_secret>>'
+    }
+}
+
+ENV = 'local'
+
+data = environments.get(ENV)
+if data is None:
+    print 'didn\'t find map entry for data'
+    exit(1)
+
+x = 0
+
+SLEEP = .5
+count_under_one = 0.0
+count_over = 0.0
+percent_under_one = 100.0
+total_time = 0
+
+print url_template.format(**data)
+
+response_times = []
+
+while True:
+    x += 1
+    target_url = url_template.format(**data)
+
+    r = requests.get(url=target_url)
+
+    response_time = total_milliseconds(r.elapsed)
+    total_time += response_time
+
+    # print '%s / %s' % (r.elapsed, total_milliseconds(r.elapsed))
+
+    the_date = datetime.datetime.utcnow()
+
+    if r.status_code != 200:
+        print '%s | %s: %s in %s |  %s' % (the_date, x, r.status_code, response_time, r.text)
+    else:
+        response_times.append(response_time)
+
+        if response_time < 2000:
+            count_under_one += 1
+        elif response_time > 10000:
+            count_over += 1
+
+        percent_under_one = round(100 * (count_under_one / x), 2)
+        percent_over = round(100 * (count_over / x), 2)
+
+        # print '%s | %s: %s in %s | Count: %s | Avg: %s | under 2s: %s / %s%% | over 10s: %s / %s%%' % (
+        # the_date, x, r.status_code, response_time, len(r.json().get('entities')), (total_time / x), count_under_one,
+        # percent_under_one, count_over, percent_over)
+
+        print '%s | %s: %s in %s | Count: %s | Avg: %s | 99th: %s | 90th: %s | 50th: %s | 75th: %s | 25th: %s' % (
+            the_date, x, r.status_code, response_time, r.json().get('count'), (total_time / x),
+
+            numpy.percentile(response_times, 99),
+            numpy.percentile(response_times, 90),
+            numpy.percentile(response_times, 75),
+            numpy.percentile(response_times, 50),
+            numpy.percentile(response_times, 25))
+
+    time.sleep(SLEEP)
diff --git a/utils/usergrid-util-python/usergrid_tools/general/user_confirm_activate.py b/utils/usergrid-util-python/usergrid_tools/general/user_confirm_activate.py
new file mode 100644
index 0000000..8879b44
--- /dev/null
+++ b/utils/usergrid-util-python/usergrid_tools/general/user_confirm_activate.py
@@ -0,0 +1,51 @@
+# */
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *   http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing,
+# * software distributed under the License is distributed on an
+# * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# * KIND, either express or implied.  See the License for the
+#    * specific language governing permissions and limitations
+# * under the License.
+# */
+
+import json
+
+import requests
+
+__author__ = 'Jeff.West@yahoo.com'
+
+
+# This will make the API calls to activate and confirm an array of users
+
+users = [
+    'user1@example.com',
+    'user2@example.com'
+]
+
+TOKEN = 'ABC123'
+URL = "http://localhost:8080/management/users/%s"
+
+s = requests.Session()
+s.headers.update({'authorization': 'Bearer %s' % TOKEN})
+
+for user in users:
+
+    r = s.put(URL % user, data=json.dumps({"activated": True}))
+    print 'Activated %s: %s' % (user, r.status_code)
+
+    if r.status_code != 200:
+        print r.text
+        continue
+
+    r = s.put(URL % user, data=json.dumps({"confirmed": True}))
+
+    print 'Confirmed %s: %s' % (user, r.status_code)
diff --git a/utils/usergrid-util-python/usergrid_tools/groups/__init__.py b/utils/usergrid-util-python/usergrid_tools/groups/__init__.py
new file mode 100644
index 0000000..cb3e030
--- /dev/null
+++ b/utils/usergrid-util-python/usergrid_tools/groups/__init__.py
@@ -0,0 +1,2 @@
+__author__ = 'Jeff.West@yahoo.com'
+
diff --git a/utils/usergrid-util-python/usergrid_tools/groups/big_group_creater.py b/utils/usergrid-util-python/usergrid_tools/groups/big_group_creater.py
new file mode 100644
index 0000000..ba031f3
--- /dev/null
+++ b/utils/usergrid-util-python/usergrid_tools/groups/big_group_creater.py
@@ -0,0 +1,100 @@
+# */
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *   http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing,
+# * software distributed under the License is distributed on an
+# * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# * KIND, either express or implied.  See the License for the
+#    * specific language governing permissions and limitations
+# * under the License.
+# */
+
+import json
+import traceback
+from multiprocessing import Pool
+
+import datetime
+import urllib3
+
+import requests
+
+__author__ = 'Jeff.West@yahoo.com'
+
+
+group_name = 'precisely-10k'
+users = 10000
+username_template = 'precisely-10k-%s'
+
+url_data = {
+    "api_url": "https://usergrid.net",
+    "org": "org",
+    "app": "sandbox",
+    "client_id": "",
+    "client_secret": "",
+
+}
+
+
+collection_url_template = "{api_url}/{org}/{app}/{collection}"
+add_user_url_template = "{api_url}/{org}/{app}/groups/{group_name}/users/{uuid}"
+
+
+def create_group(name):
+    url = collection_url_template.format(collection='groups', **url_data)
+    print url
+    r = requests.post(url, data=json.dumps({"path": name, "name": name}))
+
+    if r.status_code not in [200, 400]:
+        print r.text
+        exit()
+
+
+def create_user(username):
+    url = collection_url_template.format(collection='users', **url_data)
+    r = requests.post(url, data=json.dumps({"username": username}))
+
+    if r.status_code not in [200, 400]:
+        print r.text
+        exit()
+
+    print 'Created user %s' % username
+
+
+def map_user(username):
+    try:
+        url = add_user_url_template.format(group_name=group_name, uuid=username, **url_data)
+        r = requests.post(url, data=json.dumps({"username": username}))
+
+        if r.status_code != 200:
+            print r.text
+            exit()
+
+        print 'Mapped user %s' % username
+    except:
+        print traceback.format_exc()
+
+
+user_names = [username_template % i for i in xrange(0, users)]
+
+pool = Pool(64)
+
+start = datetime.datetime.utcnow()
+pool.map(create_user, user_names)
+
+create_group(group_name)
+
+pool.map(map_user, user_names)
+
+finish = datetime.datetime.utcnow()
+
+td = finish - start
+
+print td
diff --git a/utils/usergrid-util-python/usergrid_tools/indexing/README.md b/utils/usergrid-util-python/usergrid_tools/indexing/README.md
new file mode 100644
index 0000000..e938c28
--- /dev/null
+++ b/utils/usergrid-util-python/usergrid_tools/indexing/README.md
@@ -0,0 +1,22 @@
+# Usergrid Indexing Latency Tester
+
+
+# Overview
+
+Indexing of data (to Elasticsearch) in Usergrid is done asynchronously, while persistence (to Cassandra) is done synchronously within the context of an API call.  This means that you can immediately get your data back by UUID but if you use `GET /org/app/collection?ql=select * where field='value'` it is not instantly indexed.  The typical delay is ~25ms.
+
+The purpose of this tool is to test the indexing latency within Usergrid.
+
+```
+$ usergrid_index_test -h
+
+usage: usergrid_index_test [-h] -o ORG -a APP --base_url BASE_URL
+
+Usergrid Indexing Latency Test
+
+optional arguments:
+  -h, --help           show this help message and exit
+  -o ORG, --org ORG    Name of the org to perform the test in
+  -a APP, --app APP    Name of the app to perform the test in
+  --base_url BASE_URL  The URL of the Usergrid Instance
+```
\ No newline at end of file
diff --git a/utils/usergrid-util-python/usergrid_tools/indexing/__init__.py b/utils/usergrid-util-python/usergrid_tools/indexing/__init__.py
new file mode 100644
index 0000000..3b2a4e0
--- /dev/null
+++ b/utils/usergrid-util-python/usergrid_tools/indexing/__init__.py
@@ -0,0 +1,21 @@
+# */
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *   http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing,
+# * software distributed under the License is distributed on an
+# * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# * KIND, either express or implied.  See the License for the
+#    * specific language governing permissions and limitations
+# * under the License.
+# */
+
+__author__ = 'Jeff.West@yahoo.com'
+
diff --git a/utils/usergrid-util-python/usergrid_tools/indexing/batch_index_test.py b/utils/usergrid-util-python/usergrid_tools/indexing/batch_index_test.py
new file mode 100644
index 0000000..8b1aae6
--- /dev/null
+++ b/utils/usergrid-util-python/usergrid_tools/indexing/batch_index_test.py
@@ -0,0 +1,362 @@
+# -*- coding: utf-8 -*-
+# */
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *   http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing,
+# * software distributed under the License is distributed on an
+# * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# * KIND, either express or implied.  See the License for the
+#    * specific language governing permissions and limitations
+# * under the License.
+# */
+
+import json
+import logging
+import traceback
+from multiprocessing import Pool
+import datetime
+import socket
+
+import argparse
+import requests
+import time
+from logging.handlers import RotatingFileHandler
+
+import sys
+
+__author__ = 'Jeff.West@yahoo.com'
+
+
+entity_template = {
+    "id": "replaced",
+    "dataType": "entitlements",
+    "mockData": [
+        {"importDate": "2015-08-25T23:33:57.124Z", "rowsImported": 2},
+        {"role": "line-owner", "route": "/master", "element": "element1", "entitlementId": "entitlement4",
+         "property": "show"},
+        {"role": "line-owner", "route": "/master", "element": "element2", "entitlementId": "entitlement8",
+         "property": "hide"}
+    ],
+    "nullArray1": [None],
+    "nullArray2": [None, None],
+    "nullArray3": [None, None],
+    "nest1": {
+        "nest2": {
+            "nest3": [None, None, 'foo']
+        }
+    }
+}
+
+entity_template = {
+    "type": "customerstatuses",
+    "created": 1454769737888,
+    "modified": 1454781811473,
+    "address": {
+        "zip": "35873",
+        "city": "málaga",
+        "street": "3430 calle de bravo murillo",
+        "state": "melilla"
+    },
+    "DOB": "787264244",
+    "email": "begoña.caballero29@example.com",
+    "firstName": "Begoña",
+    "lastName": "Caballero",
+    "lastSeenDateTime": 1447737158857,
+    "locationStatus": "Entrance",
+    "loyaltyAccountNumber": "1234",
+    "loyaltyLevel": "basic",
+    "phone": "966-450-469",
+    "profilePictureUrl": "http://api.randomuser.me/portraits/thumb/women/61.jpg",
+    "status": "Entrance",
+    "storeId": 12121
+}
+
+url_template = '{api_url}/{org}/{app}/{collection}'
+token_url_template = '{api_url}/{org}/{app}/token'
+
+config = {}
+
+session = requests.Session()
+
+logger = logging.getLogger('UsergridBatchIndexTest')
+
+
+def init_logging(stdout_enabled=True):
+    root_logger = logging.getLogger()
+    log_file_name = './usergrid_index_test.log'
+    log_formatter = logging.Formatter(fmt='%(asctime)s | %(name)s | %(processName)s | %(levelname)s | %(message)s',
+                                      datefmt='%m/%d/%Y %I:%M:%S %p')
+
+    rotating_file = logging.handlers.RotatingFileHandler(filename=log_file_name,
+                                                         mode='a',
+                                                         maxBytes=2048576000,
+                                                         backupCount=10)
+    rotating_file.setFormatter(log_formatter)
+    rotating_file.setLevel(logging.INFO)
+
+    root_logger.addHandler(rotating_file)
+    root_logger.setLevel(logging.INFO)
+
+    logging.getLogger('urllib3.connectionpool').setLevel(logging.WARN)
+    logging.getLogger('requests.packages.urllib3.connectionpool').setLevel(logging.WARN)
+
+    if stdout_enabled:
+        stdout_logger = logging.StreamHandler(sys.stdout)
+        stdout_logger.setFormatter(log_formatter)
+        stdout_logger.setLevel(logging.INFO)
+        root_logger.addHandler(stdout_logger)
+
+
+def create_entity(work_item):
+    global config
+    try:
+        url = work_item[0]
+        entity = work_item[1]
+
+        # entity['name'] = datetime.datetime.now().strftime('name-%yx%mx%dx%Hx%Mx%S')
+
+        logger.info('creating entity [%s] at URL [%s]' % (entity.get('id'), url))
+
+        r = session.post(url, data=json.dumps(entity))
+
+        if r.status_code != 200:
+            logger.error('HTTP %s: %s' % (r.status_code, r.text))
+            print 'HTTP %s: %s' % (r.status_code, r.text)
+            return
+
+        entities = r.json().get('entities', [])
+        uuid = entities[0].get('uuid')
+
+        if r.status_code != 200:
+            logger.info('%s: %s' % (r.status_code, uuid))
+        else:
+            logger.info('Created entity UUID=[%s] at URL [%s]' % (uuid, url))
+
+        return uuid, entity
+
+    except Exception, e:
+        print traceback.format_exc(e)
+
+
+def test_multiple(number_of_entities):
+    global config
+
+    start = datetime.datetime.now()
+
+    logger.info('Creating %s entities w/ url=%s' % (number_of_entities, config['url']))
+    created_map = {}
+
+    work_items = []
+
+    for x in xrange(1, number_of_entities + 1):
+        entity = entity_template.copy()
+        entity['id'] = str(x)
+        work_items.append((config['url'], entity))
+
+    responses = processes.map(create_entity, work_items)
+
+    for res in responses:
+        if len(res) > 0:
+            created_map[res[0]] = res[1]
+
+    stop = datetime.datetime.now()
+
+    logger.info('Created [%s] entities in %s' % (number_of_entities, (stop - start)))
+
+    return created_map
+
+
+def wait_for_indexing(created_map, q_url, sleep_time=0.0):
+    logger.info('Waiting for indexing of [%s] entities...' % len(created_map))
+
+    count_missing = 100
+    start_time = datetime.datetime.now()
+
+    while count_missing > 0:
+
+        entity_map = {}
+        r = session.get(q_url)
+        res = r.json()
+        entities = res.get('entities', [])
+
+        now_time = datetime.datetime.now()
+        elapsed = now_time - start_time
+
+        logger.info('Found [%s] of [%s] ([%s] missing) after [%s] entities at url: %s' % (
+            len(entities), len(created_map), (len(created_map) - len(entities)), elapsed, q_url))
+
+        count_missing = 0
+
+        for entity in entities:
+            entity_map[entity.get('uuid')] = entity
+
+        for uuid, created_entity in created_map.iteritems():
+            if uuid not in entity_map:
+                count_missing += 1
+                logger.info('Missing uuid=[%s] Id=[%s] total missing=[%s]' % (
+                    uuid, created_entity.get('id'), count_missing))
+
+        if count_missing > 0:
+            logger.info('Waiting for indexing, count_missing=[%s] Total time [%s] Sleeping for [%s]s' % (
+                elapsed, count_missing, sleep_time))
+
+            time.sleep(sleep_time)
+
+    stop_time = datetime.datetime.now()
+    logger.info('All entities found after %s' % (stop_time - start_time))
+
+
+def clear(clear_url):
+    logger.info('deleting.... ' + clear_url)
+
+    r = session.delete(clear_url)
+
+    if r.status_code != 200:
+        logger.info('error deleting url=' + clear_url)
+        logger.info(json.dumps(r.json()))
+
+    else:
+        res = r.json()
+        len_entities = len(res.get('entities', []))
+
+        if len_entities > 0:
+            clear(clear_url)
+
+
+def test_cleared(q_url):
+    r = session.get(q_url)
+
+    if r.status_code != 200:
+        logger.info(json.dumps(r.json()))
+    else:
+        res = r.json()
+
+        if len(res.get('entities', [])) != 0:
+            logger.info('DID NOT CLEAR')
+
+
+processes = Pool(32)
+
+
+def test_url(q_url, sleep_time=0.25):
+    test_var = False
+
+    while not test_var:
+        r = session.get(q_url)
+
+        if r.status_code == 200:
+
+            if len(r.json().get('entities')) >= 1:
+                test_var = True
+        else:
+            logger.info('non 200')
+
+        if test_var:
+            logger.info('Test of URL [%s] Passes')
+        else:
+            logger.info('Test of URL [%s] Passes')
+            time.sleep(sleep_time)
+
+
+def parse_args():
+    parser = argparse.ArgumentParser(description='Usergrid Indexing Latency Test')
+
+    parser.add_argument('-o', '--org',
+                        help='Name of the org to perform the test in',
+                        type=str,
+                        required=True)
+
+    parser.add_argument('-a', '--app',
+                        help='Name of the app to perform the test in',
+                        type=str,
+                        required=True)
+
+    parser.add_argument('--base_url',
+                        help='The URL of the Usergrid Instance',
+                        type=str,
+                        required=True)
+
+    parser.add_argument('--client_id',
+                        help='The Client ID to get a token, if needed',
+                        type=str,
+                        required=False)
+
+    parser.add_argument('--client_secret',
+                        help='The Client Secret to get a token, if needed',
+                        type=str,
+                        required=False)
+
+    my_args = parser.parse_args(sys.argv[1:])
+
+    return vars(my_args)
+
+
+def init():
+    global config
+
+    url_data = {
+        'api_url': config.get('base_url'),
+        'org': config.get('org'),
+        'app': config.get('app'),
+        'collection': '%s-%s' % (socket.gethostname(), datetime.datetime.now().strftime('index-test-%yx%mx%dx%Hx%Mx%S'))
+    }
+
+    config['url'] = url_template.format(**url_data)
+    config['token_url'] = token_url_template.format(**url_data)
+
+
+def main():
+    global config
+
+    # processes = Pool(32)
+
+    config = parse_args()
+
+    init()
+
+    init_logging()
+
+    if config.get('client_id') is not None and config.get('client_secret') is not None:
+        token_request = {
+            'grant_type': 'client_credentials',
+            'client_id': config.get('client_id'),
+            'client_secret': config.get('client_secret')
+        }
+
+        r = session.post(config.get('token_url'), json.dumps(token_request))
+
+        if r.status_code == 200:
+            access_token = r.json().get('access_token')
+            session.headers.update({'Authorization': 'Bearer %s' % access_token})
+        else:
+            logger.critical('unable to get token: %s' % r.text)
+            exit(1)
+
+    try:
+        created_map = test_multiple(999)
+
+        q_url = config.get('url') + "?ql=select * where dataType='entitlements'&limit=1000"
+
+        wait_for_indexing(created_map=created_map,
+                          q_url=q_url,
+                          sleep_time=1)
+
+        delete_q_url = config.get('url') + "?ql=select * where dataType='entitlements'&limit=1000"
+
+        clear(clear_url=delete_q_url)
+
+    except KeyboardInterrupt:
+        processes.terminate()
+
+    processes.terminate()
+
+
+main()
diff --git a/utils/usergrid-util-python/usergrid_tools/indexing/entity_index_test.py b/utils/usergrid-util-python/usergrid_tools/indexing/entity_index_test.py
new file mode 100644
index 0000000..d042d38
--- /dev/null
+++ b/utils/usergrid-util-python/usergrid_tools/indexing/entity_index_test.py
@@ -0,0 +1,339 @@
+# -*- coding: utf-8 -*-
+# */
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *   http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing,
+# * software distributed under the License is distributed on an
+# * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# * KIND, either express or implied.  See the License for the
+#    * specific language governing permissions and limitations
+# * under the License.
+# */
+
+import json
+import logging
+from multiprocessing import Pool
+import datetime
+
+import argparse
+import requests
+import time
+from logging.handlers import RotatingFileHandler
+
+import sys
+
+__author__ = 'Jeff.West@yahoo.com'
+
+
+entity_template = {
+    "id": "replaced",
+    "dataType": "entitlements",
+    "mockData": [
+        {"importDate": "2015-08-25T23:33:57.124Z", "rowsImported": 2},
+        {"role": "line-owner", "route": "/master", "element": "element1", "entitlementId": "entitlement4",
+         "property": "show"},
+        {"role": "line-owner", "route": "/master", "element": "element2", "entitlementId": "entitlement8",
+         "property": "hide"}
+    ],
+    "nullArray1": [None],
+    "nullArray2": [None, None],
+    "nullArray3": [None, None],
+    "nest1": {
+        "nest2": {
+            "nest3": [None, None, 'foo']
+        }
+    }
+}
+
+entity_template = {
+    "type": "customerstatuses",
+    "name": "1234",
+    "created": 1454769737888,
+    "modified": 1454781811473,
+    "address": {
+        "zip": "35873",
+        "city": "málaga",
+        "street": "3430 calle de bravo murillo",
+        "state": "melilla"
+    },
+    "DOB": "787264244",
+    "email": "begoña.caballero29@example.com",
+    "firstName": "Begoña",
+    "lastName": "Caballero",
+    "lastSeenDateTime": 1447737158857,
+    "locationStatus": "Entrance",
+    "loyaltyAccountNumber": "1234",
+    "loyaltyLevel": "basic",
+    "phone": "966-450-469",
+    "profilePictureUrl": "http://api.randomuser.me/portraits/thumb/women/61.jpg",
+    "status": "Entrance",
+    "storeId": 12121
+}
+
+collection_url_template = '{api_url}/{org}/{app}/{collection}'
+query_url_template = '{api_url}/{org}/{app}/{collection}?ql=select * where tag=\'{tag}\''
+entity_url_template = '{api_url}/{org}/{app}/{collection}/{entity_id}'
+token_url_template = '{api_url}/{org}/{app}/token'
+
+config = {}
+
+session = requests.Session()
+
+logger = logging.getLogger('UsergridEntityIndexTest')
+
+
+def init_logging(stdout_enabled=True):
+    root_logger = logging.getLogger()
+    log_file_name = './usergrid_index_test.log'
+    log_formatter = logging.Formatter(fmt='%(asctime)s | %(name)s | %(processName)s | %(levelname)s | %(message)s',
+                                      datefmt='%m/%d/%Y %I:%M:%S %p')
+
+    rotating_file = logging.handlers.RotatingFileHandler(filename=log_file_name,
+                                                         mode='a',
+                                                         maxBytes=2048576000,
+                                                         backupCount=10)
+    rotating_file.setFormatter(log_formatter)
+    rotating_file.setLevel(logging.INFO)
+
+    root_logger.addHandler(rotating_file)
+    root_logger.setLevel(logging.INFO)
+
+    logging.getLogger('urllib3.connectionpool').setLevel(logging.WARN)
+    logging.getLogger('requests.packages.urllib3.connectionpool').setLevel(logging.WARN)
+
+    if stdout_enabled:
+        stdout_logger = logging.StreamHandler(sys.stdout)
+        stdout_logger.setFormatter(log_formatter)
+        stdout_logger.setLevel(logging.INFO)
+        root_logger.addHandler(stdout_logger)
+
+
+def test_multiple(number_of_entities, processes):
+    global config
+
+    start = datetime.datetime.now()
+
+    logger.info('Creating %s entities w/ url=%s' % (number_of_entities, config['url']))
+    created_map = {}
+
+    work_items = []
+
+    for x in xrange(1, number_of_entities + 1):
+        entity = entity_template.copy()
+        entity['id'] = str(x)
+        work_items.append((config['url'], entity))
+
+    responses = processes.map(create_entity, work_items)
+
+    for res in responses:
+        if len(res) > 0:
+            created_map[res[0]] = res[1]
+
+    stop = datetime.datetime.now()
+
+    logger.info('Created [%s] entities in %s' % (number_of_entities, (stop - start)))
+
+    return created_map
+
+
+def clear(clear_url):
+    logger.info('deleting.... ' + clear_url)
+
+    r = session.delete(clear_url)
+
+    if r.status_code != 200:
+        logger.info('error deleting url=' + clear_url)
+        logger.info(json.dumps(r.json()))
+
+    else:
+        res = r.json()
+        len_entities = len(res.get('entities', []))
+
+        if len_entities > 0:
+            clear(clear_url)
+
+
+def test_cleared(q_url):
+    r = session.get(q_url)
+
+    if r.status_code != 200:
+        logger.info(json.dumps(r.json()))
+    else:
+        res = r.json()
+
+        if len(res.get('entities', [])) != 0:
+            logger.info('DID NOT CLEAR')
+
+
+def parse_args():
+    parser = argparse.ArgumentParser(description='Usergrid Indexing Latency Test')
+
+    parser.add_argument('-o', '--org',
+                        help='Name of the org to perform the test in',
+                        type=str,
+                        required=True)
+
+    parser.add_argument('-a', '--app',
+                        help='Name of the app to perform the test in',
+                        type=str,
+                        required=True)
+
+    parser.add_argument('--base_url',
+                        help='The URL of the Usergrid Instance',
+                        type=str,
+                        required=True)
+
+    parser.add_argument('--client_id',
+                        help='The Client ID to get a token, if needed',
+                        type=str,
+                        required=False)
+
+    parser.add_argument('--client_secret',
+                        help='The Client Secret to get a token, if needed',
+                        type=str,
+                        required=False)
+
+    my_args = parser.parse_args(sys.argv[1:])
+
+    return vars(my_args)
+
+
+def init():
+    global config
+
+    url_data = {
+        'api_url': config.get('base_url'),
+        'org': config.get('org'),
+        'app': config.get('app'),
+        'collection': datetime.datetime.now().strftime('index-test-%yx%mx%dx%Hx%Mx%S')
+    }
+
+    config['url_data'] = url_data
+    config['token_url'] = token_url_template.format(**url_data)
+
+
+def create_entity(name, tag):
+    create_me = entity_template.copy()
+    start_tag = datetime.datetime.now().strftime('tag-%yx%mx%dx%Hx%Mx%S')
+    create_me['tag'] = start_tag
+
+    data = config.get('url_data')
+    url = collection_url_template.format(**data)
+
+    r = session.post(url, data=json.dumps(create_me))
+
+    if r.status_code != 200:
+        logger.critical('unable to create entity: %s' % r.text)
+        return None
+    else:
+        return r.json().get('entities')[0]
+
+
+def update_entity(entity_id, tag):
+    data = {'tag': tag}
+    url = entity_url_template.format(entity_id=entity_id, **config.get('url_data'))
+    r = session.put(url, data=json.dumps(data))
+
+    if r.status_code != 200:
+        logger.critical('unable to update entity!')
+        return False
+    else:
+        return True
+
+
+def wait_for_index(entity_id, tag, wait_time=.25):
+    start = datetime.datetime.now()
+
+    url = query_url_template.format(tag=tag, **config.get('url_data'))
+
+    logger.info('GET %s' % url)
+
+    entities = []
+    elapsed = 0
+
+    while len(entities) <= 0:
+        r = session.get(url)
+
+        if r.status_code != 200:
+            logger.critical('Unable to query, url=[%s]: %s' % (url, r.text))
+            return False
+        else:
+            res = r.json()
+            entities = res.get('entities')
+            last_time = datetime.datetime.now()
+            elapsed = last_time - start
+            logger.info(
+                    'Tag [%s] not applied to [%s] after [%s].  Waiting [%s]...' % (tag, entity_id, elapsed, wait_time))
+            time.sleep(wait_time)
+
+    logger.info('++Tag applied after [%s]!' % elapsed)
+
+
+def test_entity_update():
+    start_tag = datetime.datetime.now().strftime('tag-%yx%mx%dx%Hx%Mx%S')
+    name = datetime.datetime.now().strftime('name-%yx%mx%dx%Hx%Mx%S')
+    entity = create_entity(name, start_tag)
+
+    if entity is None:
+        logger.critical('Entity not created, cannot continue')
+        return
+
+    uuid = entity.get('uuid')
+
+    for x in xrange(0, 10):
+        tag = datetime.datetime.now().strftime('tag-%yx%mx%dx%Hx%Mx%S')
+        logger.info('Testing tag [%s] on entity [%s]' % (tag, name))
+        updated = update_entity(name, tag)
+        if updated: wait_for_index(name, tag)
+
+    for x in xrange(0, 10):
+        tag = datetime.datetime.now().strftime('tag-%yx%mx%dx%Hx%Mx%S')
+        logger.info('Testing tag [%s] on entity [%s]' % (tag, uuid))
+        updated = update_entity(uuid, tag)
+        if updated: wait_for_index(uuid, tag)
+
+
+def main():
+    global config
+
+    processes = Pool(32)
+
+    config = parse_args()
+
+    init()
+
+    init_logging()
+
+    if config.get('client_id') is not None and config.get('client_secret') is not None:
+        token_request = {
+            'grant_type': 'client_credentials',
+            'client_id': config.get('client_id'),
+            'client_secret': config.get('client_secret')
+        }
+
+        r = session.post(config.get('token_url'), json.dumps(token_request))
+
+        if r.status_code == 200:
+            access_token = r.json().get('access_token')
+            session.headers.update({'Authorization': 'Bearer %s' % access_token})
+        else:
+            logger.critical('unable to get token: %s' % r.text)
+            exit(1)
+
+    try:
+        test_entity_update()
+
+    except KeyboardInterrupt:
+        pass
+        processes.terminate()
+
+
+main()
diff --git a/utils/usergrid-util-python/usergrid_tools/iterators/README.md b/utils/usergrid-util-python/usergrid_tools/iterators/README.md
new file mode 100644
index 0000000..cf61d4c
--- /dev/null
+++ b/utils/usergrid-util-python/usergrid_tools/iterators/README.md
@@ -0,0 +1,8 @@
+simple_iterator
+---------------
+Basis for iterating a collection or all pages of a query and doing something with the data, such as counting or modifying
+
+
+usergrid_cross_region_iterator
+---------------
+used to iterate data and check that it exists in another region
\ No newline at end of file
diff --git a/utils/usergrid-util-python/usergrid_tools/iterators/__init__.py b/utils/usergrid-util-python/usergrid_tools/iterators/__init__.py
new file mode 100644
index 0000000..b64a076
--- /dev/null
+++ b/utils/usergrid-util-python/usergrid_tools/iterators/__init__.py
@@ -0,0 +1,18 @@
+# */
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *   http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing,
+# * software distributed under the License is distributed on an
+# * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# * KIND, either express or implied.  See the License for the
+#    * specific language governing permissions and limitations
+# * under the License.
+# */
\ No newline at end of file
diff --git a/utils/usergrid-util-python/usergrid_tools/iterators/simple_iterator.py b/utils/usergrid-util-python/usergrid_tools/iterators/simple_iterator.py
new file mode 100644
index 0000000..ea42c00
--- /dev/null
+++ b/utils/usergrid-util-python/usergrid_tools/iterators/simple_iterator.py
@@ -0,0 +1,101 @@
+# */
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *   http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing,
+# * software distributed under the License is distributed on an
+# * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# * KIND, either express or implied.  See the License for the
+#    * specific language governing permissions and limitations
+# * under the License.
+# */
+
+import logging
+import sys
+import uuid
+from logging.handlers import RotatingFileHandler
+
+import datetime
+from usergrid import UsergridQueryIterator
+
+__author__ = 'Jeff.West@yahoo.com'
+
+
+execution_id = str(uuid.uuid4())
+
+
+def init_logging(stdout_enabled=True):
+    root_logger = logging.getLogger()
+    root_logger.setLevel(logging.INFO)
+
+    logging.getLogger('requests.packages.urllib3.connectionpool').setLevel(logging.ERROR)
+    logging.getLogger('boto').setLevel(logging.ERROR)
+    logging.getLogger('urllib3.connectionpool').setLevel(logging.WARN)
+
+    log_formatter = logging.Formatter(
+            fmt='%(asctime)s | ' + execution_id + ' | %(name)s | %(levelname)s | %(message)s',
+            datefmt='%m/%d/%Y %I:%M:%S %p')
+
+    stdout_logger = logging.StreamHandler(sys.stdout)
+    stdout_logger.setFormatter(log_formatter)
+    stdout_logger.setLevel(logging.CRITICAL)
+    root_logger.addHandler(stdout_logger)
+
+    if stdout_enabled:
+        stdout_logger.setLevel(logging.INFO)
+
+    # base log file
+
+    log_dir = './'
+    log_file_name = '%s/usergrid_iterator.log' % log_dir
+
+    # ConcurrentLogHandler
+    rotating_file = RotatingFileHandler(filename=log_file_name,
+                                        mode='a',
+                                        maxBytes=404857600,
+                                        backupCount=0)
+    rotating_file.setFormatter(log_formatter)
+    rotating_file.setLevel(logging.INFO)
+
+    root_logger.addHandler(rotating_file)
+
+
+def main():
+    init_logging()
+
+    logger = logging.getLogger('SimpleIterator')
+
+    if len(sys.argv) <= 1:
+        logger.critical('usage: usergrid_iterator {url}')
+        exit(1)
+
+    url = sys.argv[1]
+    logger.info('Beginning to iterate URL: %s' % url)
+
+    q = UsergridQueryIterator(url)
+
+    counter = 0
+
+    start = datetime.datetime.utcnow()
+    try:
+        for e in q:
+            counter += 1
+            logger.info('Entity # [%s]: name=[%s] uuid=[%s] created=[%s] modified=[%s]' % (counter, e.get('name'), e.get('uuid'), e.get('created'), e.get('modified')))
+
+    except KeyboardInterrupt:
+        logger.critical('KEYBOARD INTERRUPT')
+        pass
+
+    finish = datetime.datetime.utcnow()
+
+    logger.info('final entity count is [%s] in  [%s] for query [%s]' % (counter, (finish-start), url))
+
+if __name__ == '__main__':
+    main()
\ No newline at end of file
diff --git a/utils/usergrid-util-python/usergrid_tools/iterators/usergrid_cross_region_iterator.py b/utils/usergrid-util-python/usergrid_tools/iterators/usergrid_cross_region_iterator.py
new file mode 100644
index 0000000..08f2bf7
--- /dev/null
+++ b/utils/usergrid-util-python/usergrid_tools/iterators/usergrid_cross_region_iterator.py
@@ -0,0 +1,425 @@
+# */
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *   http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing,
+# * software distributed under the License is distributed on an
+# * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# * KIND, either express or implied.  See the License for the
+#    * specific language governing permissions and limitations
+# * under the License.
+# */
+
+from usergrid import UsergridQuery
+from Queue import Empty
+import argparse
+import json
+import time
+import logging
+import sys
+from multiprocessing import Process, JoinableQueue
+import datetime
+import requests
+import traceback
+import urllib3.contrib.pyopenssl
+
+__author__ = 'Jeff.West@yahoo.com'
+
+urllib3.disable_warnings()
+urllib3.contrib.pyopenssl.inject_into_urllib3()
+
+
+# This was used to force a sync of C* across the regions.  The idea is to query entities from
+# a region where they exist using QL.  Then, iterate over the results and do a GET by UUID
+# in the region where the entities are 'missing'.
+#
+# In order for this to be successful the readcl in the "GET by UUID" region or target region
+# must be set to 'ALL' - this will force a repair across the cluster.
+#
+# It is recommended to have the target tomcat out of the ELB for a customer.  Ideally,
+# you should spin up another Tomcat, leaving 2+ in the ELB for a given customer.
+
+
+logger = logging.getLogger('UsergridCrossRegionRepair')
+
+token_url_template = "{api_url}/management/token"
+org_management_url_template = "{api_url}/management/organizations/{org}/applications?access_token={access_token}"
+org_url_template = "{api_url}/{org}?access_token={access_token}"
+app_url_template = "{api_url}/{org}/{app}?access_token={access_token}"
+collection_url_template = "{api_url}/{org}/{app}/{collection}?access_token={access_token}"
+collection_query_url_template = "{api_url}/{org}/{app}/{collection}?ql={ql}&access_token={access_token}&limit={limit}"
+get_entity_url_template = "{api_url}/{org}/{app}/{collection}/{uuid}?access_token={access_token}&connections=none"
+put_entity_url_template = "{api_url}/{org}/{app}/{collection}/{uuid}?access_token={access_token}"
+
+# config can be loaded from a file
+config = {}
+
+# config = {
+#     "regions": {
+#         "us_west": {
+#             "api_url": "http://rut040wo:8080"
+#         },
+#         "us_east": {
+#             "api_url": "http://rut154ea:8080"
+#         },
+#         "eu_west": {
+#             "api_url": "http://localhost:8080"
+#         }
+#     },
+#     "management_region_id": "us_west",
+#     "query_region_id": "us_west",
+#     "get_region_ids": [
+#         "us_east"
+#     ]
+# }
+
+session_map = {}
+
+
+def init_logging(stdout_enabled=True):
+    root_logger = logging.getLogger()
+    log_file_name = './cross-region-repair.log'
+    log_formatter = logging.Formatter(fmt='%(asctime)s | %(name)s | %(processName)s | %(levelname)s | %(message)s',
+                                      datefmt='%m/%d/%Y %I:%M:%S %p')
+
+    rotating_file = logging.handlers.RotatingFileHandler(filename=log_file_name,
+                                                         mode='a',
+                                                         maxBytes=204857600,
+                                                         backupCount=10)
+    rotating_file.setFormatter(log_formatter)
+    rotating_file.setLevel(logging.INFO)
+
+    root_logger.addHandler(rotating_file)
+    root_logger.setLevel(logging.INFO)
+
+    logging.getLogger('boto').setLevel(logging.ERROR)
+    logging.getLogger('urllib3.connectionpool').setLevel(logging.WARN)
+    logging.getLogger('requests.packages.urllib3.connectionpool').setLevel(logging.WARN)
+
+    if stdout_enabled:
+        stdout_logger = logging.StreamHandler(sys.stdout)
+        stdout_logger.setFormatter(log_formatter)
+        stdout_logger.setLevel(logging.INFO)
+        root_logger.addHandler(stdout_logger)
+
+
+class Worker(Process):
+    def __init__(self, queue, handler_function):
+        super(Worker, self).__init__()
+        logger.warning('Creating worker!')
+        self.queue = queue
+        self.handler_function = handler_function
+
+    def run(self):
+
+        logger.info('starting run()...')
+        keep_going = True
+
+        count_processed = 0
+        count_error = 0
+
+        while keep_going:
+            empty_count = 0
+
+            try:
+                org, app, collection, entity = self.queue.get(timeout=600)
+                logger.debug('Task: org=[%s] app=[%s] collection=[%s] entity=[%s]' % (org, app, collection, entity))
+
+                if self.handler_function is not None:
+                    processed = self.handler_function(org=org,
+                                                      app=app,
+                                                      collection=collection,
+                                                      entity=entity,
+                                                      counter=count_processed)
+
+                    if processed:
+                        count_processed += 1
+                        logger.info('Processed count=[%s] SUCCESS uuid/name = %s / %s' % (
+                            count_processed, entity.get('uuid'), entity.get('name')))
+                    else:
+                        count_error += 1
+                        logger.error('Processed count=[%s] ERROR uuid/name = %s / %s' % (
+                            count_error, entity.get('uuid'), entity.get('name')))
+
+                self.queue.task_done()
+
+            except KeyboardInterrupt, e:
+                raise e
+
+            except Empty:
+                logger.warning('EMPTY!')
+                empty_count += 1
+                if empty_count > 30:
+                    keep_going = False
+
+        logger.warning('WORKER DONE!')
+
+
+def wait_for(threads, sleep_time=3000):
+    count_alive = 1
+
+    while count_alive > 0:
+        count_alive = 0
+
+        for t in threads:
+
+            if t.is_alive():
+                count_alive += 1
+
+        if count_alive > 0:
+            logger.warning('Waiting for [%s] processes to finish' % count_alive)
+            time.sleep(sleep_time)
+
+
+def parse_args():
+    DEFAULT_WORKERS = 16
+    DEFAULT_TOKEN_TTL = 25200000
+
+    parser = argparse.ArgumentParser(description='Usergrid Cross-Region Repair Script')
+
+    parser.add_argument('-o', '--org',
+                        help='The org to iterate',
+                        type=str,
+                        required=True)
+
+    parser.add_argument('-a', '--app',
+                        help='The org to iterate',
+                        action='append',
+                        default=[])
+
+    parser.add_argument('-c', '--collection',
+                        help='The org to iterate',
+                        action='append',
+                        default=[])
+
+    parser.add_argument('-p', '--password',
+                        help='The Password for the token request',
+                        type=str,
+                        required=True)
+
+    parser.add_argument('-u', '--username',
+                        help='The Username for the token request',
+                        type=str,
+                        required=True)
+
+    parser.add_argument('-w', '--workers',
+                        help='The Password for the token request',
+                        type=int,
+                        default=DEFAULT_WORKERS)
+
+    parser.add_argument('--ttl',
+                        help='The TTL for the token request',
+                        type=int,
+                        default=DEFAULT_TOKEN_TTL)
+
+    parser.add_argument('-l', '--limit',
+                        help='The global limit for QL requests',
+                        type=int,
+                        default=DEFAULT_WORKERS * 3)
+
+    parser.add_argument('-f', '--config',
+                        help='The file from which to load the configuration',
+                        type=str)
+
+    my_args = parser.parse_args(sys.argv[1:])
+
+    return vars(my_args)
+
+
+def get_by_UUID(org, app, collection, entity, counter, attempts=0):
+    response = False
+
+    if attempts >= 10:
+        return False
+
+    for region_id in config.get('get_region_ids', []):
+        url_data = config.get('regions', {}).get(region_id)
+
+        url = get_entity_url_template.format(collection=collection,
+                                             app=app,
+                                             uuid=entity.get('uuid'),
+                                             org=org,
+                                             access_token=config['access_token'],
+                                             **url_data)
+
+        logger.info('GET [%s]: %s' % ('...', url))
+
+        session = session_map[region_id]
+
+        while not response:
+
+            try:
+                r = session.get(url)
+
+                if r.status_code != 200:
+                    logger.error('GET [%s] (%s): %s' % (r.status_code, r.elapsed, url))
+                    logger.warning('Sleeping for 5 on connection retry...')
+
+                    return get_by_UUID(org, app, collection, entity, counter, attempts=attempts + 1)
+
+                else:
+                    logger.info('GET [%s] (%s): %s' % (r.status_code, r.elapsed, url))
+                    response = True
+
+                if counter % 10 == 0:
+                    logger.info('COUNTER=[%s] time=[%s] GET [%s]: %s' % (counter,
+                                                                         r.elapsed,
+                                                                         r.status_code,
+                                                                         url))
+            except:
+                logger.error(traceback.format_exc())
+                logger.error('EXCEPTION on GET [...] (...): %s' % url)
+                response = False
+                logger.warning('Sleeping for 5 on connection retry...')
+                time.sleep(5)
+
+    return response
+
+
+def init(args):
+    global config
+
+    if args.get('config') is not None:
+        config_filename = args.get('config')
+
+        logger.warning('Using config file: %s' % config_filename)
+
+        try:
+            with open(config_filename, 'r') as f:
+                parsed_config = json.load(f)
+                logger.warning('Updating config with: %s' % parsed_config)
+                config.update(parsed_config)
+        except:
+            print traceback.format_exc()
+
+    for region_id, region_data in config.get('regions', {}).iteritems():
+        session_map[region_id] = requests.Session()
+
+
+def main():
+    global config
+
+    args = parse_args()
+    init(args)
+
+    management_region_id = config.get('management_region_id', '')
+    management_region = config.get('regions', {}).get(management_region_id)
+
+    query_region_id = config.get('query_region_id', '')
+    query_region = config.get('regions', {}).get(query_region_id)
+
+    start = datetime.datetime.now()
+
+    queue = JoinableQueue()
+
+    logger.warning('Starting workers...')
+    init_logging()
+
+    token_request = {
+        'grant_type': 'password',
+        'username': args.get('username'),
+        'ttl': args.get('ttl')
+    }
+
+    url = token_url_template.format(**management_region)
+
+    logger.info('getting token with url=[%s] data=[%s]' % (url, token_request))
+
+    token_request['password'] = args.get('password')
+
+    r = requests.post(url, data=json.dumps(token_request))
+
+    if r.status_code != 200:
+        logger.critical('did not get access token! response: %s' % r.json())
+        exit(-1)
+
+    logger.info(r.json())
+
+    config['access_token'] = r.json().get('access_token')
+
+    org_mgmt_url = org_management_url_template.format(org=args.get('org'),
+                                                      access_token=config['access_token'],
+                                                      **management_region)
+    logger.info(org_mgmt_url)
+
+    session = session_map[management_region_id]
+
+    r = session.get(org_mgmt_url)
+    logger.info(r.json())
+    logger.info('starting [%s] workers...' % args.get('workers'))
+    workers = [Worker(queue, get_by_UUID) for x in xrange(args.get('workers'))]
+    [w.start() for w in workers]
+
+    try:
+        org_app_data = r.json().get('data')
+
+        logger.info(org_app_data)
+
+        apps_to_process = config.get('app', [])
+        collections_to_process = config.get('collection', [])
+
+        for org_app, app_uuid in org_app_data.iteritems():
+            parts = org_app.split('/')
+            app = parts[1]
+
+            if len(apps_to_process) > 0 and app not in apps_to_process:
+                logger.info('Skipping app/uuid: %s/%s' % (org_app, app_uuid))
+                continue
+
+            logger.info('app UUID: %s' % app_uuid)
+
+            url = app_url_template.format(app=app,
+                                          org=args.get('org'),
+                                          access_token=config['access_token'],
+                                          **management_region)
+
+            logger.info('GET [...]: %s' % url)
+            session = session_map[management_region_id]
+            r = session.get(url)
+
+            for collection_name in r.json().get('entities', [{}])[0].get('metadata', {}).get('collections', {}):
+
+                if collection_name in ['events']:
+                    continue
+
+                elif len(collections_to_process) > 0 and collection_name not in collections_to_process:
+                    logger.info('skipping collection=%s' % collection_name)
+                    continue
+
+                logger.info('processing collection=%s' % collection_name)
+
+                url = collection_query_url_template.format(ql='select * order by created asc',
+                                                           collection=collection_name,
+                                                           org=args['org'],
+                                                           app=app,
+                                                           limit=args['limit'],
+                                                           access_token=config['access_token'],
+                                                           **query_region)
+
+                q = UsergridQuery(url)
+                counter = 0
+
+                for x, e in enumerate(q):
+                    counter += 1
+                    queue.put((args['org'], app, collection_name, e))
+
+                logger.info('collection=%s, count=%s' % (collection_name, counter))
+
+    except KeyboardInterrupt:
+        [w.terminate() for w in workers]
+
+    logger.warning('Waiting for workers to finish...')
+    wait_for(workers)
+
+    finish = datetime.datetime.now()
+    logger.warning('Done!  Took: %s ' % (finish - start))
+
+
+main()
diff --git a/utils/usergrid-util-python/usergrid_tools/iterators/usergrid_iterator.py b/utils/usergrid-util-python/usergrid_tools/iterators/usergrid_iterator.py
new file mode 100644
index 0000000..7929a58
--- /dev/null
+++ b/utils/usergrid-util-python/usergrid_tools/iterators/usergrid_iterator.py
@@ -0,0 +1,504 @@
+# */
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *   http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing,
+# * software distributed under the License is distributed on an
+# * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# * KIND, either express or implied.  See the License for the
+#    * specific language governing permissions and limitations
+# * under the License.
+# */
+
+from Queue import Empty
+import json
+import logging
+import sys
+from multiprocessing import Queue, Process
+import traceback
+from logging.handlers import RotatingFileHandler
+import time
+
+import argparse
+
+from usergrid import UsergridClient, UsergridError
+
+__author__ = 'Jeff.West@yahoo.com'
+
+logger = logging.getLogger('UsergridIterator')
+
+# SAMPLE CONFIG FILE for source and target
+sample_config = {
+    "endpoint": {
+        "api_url": "https://api.usergrid.com",
+        "limit": 100
+    },
+
+    "credentials": {
+        "myOrg": {
+            "client_id": "<<client_id>>",
+            "client_secret": "<<client_secret>>"
+        }
+    }
+}
+
+
+def init_logging(file_enabled=False, stdout_enabled=True):
+    root_logger = logging.getLogger()
+    root_logger.setLevel(logging.INFO)
+    logging.getLogger('urllib3.connectionpool').setLevel(logging.WARN)
+    logging.getLogger('requests.packages.urllib3.connectionpool').setLevel(logging.WARN)
+
+    log_formatter = logging.Formatter(fmt='%(asctime)s | %(name)s | %(processName)s | %(levelname)s | %(message)s',
+                                      datefmt='%m/%d/%Y %I:%M:%S %p')
+
+    if file_enabled:
+        log_file_name = './UsergridIterator.log'
+
+        rotating_file = logging.handlers.RotatingFileHandler(filename=log_file_name,
+                                                             mode='a',
+                                                             maxBytes=204857600,
+                                                             backupCount=10)
+        rotating_file.setFormatter(log_formatter)
+        rotating_file.setLevel(logging.INFO)
+
+        root_logger.addHandler(rotating_file)
+
+    if stdout_enabled:
+        stdout_logger = logging.StreamHandler(sys.stdout)
+        stdout_logger.setFormatter(log_formatter)
+        stdout_logger.setLevel(logging.INFO)
+
+        root_logger.addHandler(stdout_logger)
+
+
+config = {}
+
+
+class Worker(Process):
+    """
+    The worker is used to perform a set of handler functions in a chain.  Work is provided for the Worker thread(s) on
+    a JoinableQueue.  The thread will continue until either 1) it is explicitly terminated or 2) until it does not
+     receive work on the queue after a consecutive number of attempts (max_empty_count) using the specified timeout
+     (queue_timeout)
+    """
+
+    def __init__(self,
+                 queue,
+                 source_client,
+                 target_client,
+                 max_empty_count=3,
+                 queue_timeout=10,
+                 function_chain=None):
+        """
+        This is an example handler function which can transform an entity. Multiple handler functions can be used to
+        process a entity.  The response is an entity which will get passed to the next handler in the chain
+
+        :param queue: The queue on which to listen for work
+        :param source_client: The UsergridClient of the source Usergrid instance
+        :param target_client: The UsergridClient of the target Usergrid instance
+        :param max_empty_count: The maximum number of times for a worker to not receive work after checking the queue
+        :param queue_timeout: The timeout for waiting for work on the queue
+        :param function_chain: An array of function pointers which will be executed in array sequence, expeting the following parameters: org_name, app_name, collection_name, entity, source_client, target_client, attempts=0p
+        """
+
+        super(Worker, self).__init__()
+        logger.warning('Creating worker!')
+
+        if not function_chain:
+            function_chain = []
+
+        self.function_chain = function_chain
+        self.queue = queue
+        self.source_client = source_client
+        self.target_client = target_client
+        self.max_empty_count = max_empty_count
+        self.queue_timeout = queue_timeout
+
+    def run(self):
+        logger.info('starting run()...')
+        keep_going = True
+
+        count_processed = 0
+        count_failed = 0
+        empty_count = 0
+
+        while keep_going:
+
+            try:
+                org, app, collection_name, entity = self.queue.get(timeout=self.queue_timeout)
+
+                empty_count = 0
+                success = True
+                entity_param = entity
+
+                for handler in self.function_chain:
+
+                    if entity_param is not None:
+                        try:
+                            entity_param = handler(org, app, collection_name, entity_param, self.source_client,
+                                                   self.target_client)
+                        except Exception, e:
+                            logger.error(e)
+                            print traceback.format_exc()
+                            success = False
+
+                if success:
+                    count_processed += 1
+                    logger.info('Processed [%sth] SUCCESS app/collection/name/uuid = %s / %s / %s / %s' % (
+                        count_processed, app, collection_name, entity.get('name'), entity.get('uuid')))
+                else:
+                    count_failed += 1
+                    logger.warning('Processed [%sth] FAILURE app/collection/name/uuid = %s / %s / %s / %s' % (
+                        count_processed, app, collection_name, entity.get('name'), entity.get('uuid')))
+
+            except KeyboardInterrupt, e:
+                raise e
+
+            except Empty:
+                logger.warning(
+                    'No task received after timeout=[%s]! Empty Count=%s' % (self.queue_timeout, empty_count))
+
+                empty_count += 1
+
+                if empty_count >= self.max_empty_count:
+                    logger.warning('Stopping work after empty_count=[%s]' % empty_count)
+                    keep_going = False
+
+        logger.info('Worker finished!')
+
+
+def filter_entity(org_name, app_name, collection_name, entity_data, source_client, target_client, attempts=0):
+    """
+    This is an example handler function which can filter entities. Multiple handler functions can be used to
+    process a entity.  The response is an entity which will get passed to the next handler in the chain
+
+    :param org_name: The org name from whence this entity came
+    :param app_name: The app name from whence this entity came
+    :param collection_name: The collection name from whence this entity came
+    :param entity: The entity retrieved from the source instance
+    :param source_client: The UsergridClient for the source Usergrid instance
+    :param target_client: The UsergridClient for the target Usergrid instance
+    :param attempts: the number of previous attempts this function was run (manual, not part of the framework)
+    :return: an entity.  If response is None then the chain will stop.
+    """
+
+    # return None if you want to stop the chain (filter the entity out)
+    if 'blah' in entity_data:
+        return None
+
+    # return the entity to keep going
+    return entity_data
+
+
+def transform_entity(org_name, app_name, collection_name, entity_data, source_client, target_client, attempts=0):
+    """
+    This is an example handler function which can transform an entity. Multiple handler functions can be used to
+    process a entity.  The response is an entity which will get passed to the next handler in the chain
+
+    :param org_name: The org name from whence this entity came
+    :param app_name: The app name from whence this entity came
+    :param collection_name: The collection name from whence this entity came
+    :param entity: The entity retrieved from the source instance
+    :param source_client: The UsergridClient for the source Usergrid instance
+    :param target_client: The UsergridClient for the target Usergrid instance
+    :param attempts: the number of previous attempts this function was run (manual, not part of the framework)
+    :return: an entity.  If response is None then the chain will stop.
+    """
+    # this just returns the entity with no transform
+    return entity_data
+
+
+def create_new(org_name, app_name, collection_name, entity_data, source_client, target_client, attempts=0):
+    """
+    This is an example handler function which can be used to create a new entity in the target instance (based on the
+    target_client) parameter. Multiple handler functions can be used to process a entity.  The response is an entity
+    which will get passed to the next handler in the chain
+
+    :param org_name: The org name from whence this entity came
+    :param app_name: The app name from whence this entity came
+    :param collection_name: The collection name from whence this entity came
+    :param entity_data: The entity retrieved from the source instance
+    :param source_client: The UsergridClient for the source Usergrid instance
+    :param target_client: The UsergridClient for the target Usergrid instance
+    :param attempts: the number of previous attempts this function was run (manual, not part of the framework)
+    :return: an entity.  If response is None then the chain will stop.
+    """
+
+    attempts += 1
+
+    if 'metadata' in entity_data: entity_data.pop('metadata')
+
+    target_org = config.get('target_org')
+    target_app = config.get('app_mapping', {}).get(app_name, app_name)
+    target_collection = config.get('collection_mapping', {}).get(collection_name, collection_name)
+
+    if target_client:
+        try:
+            c = target_client.org(target_org).app(target_app).collection(target_collection)
+            e = c.entity_from_data(entity_data)
+            e.put()
+
+        except UsergridError as err:
+            logger.error(err)
+            raise err
+
+    return None
+
+
+def parse_args():
+    parser = argparse.ArgumentParser(description='Usergrid App/Collection Iterator')
+
+    parser.add_argument('-o', '--org',
+                        help='Name of the org to migrate',
+                        type=str,
+                        required=True)
+
+    parser.add_argument('-a', '--app',
+                        help='Multiple, name of apps to include, skip to include all',
+                        default=[],
+                        action='append')
+
+    parser.add_argument('-c', '--collection',
+                        help='Multiple, name of collections to include, skip to include all',
+                        default=[],
+                        action='append')
+
+    parser.add_argument('--ql',
+                        help='The Query string for processing the source collection(s)',
+                        type=str,
+                        default='select *')
+
+    parser.add_argument('-s', '--source_config',
+                        help='The configuration of the source endpoint/org',
+                        type=str,
+                        default='source.json')
+
+    parser.add_argument('-d', '--target_config',
+                        help='The configuration of the target endpoint/org',
+                        type=str,
+                        default='destination.json')
+
+    parser.add_argument('-w', '--workers',
+                        help='The number of worker threads',
+                        type=int,
+                        default=1)
+
+    parser.add_argument('-f', '--force',
+                        help='Force an update regardless of modified date',
+                        type=bool,
+                        default=False)
+
+    parser.add_argument('--max_empty_count',
+                        help='The number of iterations for an individual worker to receive no work before stopping',
+                        type=int,
+                        default=3)
+
+    parser.add_argument('--queue_timeout',
+                        help='The duration in seconds for an individual worker queue poll before Empty is raised',
+                        type=int,
+                        default=10)
+
+    parser.add_argument('--map_app',
+                        help="A colon-separated string such as 'apples:oranges' which indicates to put data from the app named 'apples' from the source endpoint into app named 'oranges' in the target endpoint",
+                        default=[],
+                        action='append')
+
+    parser.add_argument('--map_collection',
+                        help="A colon-separated string such as 'cats:dogs' which indicates to put data from collections named 'cats' from the source endpoint into a collection named 'dogs' in the target endpoint, applicable to all apps",
+                        default=[],
+                        action='append')
+
+    parser.add_argument('--target_org',
+                        help="The org name at the Usergrid destination instance",
+                        type=str)
+
+    my_args = parser.parse_args(sys.argv[1:])
+
+    return vars(my_args)
+
+
+def init():
+    global config
+
+    config['collection_mapping'] = {}
+    config['app_mapping'] = {}
+    config['org_mapping'] = {}
+
+    with open(config.get('source_config'), 'r') as f:
+        config['source_config'] = json.load(f)
+
+    with open(config.get('target_config'), 'r') as f:
+        config['target_config'] = json.load(f)
+
+    for mapping in config.get('map_collection', []):
+        parts = mapping.split(':')
+
+        if len(parts) == 2:
+            config['collection_mapping'][parts[0]] = parts[1]
+        else:
+            logger.warning('Skipping malformed Collection mapping: [%s]' % mapping)
+
+    for mapping in config.get('map_app', []):
+        parts = mapping.split(':')
+
+        if len(parts) == 2:
+            config['app_mapping'][parts[0]] = parts[1]
+        else:
+            logger.warning('Skipping malformed App mapping: [%s]' % mapping)
+
+    for mapping in config.get('map_org', []):
+        parts = mapping.split(':')
+
+        if len(parts) == 2:
+            config['org_mapping'][parts[0]] = parts[1]
+        else:
+            logger.warning('Skipping Org mapping: [%s]' % mapping)
+
+    if 'source_config' in config:
+        config['source_endpoint'] = config['source_config'].get('endpoint').copy()
+        config['source_endpoint'].update(config['source_config']['credentials'][config['org']])
+
+    config['target_org'] = config['target_org'] if config['target_org'] else config['org']
+
+    if 'target_config' in config:
+        config['target_endpoint'] = config['target_config'].get('endpoint').copy()
+        config['target_endpoint'].update(config['target_config']['credentials'][config['target_org']])
+
+
+def wait_for(arr_threads, sleep_time=3):
+    """
+    This function pauses the thread until the array of threads which is provided all stop working
+
+    :param arr_threads: an array of Process objects to monitor
+    :param sleep_time: the time to sleep between evaluating the array for completion
+    :return: None
+    """
+    threads_working = 100
+
+    while threads_working > 0:
+        threads_working = 0
+
+        for t in arr_threads:
+
+            if t.is_alive():
+                threads_working += 1
+
+        if threads_working > 0:
+            logger.warn('Waiting for [%s] threads to finish...' % threads_working)
+            time.sleep(sleep_time)
+
+    logger.warn('Worker Threads finished!')
+
+
+class UsergridIterator:
+    def __init__(self):
+        pass
+
+    def get_to_work(self):
+        global config
+
+        queue = Queue()
+        logger.warning('Starting workers...')
+
+        apps_to_process = config.get('app')
+        collections_to_process = config.get('collection')
+        source_org = config['org']
+        target_org = config.get('target_org', config.get('org'))
+
+        source_client = None
+        target_client = None
+
+        try:
+            source_client = UsergridClient(api_url=config['source_endpoint']['api_url'],
+                                           org_name=source_org)
+            source_client.authenticate_management_client(
+                client_credentials=config['source_config']['credentials'][source_org])
+
+        except UsergridError, e:
+            logger.critical(e)
+            exit()
+
+        if 'target_endpoint' in config:
+            try:
+                target_client = UsergridClient(api_url=config['target_endpoint']['api_url'],
+                                               org_name=target_org)
+                target_client.authenticate_management_client(
+                    client_credentials=config['target_config']['credentials'][target_org])
+
+            except UsergridError, e:
+                logger.critical(e)
+                exit()
+
+        function_chain = [filter_entity, transform_entity, create_new]
+
+        workers = [Worker(queue=queue,
+                          source_client=source_client,
+                          target_client=target_client,
+                          function_chain=function_chain,
+                          max_empty_count=config.get('max_empty_count', 3),
+                          queue_timeout=config.get('queue_timeout', 10))
+
+                   for x in xrange(config.get('workers', 1))]
+
+        [w.start() for w in workers]
+
+        for app in source_client.list_apps():
+
+            if len(apps_to_process) > 0 and app not in apps_to_process:
+                logger.warning('Skipping app=[%s]' % app)
+                continue
+
+            logger.warning('Processing app=[%s]' % app)
+
+            source_app = source_client.organization(source_org).application(app)
+
+            for collection_name, collection in source_app.list_collections().iteritems():
+
+                if collection_name in ['events', 'queues']:
+                    logger.warning('Skipping internal collection=[%s]' % collection_name)
+                    continue
+
+                if len(collections_to_process) > 0 and collection_name not in collections_to_process:
+                    logger.warning('Skipping collection=[%s]' % collection_name)
+                    continue
+
+                logger.warning('Processing collection=%s' % collection_name)
+
+                counter = 0
+
+                try:
+                    for entity in collection.query(ql=config.get('ql'),
+                                                   limit=config.get('source_endpoint', {}).get('limit', 100)):
+                        counter += 1
+                        queue.put((config.get('org'), app, collection_name, entity))
+
+                except KeyboardInterrupt:
+                    [w.terminate() for w in workers]
+
+            logger.info('Publishing entities complete!')
+
+        wait_for(workers)
+
+        logger.info('All done!!')
+
+
+def main():
+    global config
+    config = parse_args()
+    init()
+
+    init_logging()
+
+    UsergridIterator().get_to_work()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/utils/usergrid-util-python/usergrid_tools/library_check.py b/utils/usergrid-util-python/usergrid_tools/library_check.py
new file mode 100644
index 0000000..d053057
--- /dev/null
+++ b/utils/usergrid-util-python/usergrid_tools/library_check.py
@@ -0,0 +1,45 @@
+# */
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *   http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing,
+# * software distributed under the License is distributed on an
+# * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# * KIND, either express or implied.  See the License for the
+#    * specific language governing permissions and limitations
+# * under the License.
+# */
+
+import traceback
+
+__author__ = 'Jeff.West@yahoo.com'
+
+
+url_data = {
+    "api_url": "https://usergrid-e2e-prod.e2e.apigee.net/appservices-2-1/",
+    "org": "",
+    "app": "",
+    "client_id": "",
+    "client_secret": "",
+
+}
+
+collection_url_template = "{api_url}/{org}/{app}/{collection}"
+
+try:
+    from usergrid import UsergridQueryIterator
+
+    q = UsergridQueryIterator('')
+
+    print 'Check OK'
+
+except Exception, e:
+    print traceback.format_exc(e)
+    print 'Check Failed'
diff --git a/utils/usergrid-util-python/usergrid_tools/migration/README.md b/utils/usergrid-util-python/usergrid_tools/migration/README.md
new file mode 100644
index 0000000..921b0a7
--- /dev/null
+++ b/utils/usergrid-util-python/usergrid_tools/migration/README.md
@@ -0,0 +1,234 @@
+# Usergrid Data Migrator
+
+## Prerequisites
+* Python 2 (not python 3)
+
+* Install the Usergrid Python SDK: https://github.com/jwest-apigee/usergrid-python
+
+With Pip (requires python-pip to be installed): `pip install usergrid`
+
+* Install Usergrid Tools
+
+With Pip (requires python-pip to be installed): `pip install usergrid-tools`
+
+
+## Overview
+The purpose of this document is to provide an overview of the Python Script provided in the same directory which allows you to migrate data, connections and users from one Usergrid platform / org / app to another.  This can be used in the upgrade process from Usergrid 1.0 to 2.x since there is no upgrade path.
+
+This script functions by taking source and target endpoint configurations (with credentials) and a set of command-line parameters to read data from one Usergrid instance and write to another.  It is written in Python and requires Python 2.7.6+.
+
+There are multiple processes at work in the migration to speed the process up.  There is a main thread which reads entities from the API and then publishes the entities with metadata into a Python Queue which has multiple worker processes listening for work.  The number of worker threads is configurable by command line parameters.
+
+
+# Process to Migrate Data and Graph (Connections)
+Usergrid is a Graph database and allows for connections between entities.  In order for a connection to be made, both the source entity and the target entity must exist.  Therefore, in order to migrate connections it is adviseable to first migrate all the data and then all the connections associated with that data.
+
+# Concepts
+As with any migration process there is a source and a target.  The source and target have the following parameters:
+
+* API URL: The HTTP[S] URL where the platform can be reached
+* Org: You must specify one org at a time to migrate using this script
+* App: You can optinally specify one or more applications to migrate.  If you specify zero applications then all applications will be migrated
+* Collection: You can optionally specify one or more collections to migrate.  If you specify zero collections then all applications will be migrated
+* QL: You can specify a Query Language predicate to be used.  If none is specified, 'select *' will be used which will migrate all data within a given collection
+* Graph: Graph implies traversal of graph edges which necessarily must exist.  This is an alternative to using query which uses the indexing.  
+
+# Graph Loops
+
+When iterating a graph it is possible to get stuck in a loop.  For example:
+
+```
+A --follows--> B
+B --likes--> C
+C --loves--> A
+```
+
+There are two options to prevent getting stuck in a loop:
+* `graph_depth` option - this will limit the graph depth which will be traversed from a given entity.
+* And/Or Marking nodes and edges as 'visited'.  This requires a place to store this state.  See Using Redis in the next section
+
+# Using Redis 
+
+Redis can be used for the following:
+
+If using Redis, version 2.8+ is needed because TTL is used with the 'ex' parameter.
+
+* Keeping track of the modified date for each entity.  When running the script subsequent times after this, entiites which were not modified will not be copied.
+* Keeping track of visited nodes for migrating a graph.  This is done with a TTL such that a job can be resumed, but since there is no modified date on an edge you cannot know if there are new edges or not.  Therefore, when the TTL expires the nodes will be visited again
+* Keeping track of the URLs for the connections which are created between entities.  This has no TTL.  Subsequent runs will not create connections which are found in Redis which have already been created.
+
+
+# Mapping
+Using this script it is not necessary to keep the same application name, org name and/or collection name as the source at the target.  For example, you could migrate from /myOrg/myApp/myCollection to /org123/app456/collections789.  
+
+
+# Configuration Files
+Example source/target configuration files:
+
+```
+{
+  "endpoint": {
+    "api_url": "https://api.usergrid.com"
+  },
+  "credentials": {
+    "myOrg1": {
+      "client_id": "YXA6lpg9sEaaEeONxG0g3Uz44Q",
+      "client_secret": "ZdF66u2h3Hc7csOcsEtgewmxalB1Ygg"
+    },
+    "myOrg2": {
+      "client_id": "ZXf63p239sDaaEeONSG0g3Uz44Z",
+      "client_secret": "ZdF66u2h3Hc7csOcsEtgewmxajsadfj32"
+    }
+  }
+}
+```
+* api_url: the API URL to access/write data
+* Credentials:
+ * For each org, with the org name (case-sensetive) as the key:
+  * client_id - the org-level Client ID. This can be retrieved from the BaaS/Usergrid Portal.
+  * client_secret - the org-level Client Secret. This can be retrieved from the BaaS/Usergrid Portal.
+
+# Command Line Parameters
+
+```
+Usergrid Org/App Data Migrator
+
+optional arguments:
+  -h, --help            show this help message and exit
+  --log_dir LOG_DIR     path to the place where logs will be written
+  --log_level LOG_LEVEL
+                        log level - DEBUG, INFO, WARN, ERROR, CRITICAL
+  -o ORG, --org ORG     Name of the org to migrate
+  -a APP, --app APP     Name of one or more apps to include, specify none to
+                        include all apps
+  -e INCLUDE_EDGE, --include_edge INCLUDE_EDGE
+                        Name of one or more edges/connection types to INCLUDE,
+                        specify none to include all edges
+  --exclude_edge EXCLUDE_EDGE
+                        Name of one or more edges/connection types to EXCLUDE,
+                        specify none to include all edges
+  --exclude_collection EXCLUDE_COLLECTION
+                        Name of one or more collections to EXCLUDE, specify
+                        none to include all collections
+  -c COLLECTION, --collection COLLECTION
+                        Name of one or more collections to include, specify
+                        none to include all collections
+  --use_name_for_collection USE_NAME_FOR_COLLECTION
+                        Name of one or more collections to use [name] instead
+                        of [uuid] for creating entities and edges
+  -m {data,none,reput,credentials,graph}, --migrate {data,none,reput,credentials,graph}
+                        Specifies what to migrate: data, connections,
+                        credentials, audit or none (just iterate the
+                        apps/collections)
+  -s SOURCE_CONFIG, --source_config SOURCE_CONFIG
+                        The path to the source endpoint/org configuration file
+  -d TARGET_CONFIG, --target_config TARGET_CONFIG
+                        The path to the target endpoint/org configuration file
+  --limit LIMIT         The number of entities to return per query request
+  -w ENTITY_WORKERS, --entity_workers ENTITY_WORKERS
+                        The number of worker processes to do the migration
+  --visit_cache_ttl VISIT_CACHE_TTL
+                        The TTL of the cache of visiting nodes in the graph
+                        for connections
+  --error_retry_sleep ERROR_RETRY_SLEEP
+                        The number of seconds to wait between retrieving after
+                        an error
+  --page_sleep_time PAGE_SLEEP_TIME
+                        The number of seconds to wait between retrieving pages
+                        from the UsergridQueryIterator
+  --entity_sleep_time ENTITY_SLEEP_TIME
+                        The number of seconds to wait between retrieving pages
+                        from the UsergridQueryIterator
+  --collection_workers COLLECTION_WORKERS
+                        The number of worker processes to do the migration
+  --queue_size_max QUEUE_SIZE_MAX
+                        The max size of entities to allow in the queue
+  --graph_depth GRAPH_DEPTH
+                        The graph depth to traverse to copy
+  --queue_watermark_high QUEUE_WATERMARK_HIGH
+                        The point at which publishing to the queue will PAUSE
+                        until it is at or below low watermark
+  --min_modified MIN_MODIFIED
+                        Break when encountering a modified date before this,
+                        per collection
+  --max_modified MAX_MODIFIED
+                        Break when encountering a modified date after this,
+                        per collection
+  --queue_watermark_low QUEUE_WATERMARK_LOW
+                        The point at which publishing to the queue will RESUME
+                        after it has reached the high watermark
+  --ql QL               The QL to use in the filter for reading data from
+                        collections
+  --skip_cache_read     Skip reading the cache (modified timestamps and graph
+                        edges)
+  --skip_cache_write    Skip updating the cache with modified timestamps of
+                        entities and graph edges
+  --create_apps         Create apps at the target if they do not exist
+  --nohup               specifies not to use stdout for logging
+  --graph               Use GRAPH instead of Query
+  --su_username SU_USERNAME
+                        Superuser username
+  --su_password SU_PASSWORD
+                        Superuser Password
+  --inbound_connections
+                        Name of the org to migrate
+  --map_app MAP_APP     Multiple allowed: A colon-separated string such as
+                        'apples:oranges' which indicates to put data from the
+                        app named 'apples' from the source endpoint into app
+                        named 'oranges' in the target endpoint
+  --map_collection MAP_COLLECTION
+                        One or more colon-separated string such as 'cats:dogs'
+                        which indicates to put data from collections named
+                        'cats' from the source endpoint into a collection
+                        named 'dogs' in the target endpoint, applicable
+                        globally to all apps
+  --map_org MAP_ORG     One or more colon-separated strings such as 'red:blue'
+                        which indicates to put data from org named 'red' from
+                        the source endpoint into a collection named 'blue' in
+                        the target endpoint
+```
+
+## Example Command Line
+
+Use the following command to migrate DATA AND GRAPH  (no graph edges or connections between entities).  If there are no graph edges (connections) then using `-m graph` is not necessary.  This will copy all data from all apps in the org 'myorg', creating apps in the target org if they do not already exist.  Note that --create_apps will be required if the Apps in the target org have not been created.
+
+```
+$ usergrid_data_migrator -o myorg -m graph -w 4 -s mySourceConfig.json -d myTargetConfiguration.json  --create_apps
+```
+
+Use the following command to migrate DATA ONLY (no graph edges or connections between entities).  This will copy all data from all apps in the org 'myorg', creating apps in the target org if they do not already exist.  Note that --create_apps will be required if the Apps in the target org have not been created.
+
+```
+$ usergrid_data_migrator -o myorg -m data -w 4 -s mySourceConfig.json -d myTargetConfiguration.json --create_apps
+```
+
+Use the following command to migrate CREDENTIALS for Application-level Users.  Note that `usergrid.sysadmin.login.allowed=true` must be set in the `usergrid-deployment.properties` file on the source and target Tomcat nodes.
+
+```
+$ usergrid_data_migrator -o myorg -m credentails -w 4 -s mySourceConfig.json -d myTargetConfiguration.json --create_apps --su_username foo --su_password bar
+```
+
+This command:
+
+```
+$ usergrid_data_migrator -o myorg -a app1 -a app2 -m data -w 4 --map_app app1:appplication_1 --map_app app2:application_2 --map_collection pets:animals --map_org myorg:my_new_org -s mySourceConfig.json -d myTargetConfiguration.json
+```
+will do the following: 
+
+* migrate Apps named 'app1' and 'app2' in org named 'myorg' from the API endpoint defined in 'mySourceConfig.json' to the API endpoint defined in 'myTargetConfiguration.json'
+* In the process:
+** data from 'myorg' will ge migrated to the org named 'my_new_org'
+** data from 'app1' will be migrated to the app named 'application_1'
+** data from 'app2' will be migrated to the app named 'application_2'
+** all collections named 'pets' will be overridden at the destination to 'animals'
+
+
+# FAQ
+
+### Does the process keep the same UUIDs?
+
+* Yes - with this script the same UUIDs can be kept from the source into the destination.  An exception is if you specify going from one collection to another under the same Org hierarchy.
+
+### Does the process keep the ordering of connections by time?
+
+* Yes ordering of connections is maintained in the process. 
diff --git a/utils/usergrid-util-python/usergrid_tools/migration/__init__.py b/utils/usergrid-util-python/usergrid_tools/migration/__init__.py
new file mode 100644
index 0000000..7247862
--- /dev/null
+++ b/utils/usergrid-util-python/usergrid_tools/migration/__init__.py
@@ -0,0 +1,24 @@
+# */
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *   http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing,
+# * software distributed under the License is distributed on an
+# * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# * KIND, either express or implied.  See the License for the
+#    * specific language governing permissions and limitations
+# * under the License.
+# */
+
+import usergrid_data_migrator
+import usergrid_data_exporter
+
+__author__ = 'Jeff.West@yahoo.com'
+
diff --git a/utils/usergrid-util-python/usergrid_tools/migration/usergrid_data_exporter.py b/utils/usergrid-util-python/usergrid_tools/migration/usergrid_data_exporter.py
new file mode 100644
index 0000000..0cbb9e1
--- /dev/null
+++ b/utils/usergrid-util-python/usergrid_tools/migration/usergrid_data_exporter.py
@@ -0,0 +1,943 @@
+# */
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *   http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing,
+# * software distributed under the License is distributed on an
+# * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# * KIND, either express or implied.  See the License for the
+#    * specific language governing permissions and limitations
+# * under the License.
+# */
+
+from __future__ import print_function
+import os
+import uuid
+from Queue import Empty
+import argparse
+import json
+import logging
+import sys
+from multiprocessing import Queue, Process
+import time_uuid
+
+import datetime
+from cloghandler import ConcurrentRotatingFileHandler
+import requests
+import traceback
+import time
+from sys import platform as _platform
+
+import signal
+
+from usergrid import UsergridQueryIterator
+import urllib3
+
+__author__ = 'Jeff.West@yahoo.com'
+
+ECID = str(uuid.uuid1())
+key_version = 'v4'
+
+logger = logging.getLogger('GraphMigrator')
+worker_logger = logging.getLogger('Worker')
+collection_worker_logger = logging.getLogger('CollectionWorker')
+error_logger = logging.getLogger('ErrorLogger')
+audit_logger = logging.getLogger('AuditLogger')
+status_logger = logging.getLogger('StatusLogger')
+
+urllib3.disable_warnings()
+
+DEFAULT_CREATE_APPS = False
+DEFAULT_RETRY_SLEEP = 10
+DEFAULT_PROCESSING_SLEEP = 1
+
+queue = Queue()
+QSIZE_OK = False
+
+try:
+    queue.qsize()
+    QSIZE_OK = True
+except:
+    pass
+
+session_source = requests.Session()
+session_target = requests.Session()
+
+
+def total_seconds(td):
+    return (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10 ** 6) / 10 ** 6
+
+
+def init_logging(stdout_enabled=True):
+    root_logger = logging.getLogger()
+    root_logger.setLevel(logging.getLevelName(config.get('log_level', 'INFO')))
+
+    # root_logger.setLevel(logging.WARN)
+
+    logging.getLogger('requests.packages.urllib3.connectionpool').setLevel(logging.ERROR)
+    logging.getLogger('boto').setLevel(logging.ERROR)
+    logging.getLogger('urllib3.connectionpool').setLevel(logging.WARN)
+
+    log_formatter = logging.Formatter(
+            fmt='%(asctime)s | ' + ECID + ' | %(name)s | %(processName)s | %(levelname)s | %(message)s',
+            datefmt='%m/%d/%Y %I:%M:%S %p')
+
+    stdout_logger = logging.StreamHandler(sys.stdout)
+    stdout_logger.setFormatter(log_formatter)
+    root_logger.addHandler(stdout_logger)
+
+    if stdout_enabled:
+        stdout_logger.setLevel(logging.getLevelName(config.get('log_level', 'INFO')))
+
+    # base log file
+
+    log_file_name = '%s/migrator.log' % config.get('log_dir')
+
+    # ConcurrentRotatingFileHandler
+    rotating_file = ConcurrentRotatingFileHandler(filename=log_file_name,
+                                                  mode='a',
+                                                  maxBytes=404857600,
+                                                  backupCount=0)
+    rotating_file.setFormatter(log_formatter)
+    rotating_file.setLevel(logging.INFO)
+
+    root_logger.addHandler(rotating_file)
+
+    error_log_file_name = '%s/migrator_errors.log' % config.get('log_dir')
+    error_rotating_file = ConcurrentRotatingFileHandler(filename=error_log_file_name,
+                                                        mode='a',
+                                                        maxBytes=404857600,
+                                                        backupCount=0)
+    error_rotating_file.setFormatter(log_formatter)
+    error_rotating_file.setLevel(logging.ERROR)
+
+    root_logger.addHandler(error_rotating_file)
+
+
+entity_name_map = {
+    'users': 'username'
+}
+
+config = {}
+
+# URL Templates for Usergrid
+org_management_app_url_template = "{api_url}/management/organizations/{org}/applications?client_id={client_id}&client_secret={client_secret}"
+org_management_url_template = "{api_url}/management/organizations/{org}/applications?client_id={client_id}&client_secret={client_secret}"
+org_url_template = "{api_url}/{org}?client_id={client_id}&client_secret={client_secret}"
+app_url_template = "{api_url}/{org}/{app}?client_id={client_id}&client_secret={client_secret}"
+collection_url_template = "{api_url}/{org}/{app}/{collection}?client_id={client_id}&client_secret={client_secret}"
+collection_query_url_template = "{api_url}/{org}/{app}/{collection}?ql={ql}&client_id={client_id}&client_secret={client_secret}&limit={limit}"
+collection_graph_url_template = "{api_url}/{org}/{app}/{collection}?client_id={client_id}&client_secret={client_secret}&limit={limit}"
+connection_query_url_template = "{api_url}/{org}/{app}/{collection}/{uuid}/{verb}?client_id={client_id}&client_secret={client_secret}"
+connecting_query_url_template = "{api_url}/{org}/{app}/{collection}/{uuid}/connecting/{verb}?client_id={client_id}&client_secret={client_secret}"
+connection_create_by_uuid_url_template = "{api_url}/{org}/{app}/{collection}/{uuid}/{verb}/{target_uuid}?client_id={client_id}&client_secret={client_secret}"
+connection_create_by_name_url_template = "{api_url}/{org}/{app}/{collection}/{uuid}/{verb}/{target_type}/{target_name}?client_id={client_id}&client_secret={client_secret}"
+get_entity_url_template = "{api_url}/{org}/{app}/{collection}/{uuid}?client_id={client_id}&client_secret={client_secret}&connections=none"
+get_entity_url_with_connections_template = "{api_url}/{org}/{app}/{collection}/{uuid}?client_id={client_id}&client_secret={client_secret}"
+put_entity_url_template = "{api_url}/{org}/{app}/{collection}/{uuid}?client_id={client_id}&client_secret={client_secret}"
+
+user_credentials_url_template = "{api_url}/{org}/{app}/users/{uuid}/credentials"
+
+ignore_collections = ['activities', 'queues', 'events', 'notifications']
+
+
+class StatusListener(Process):
+    def __init__(self, status_queue, worker_queue):
+        super(StatusListener, self).__init__()
+        self.status_queue = status_queue
+        self.worker_queue = worker_queue
+
+    def run(self):
+        keep_going = True
+
+        org_results = {
+            'name': config.get('org'),
+            'apps': {},
+        }
+
+        empty_count = 0
+
+        while keep_going:
+
+            try:
+                app, collection, status_map = self.status_queue.get(timeout=60)
+                status_logger.info('Received status update for app/collection: [%s / %s]' % (app, collection))
+                empty_count = 0
+                org_results['summary'] = {
+                    'max_created': -1,
+                    'max_modified': -1,
+                    'min_created': 1584946416000,
+                    'min_modified': 1584946416000,
+                    'count': 0,
+                    'bytes': 0
+                }
+
+                if app not in org_results['apps']:
+                    org_results['apps'][app] = {
+                        'collections': {}
+                    }
+
+                org_results['apps'][app]['collections'].update(status_map)
+
+                try:
+                    for app, app_data in org_results['apps'].items():
+                        app_data['summary'] = {
+                            'max_created': -1,
+                            'max_modified': -1,
+                            'min_created': 1584946416000,
+                            'min_modified': 1584946416000,
+                            'count': 0,
+                            'bytes': 0
+                        }
+
+                        if 'collections' in app_data:
+                            for collection, collection_data in app_data['collections'].items():
+
+                                app_data['summary']['count'] += collection_data['count']
+                                app_data['summary']['bytes'] += collection_data['bytes']
+
+                                org_results['summary']['count'] += collection_data['count']
+                                org_results['summary']['bytes'] += collection_data['bytes']
+
+                                # APP
+                                if collection_data.get('max_modified') > app_data['summary']['max_modified']:
+                                    app_data['summary']['max_modified'] = collection_data.get('max_modified')
+
+                                if collection_data.get('min_modified') < app_data['summary']['min_modified']:
+                                    app_data['summary']['min_modified'] = collection_data.get('min_modified')
+
+                                if collection_data.get('max_created') > app_data['summary']['max_created']:
+                                    app_data['summary']['max_created'] = collection_data.get('max_created')
+
+                                if collection_data.get('min_created') < app_data['summary']['min_created']:
+                                    app_data['summary']['min_created'] = collection_data.get('min_created')
+
+                                # ORG
+                                if collection_data.get('max_modified') > org_results['summary']['max_modified']:
+                                    org_results['summary']['max_modified'] = collection_data.get('max_modified')
+
+                                if collection_data.get('min_modified') < org_results['summary']['min_modified']:
+                                    org_results['summary']['min_modified'] = collection_data.get('min_modified')
+
+                                if collection_data.get('max_created') > org_results['summary']['max_created']:
+                                    org_results['summary']['max_created'] = collection_data.get('max_created')
+
+                                if collection_data.get('min_created') < org_results['summary']['min_created']:
+                                    org_results['summary']['min_created'] = collection_data.get('min_created')
+
+                        if QSIZE_OK:
+                            status_logger.warn('CURRENT Queue Depth: %s' % self.worker_queue.qsize())
+
+                        status_logger.warn('UPDATED status of org processed: %s' % json.dumps(org_results))
+
+                except KeyboardInterrupt as e:
+                    raise e
+
+                except:
+                    print(traceback.format_exc())
+
+            except KeyboardInterrupt as e:
+                status_logger.warn('FINAL status of org processed: %s' % json.dumps(org_results))
+                raise e
+
+            except Empty:
+                if QSIZE_OK:
+                    status_logger.warn('CURRENT Queue Depth: %s' % self.worker_queue.qsize())
+
+                status_logger.warn('CURRENT status of org processed: %s' % json.dumps(org_results))
+
+                status_logger.warning('EMPTY! Count=%s' % empty_count)
+
+                empty_count += 1
+
+                if empty_count >= 120:
+                    keep_going = False
+
+            except:
+                print(traceback.format_exc())
+
+        logger.warn('FINAL status of org processed: %s' % json.dumps(org_results))
+
+
+class EntityExportWorker(Process):
+    def __init__(self, work_queue, response_queue):
+        super(EntityExportWorker, self).__init__()
+        collection_worker_logger.debug('Creating worker!')
+        self.work_queue = work_queue
+        self.response_queue = response_queue
+
+    def run(self):
+
+        collection_worker_logger.info('starting run()...')
+        keep_going = True
+
+        empty_count = 0
+        app = 'NOT SET'
+        collection_name = 'NOT SET'
+        status_map = {}
+        entity_file = None
+
+        try:
+            while keep_going:
+
+                try:
+                    app, collection_name = self.work_queue.get(timeout=30)
+                    empty_count = 0
+
+                    status_map = self.process_collection(app, collection_name)
+
+                    status_map[collection_name]['iteration_finished'] = str(datetime.datetime.now())
+
+                    collection_worker_logger.warning(
+                            'Collection [%s / %s / %s] loop complete!  Max Created entity %s' % (
+                                config.get('org'), app, collection_name, status_map[collection_name]['max_created']))
+
+                    collection_worker_logger.warning(
+                            'Sending FINAL stats for app/collection [%s / %s]: %s' % (app, collection_name, status_map))
+
+                    self.response_queue.put((app, collection_name, status_map))
+
+                    collection_worker_logger.info('Done! Finished app/collection: %s / %s' % (app, collection_name))
+
+                except KeyboardInterrupt as e:
+                    raise e
+
+                except Empty:
+                    collection_worker_logger.warning('EMPTY! Count=%s' % empty_count)
+
+                    empty_count += 1
+
+                    if empty_count >= 2:
+                        keep_going = False
+
+                except Exception as e:
+                    logger.exception('Error in CollectionWorker processing collection [%s]' % collection_name)
+                    print(traceback.format_exc())
+
+        finally:
+            if entity_file is not None:
+                entity_file.close()
+
+            self.response_queue.put((app, collection_name, status_map))
+            collection_worker_logger.info('FINISHED!')
+
+    def process_collection(self, app, collection_name):
+
+        status_map = {
+            collection_name: {
+                'iteration_started': str(datetime.datetime.now()),
+                'max_created': -1,
+                'max_modified': -1,
+                'min_created': 1584946416000,
+                'min_modified': 1584946416000,
+                'count': 0,
+                'bytes': 0
+            }
+        }
+
+        # added a flag for using graph vs query/index
+        if config.get('graph', False):
+            source_collection_url = collection_graph_url_template.format(org=config.get('org'),
+                                                                         app=app,
+                                                                         collection=collection_name,
+                                                                         limit=config.get('limit'),
+                                                                         **config.get('source_endpoint'))
+        else:
+            source_collection_url = collection_query_url_template.format(org=config.get('org'),
+                                                                         app=app,
+                                                                         collection=collection_name,
+                                                                         limit=config.get('limit'),
+                                                                         ql="select * %s" % config.get(
+                                                                                 'ql'),
+                                                                         **config.get('source_endpoint'))
+        counter = 0
+
+        # use the UsergridQuery from the Python SDK to iterate the collection
+        q = UsergridQueryIterator(source_collection_url,
+                                  page_delay=config.get('page_sleep_time'),
+                                  sleep_time=config.get('error_retry_sleep'))
+
+        directory = os.path.join(config['export_path'], ECID, config['org'], app)
+
+        if not os.path.exists(directory):
+            os.makedirs(directory)
+
+        entity_filename = '_'.join([collection_name, 'entity-data'])
+        entity_filename_base = os.path.join(directory, entity_filename)
+        entity_file_number = 0
+        entity_file_counter = 0
+        entity_filename = '%s-%s.txt' % (entity_filename_base, entity_file_number)
+        entity_file = open(entity_filename, 'w')
+
+        edge_filename = '_'.join([collection_name, 'edge-data'])
+        edge_filename_base = os.path.join(directory, edge_filename)
+        edge_file_number = 0
+        edge_file_counter = 0
+        edge_filename = '%s-%s.txt' % (edge_filename_base, edge_file_number)
+        edge_file = open(edge_filename, 'w')
+
+        try:
+
+            for entity in q:
+                try:
+                    entity_file_counter += 1
+                    counter += 1
+
+                    if entity_file_counter > config['entities_per_file']:
+                        entity_file.close()
+                        entity_file_number += 1
+                        entity_file_counter = 0
+                        entity_filename = '%s-%s.txt' % (entity_filename_base, entity_file_number)
+                        entity_file = open(entity_filename, 'w')
+
+                    entity_file.write('%s\n' % json.dumps(entity))
+
+                    edge_names = get_edge_names(entity)
+
+                    for edge_name in edge_names:
+                        if not include_edge(collection_name, edge_name):
+                            continue
+
+                        connection_query_url = connection_query_url_template.format(
+                                org=config.get('org'),
+                                app=app,
+                                verb=edge_name,
+                                collection=collection_name,
+                                uuid=entity.get('uuid'),
+                                limit=config.get('limit'),
+                                **config.get('source_endpoint'))
+
+                        connection_query = UsergridQueryIterator(connection_query_url,
+                                                                 sleep_time=config.get('error_retry_sleep'))
+
+                        target_uuids = []
+
+                        try:
+                            for target_entity in connection_query:
+                                target_uuids.append(target_entity.get('uuid'))
+                        except:
+                            logger.exception('Error processing edge [%s] of entity [ %s / %s / %s]' % (
+                                edge_name, app, collection_name, entity.get('uuid')))
+
+                        if len(target_uuids) > 0:
+                            edge_file_counter += 1
+
+                            edges = {
+                                'entity': {
+                                    'type': entity.get('type'),
+                                    'uuid': entity.get('uuid')
+                                },
+                                'edge_name': edge_name,
+                                'target_uuids': target_uuids
+                            }
+
+                            if entity_file_counter > config['entities_per_file']:
+                                edge_file.close()
+                                edge_file_number += 1
+                                edge_file_counter = 0
+                                edge_filename = '%s-%s.txt' % (edge_filename_base, edge_file_number)
+                                edge_file = open(edge_filename, 'w')
+
+                            edge_file.write('%s\n' % json.dumps(edges))
+
+                    if 'created' in entity:
+
+                        try:
+                            entity_created = long(entity.get('created'))
+
+                            if entity_created > status_map[collection_name]['max_created']:
+                                status_map[collection_name]['max_created'] = entity_created
+                                status_map[collection_name]['max_created_str'] = str(
+                                        datetime.datetime.fromtimestamp(entity_created / 1000))
+
+                            if entity_created < status_map[collection_name]['min_created']:
+                                status_map[collection_name]['min_created'] = entity_created
+                                status_map[collection_name]['min_created_str'] = str(
+                                        datetime.datetime.fromtimestamp(entity_created / 1000))
+
+                        except ValueError:
+                            pass
+
+                    if 'modified' in entity:
+
+                        try:
+                            entity_modified = long(entity.get('modified'))
+
+                            if entity_modified > status_map[collection_name]['max_modified']:
+                                status_map[collection_name]['max_modified'] = entity_modified
+                                status_map[collection_name]['max_modified_str'] = str(
+                                        datetime.datetime.fromtimestamp(entity_modified / 1000))
+
+                            if entity_modified < status_map[collection_name]['min_modified']:
+                                status_map[collection_name]['min_modified'] = entity_modified
+                                status_map[collection_name]['min_modified_str'] = str(
+                                        datetime.datetime.fromtimestamp(entity_modified / 1000))
+
+                        except ValueError:
+                            pass
+
+                    status_map[collection_name]['bytes'] += count_bytes(entity)
+                    status_map[collection_name]['count'] += 1
+
+                    if counter % 1000 == 1:
+                        try:
+                            collection_worker_logger.warning(
+                                    'Sending incremental stats for app/collection [%s / %s]: %s' % (
+                                        app, collection_name, status_map))
+
+                            self.response_queue.put((app, collection_name, status_map))
+
+                            if QSIZE_OK:
+                                collection_worker_logger.info(
+                                        'Counter=%s, collection queue depth=%s' % (
+                                            counter, self.work_queue.qsize()))
+                        except:
+                            pass
+
+                        collection_worker_logger.warn(
+                                'Current status of collections processed: %s' % json.dumps(status_map))
+                except KeyboardInterrupt:
+                    raise
+
+                except:
+                    logger.exception(
+                            'Error processing entity %s / %s / %s' % (app, collection_name, entity.get('uuid')))
+
+        except KeyboardInterrupt:
+            raise
+
+        except:
+            logger.exception('Error processing collection %s / %s ' % (app, collection_name))
+
+        finally:
+            if edge_file is not None:
+                edge_file.close()
+
+            if entity_file is not None:
+                entity_file.close()
+
+        return status_map
+
+
+def use_name_for_collection(collection_name):
+    return collection_name in config.get('use_name_for_collection', [])
+
+
+def include_edge(collection_name, edge_name):
+    include_edges = config.get('include_edge', [])
+
+    if include_edges is None:
+        include_edges = []
+
+    exclude_edges = config.get('exclude_edge', [])
+
+    if exclude_edges is None:
+        exclude_edges = []
+
+    if len(include_edges) > 0 and edge_name not in include_edges:
+        logger.debug(
+                'Skipping edge [%s] since it is not in INCLUDED list: %s' % (edge_name, include_edges))
+        return False
+
+    if edge_name in exclude_edges:
+        logger.debug(
+                'Skipping edge [%s] since it is in EXCLUDED list: %s' % (edge_name, exclude_edges))
+        return False
+
+    if (collection_name in ['users', 'user'] and edge_name in ['roles', 'followers', 'groups',
+                                                               'feed', 'activities']) \
+            or (collection_name in ['device', 'devices'] and edge_name in ['users']) \
+            or (collection_name in ['receipts', 'receipt'] and edge_name in ['device', 'devices']):
+        # feed and activities are not retrievable...
+        # roles and groups will be more efficiently handled from the role/group -> user
+        # followers will be handled by 'following'
+        # do only this from user -> device
+        return False
+
+    return True
+
+
+def get_source_identifier(source_entity):
+    entity_type = source_entity.get('type')
+
+    source_identifier = source_entity.get('uuid')
+
+    if use_name_for_collection(entity_type):
+
+        if entity_type in ['user']:
+            source_identifier = source_entity.get('username')
+        else:
+            source_identifier = source_entity.get('name')
+
+        if source_identifier is None:
+            source_identifier = source_entity.get('uuid')
+            logger.warn('Using UUID for entity [%s / %s]' % (entity_type, source_identifier))
+
+    return source_identifier
+
+
+def include_collection(collection_name):
+    exclude = config.get('exclude_collection', [])
+
+    if exclude is not None and collection_name in exclude:
+        return False
+
+    return True
+
+
+def get_edge_names(entity):
+    out_edge_names = [edge_name for edge_name in entity.get('metadata', {}).get('collections', [])]
+    out_edge_names += [edge_name for edge_name in entity.get('metadata', {}).get('connections', [])]
+
+    return out_edge_names
+
+
+def get_uuid_time(the_uuid_string):
+    return time_uuid.TimeUUID(the_uuid_string).get_datetime()
+
+
+def parse_args():
+    parser = argparse.ArgumentParser(description='Usergrid Org/App Migrator')
+
+    parser.add_argument('--log_dir',
+                        help='path to the place where logs will be written',
+                        default='./',
+                        type=str,
+                        required=False)
+
+    parser.add_argument('--log_level',
+                        help='log level - DEBUG, INFO, WARN, ERROR, CRITICAL',
+                        default='INFO',
+                        type=str,
+                        required=False)
+
+    parser.add_argument('-o', '--org',
+                        help='Name of the org to migrate',
+                        type=str,
+                        required=True)
+
+    parser.add_argument('-a', '--app',
+                        help='Name of one or more apps to include, specify none to include all apps',
+                        required=False,
+                        action='append')
+
+    parser.add_argument('-e', '--include_edge',
+                        help='Name of one or more edges/connection types to INCLUDE, specify none to include all edges',
+                        required=False,
+                        action='append')
+
+    parser.add_argument('--exclude_edge',
+                        help='Name of one or more edges/connection types to EXCLUDE, specify none to include all edges',
+                        required=False,
+                        action='append')
+
+    parser.add_argument('--exclude_collection',
+                        help='Name of one or more collections to EXCLUDE, specify none to include all collections',
+                        required=False,
+                        action='append')
+
+    parser.add_argument('-c', '--collection',
+                        help='Name of one or more collections to include, specify none to include all collections',
+                        default=[],
+                        action='append')
+
+    parser.add_argument('-s', '--source_config',
+                        help='The path to the source endpoint/org configuration file',
+                        type=str,
+                        default='source.json')
+
+    parser.add_argument('--export_path',
+                        help='The path to save the export files',
+                        type=str,
+                        default='.')
+
+    parser.add_argument('--limit',
+                        help='The number of entities to return per query request',
+                        type=int,
+                        default=100)
+
+    parser.add_argument('--entities_per_file',
+                        help='The number of entities to put in one JSON file',
+                        type=int,
+                        default=10000)
+
+    parser.add_argument('--error_retry_sleep',
+                        help='The number of seconds to wait between retrieving after an error',
+                        type=float,
+                        default=30)
+
+    parser.add_argument('--page_sleep_time',
+                        help='The number of seconds to wait between retrieving pages from the UsergridQueryIterator',
+                        type=float,
+                        default=.5)
+
+    parser.add_argument('--entity_sleep_time',
+                        help='The number of seconds to wait between retrieving pages from the UsergridQueryIterator',
+                        type=float,
+                        default=.1)
+
+    parser.add_argument('--workers',
+                        dest='collection_workers',
+                        help='The number of worker processes to do the migration',
+                        type=int,
+                        default=4)
+
+    parser.add_argument('--queue_size_max',
+                        help='The max size of entities to allow in the queue',
+                        type=int,
+                        default=100000)
+
+    parser.add_argument('--ql',
+                        help='The QL to use in the filter for reading data from collections',
+                        type=str,
+                        default='select * order by created asc')
+    # default='select * order by created asc')
+
+    parser.add_argument('--nohup',
+                        help='specifies not to use stdout for logging',
+                        action='store_true')
+
+    parser.add_argument('--graph',
+                        help='Use GRAPH instead of Query',
+                        dest='graph',
+                        action='store_true')
+
+    my_args = parser.parse_args(sys.argv[1:])
+
+    return vars(my_args)
+
+
+def init():
+    global config
+
+    config['collection_mapping'] = {}
+    config['app_mapping'] = {}
+    config['org_mapping'] = {}
+
+    with open(config.get('source_config'), 'r') as f:
+        config['source_config'] = json.load(f)
+
+    if config['exclude_collection'] is None:
+        config['exclude_collection'] = []
+
+    config['source_endpoint'] = config['source_config'].get('endpoint').copy()
+    config['source_endpoint'].update(config['source_config']['credentials'][config['org']])
+
+
+def wait_for(threads, label, sleep_time=60):
+    wait = True
+
+    logger.info('Starting to wait for [%s] threads with sleep time=[%s]' % (len(threads), sleep_time))
+
+    while wait:
+        wait = False
+        alive_count = 0
+
+        for t in threads:
+
+            if t.is_alive():
+                alive_count += 1
+                logger.info('Thread [%s] is still alive' % t.name)
+
+        if alive_count > 0:
+            wait = True
+            logger.info('Continuing to wait for [%s] threads with sleep time=[%s]' % (alive_count, sleep_time))
+            time.sleep(sleep_time)
+
+    logger.warn('All workers [%s] done!' % label)
+
+
+def count_bytes(entity):
+    entity_copy = entity.copy()
+
+    if 'metadata' in entity_copy:
+        del entity_copy['metadata']
+
+    entity_str = json.dumps(entity_copy)
+
+    return len(entity_str)
+
+
+def check_response_status(r, url, exit_on_error=True):
+    if r.status_code != 200:
+        logger.critical('HTTP [%s] on URL=[%s]' % (r.status_code, url))
+        logger.critical('Response: %s' % r.text)
+
+        if exit_on_error:
+            exit()
+
+
+def main():
+    global config
+
+    config = parse_args()
+    init()
+    init_logging()
+
+    status_map = {}
+
+    org_apps = {
+    }
+
+    if len(org_apps) == 0:
+        source_org_mgmt_url = org_management_url_template.format(org=config.get('org'),
+                                                                 limit=config.get('limit'),
+                                                                 **config.get('source_endpoint'))
+
+        print('Retrieving apps from [%s]' % source_org_mgmt_url)
+        logger.info('Retrieving apps from [%s]' % source_org_mgmt_url)
+
+        try:
+            # list the apps for the SOURCE org
+            logger.info('GET %s' % source_org_mgmt_url)
+            r = session_source.get(source_org_mgmt_url)
+
+            if r.status_code != 200:
+                logger.critical(
+                        'Abort processing: Unable to retrieve apps from [%s]: %s' % (source_org_mgmt_url, r.text))
+                exit()
+
+            logger.info(json.dumps(r.text))
+
+            org_apps = r.json().get('data')
+
+        except Exception as e:
+            logger.exception('ERROR Retrieving apps from [%s]' % source_org_mgmt_url)
+            print(traceback.format_exc())
+            logger.critical('Unable to retrieve apps from [%s] and will exit' % source_org_mgmt_url)
+            exit()
+
+    if _platform == "linux" or _platform == "linux2":
+        collection_queue = Queue(maxsize=config.get('queue_size_max'))
+        collection_response_queue = Queue(maxsize=config.get('queue_size_max'))
+    else:
+        collection_queue = Queue()
+        collection_response_queue = Queue()
+
+    logger.info('Starting entity_workers...')
+
+    status_listener = StatusListener(collection_response_queue, collection_queue)
+    status_listener.start()
+
+    # start the worker processes which will iterate the collections
+    collection_workers = [EntityExportWorker(collection_queue, collection_response_queue) for x in
+                          xrange(config.get('collection_workers'))]
+    [w.start() for w in collection_workers]
+
+    try:
+        apps_to_process = config.get('app')
+        collections_to_process = config.get('collection')
+
+        # iterate the apps retrieved from the org
+        for org_app in sorted(org_apps.keys()):
+            logger.info('Found SOURCE App: %s' % org_app)
+
+        time.sleep(3)
+
+        for org_app in sorted(org_apps.keys()):
+            parts = org_app.split('/')
+            app = parts[1]
+
+            # if apps are specified and the current app is not in the list, skip it
+            if apps_to_process and len(apps_to_process) > 0 and app not in apps_to_process:
+                logger.warning('Skipping app [%s] not included in process list [%s]' % (app, apps_to_process))
+                continue
+
+            logger.info('Processing app=[%s]' % app)
+
+            status_map[app] = {
+                'iteration_started': str(datetime.datetime.now()),
+                'max_created': -1,
+                'max_modified': -1,
+                'min_created': 1584946416000,
+                'min_modified': 1584946416000,
+                'count': 0,
+                'bytes': 0,
+                'collections': {}
+            }
+
+            # get the list of collections from the source org/app
+            source_app_url = app_url_template.format(org=config.get('org'),
+                                                     app=app,
+                                                     **config.get('source_endpoint'))
+            logger.info('GET %s' % source_app_url)
+
+            r_collections = session_source.get(source_app_url)
+
+            collection_attempts = 0
+
+            # sometimes this call was not working so I put it in a loop to force it...
+            while r_collections.status_code != 200 and collection_attempts < 5:
+                collection_attempts += 1
+                logger.warning('FAILED: GET (%s) [%s] URL: %s' % (r_collections.elapsed, r_collections.status_code,
+                                                                  source_app_url))
+                time.sleep(DEFAULT_RETRY_SLEEP)
+                r_collections = session_source.get(source_app_url)
+
+            if collection_attempts >= 5:
+                logger.critical('Unable to get collections at URL %s, skipping app' % source_app_url)
+                continue
+
+            app_response = r_collections.json()
+
+            logger.info('App Response: ' + json.dumps(app_response))
+
+            app_entities = app_response.get('entities', [])
+
+            if len(app_entities) > 0:
+                app_entity = app_entities[0]
+                collections = app_entity.get('metadata', {}).get('collections', {})
+                logger.info('Collection List: %s' % collections)
+
+                # iterate the collections which are returned.
+                for collection_name, collection_data in collections.iteritems():
+                    exclude_collections = config.get('exclude_collection', [])
+
+                    if exclude_collections is None:
+                        exclude_collections = []
+
+                    # filter out collections as configured...
+                    if collection_name in ignore_collections \
+                            or (len(collections_to_process) > 0 and collection_name not in collections_to_process) \
+                            or (len(exclude_collections) > 0 and collection_name in exclude_collections) \
+                            or (config.get('migrate') == 'credentials' and collection_name != 'users'):
+                        logger.warning('Skipping collection=[%s]' % collection_name)
+
+                        continue
+
+                    logger.info('Publishing app / collection: %s / %s' % (app, collection_name))
+
+                    collection_queue.put((app, collection_name))
+
+            status_map[app]['iteration_finished'] = str(datetime.datetime.now())
+
+            logger.info('Finished publishing collections for app [%s] !' % app)
+
+        # allow collection workers to finish
+        wait_for(collection_workers, label='collection_workers', sleep_time=30)
+
+        status_listener.terminate()
+
+    except KeyboardInterrupt:
+        logger.warning('Keyboard Interrupt, aborting...')
+        collection_queue.close()
+        collection_response_queue.close()
+
+        [os.kill(super(EntityExportWorker, p).pid, signal.SIGINT) for p in collection_workers]
+        os.kill(super(StatusListener, status_listener).pid, signal.SIGINT)
+
+        [w.terminate() for w in collection_workers]
+        status_listener.terminate()
+
+    logger.info('entity_workers DONE!')
+
+
+if __name__ == "__main__":
+    main()
diff --git a/utils/usergrid-util-python/usergrid_tools/migration/usergrid_data_migrator.py b/utils/usergrid-util-python/usergrid_tools/migration/usergrid_data_migrator.py
new file mode 100644
index 0000000..ae3f492
--- /dev/null
+++ b/utils/usergrid-util-python/usergrid_tools/migration/usergrid_data_migrator.py
@@ -0,0 +1,2186 @@
+# */
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *   http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing,
+# * software distributed under the License is distributed on an
+# * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# * KIND, either express or implied.  See the License for the
+#    * specific language governing permissions and limitations
+# * under the License.
+# */
+
+from __future__ import print_function
+from __future__ import print_function
+from __future__ import print_function
+import os
+import uuid
+from Queue import Empty
+import argparse
+import json
+import logging
+import sys
+from multiprocessing import Queue, Process
+from sets import Set
+import time_uuid
+import datetime
+from cloghandler import ConcurrentRotatingFileHandler
+import requests
+import traceback
+import redis
+import time
+from sys import platform as _platform
+import signal
+from requests.auth import HTTPBasicAuth
+from usergrid import UsergridQueryIterator
+import urllib3
+
+__author__ = 'Jeff.West@yahoo.com'
+
+ECID = str(uuid.uuid1())
+key_version = 'v4'
+
+logger = logging.getLogger('GraphMigrator')
+worker_logger = logging.getLogger('Worker')
+collection_worker_logger = logging.getLogger('CollectionWorker')
+error_logger = logging.getLogger('ErrorLogger')
+audit_logger = logging.getLogger('AuditLogger')
+status_logger = logging.getLogger('StatusLogger')
+
+urllib3.disable_warnings()
+
+DEFAULT_CREATE_APPS = False
+DEFAULT_RETRY_SLEEP = 10
+DEFAULT_PROCESSING_SLEEP = 1
+
+queue = Queue()
+QSIZE_OK = False
+
+try:
+    queue.qsize()
+    QSIZE_OK = True
+except:
+    pass
+
+session_source = requests.Session()
+session_target = requests.Session()
+
+cache = None
+
+
+def total_seconds(td):
+    return (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10 ** 6) / 10 ** 6
+
+
+def init_logging(stdout_enabled=True):
+    root_logger = logging.getLogger()
+    root_logger.setLevel(logging.getLevelName(config.get('log_level', 'INFO')))
+
+    # root_logger.setLevel(logging.WARN)
+
+    logging.getLogger('requests.packages.urllib3.connectionpool').setLevel(logging.ERROR)
+    logging.getLogger('boto').setLevel(logging.ERROR)
+    logging.getLogger('urllib3.connectionpool').setLevel(logging.WARN)
+
+    log_formatter = logging.Formatter(
+            fmt='%(asctime)s | ' + ECID + ' | %(name)s | %(processName)s | %(levelname)s | %(message)s',
+            datefmt='%m/%d/%Y %I:%M:%S %p')
+
+    stdout_logger = logging.StreamHandler(sys.stdout)
+    stdout_logger.setFormatter(log_formatter)
+    root_logger.addHandler(stdout_logger)
+
+    if stdout_enabled:
+        stdout_logger.setLevel(logging.getLevelName(config.get('log_level', 'INFO')))
+
+    # base log file
+
+    log_file_name = os.path.join(config.get('log_dir'),
+                                 '%s-%s-%s-migrator.log' % (config.get('org'), config.get('migrate'), ECID))
+
+    # ConcurrentRotatingFileHandler
+    rotating_file = ConcurrentRotatingFileHandler(filename=log_file_name,
+                                                  mode='a',
+                                                  maxBytes=404857600,
+                                                  backupCount=0)
+    rotating_file.setFormatter(log_formatter)
+    rotating_file.setLevel(logging.INFO)
+
+    root_logger.addHandler(rotating_file)
+    error_log_file_name = os.path.join(config.get('log_dir'), '%s-%s-%s-migrator-errors.log' % (
+        config.get('org'), config.get('migrate'), ECID))
+
+    error_rotating_file = ConcurrentRotatingFileHandler(filename=error_log_file_name,
+                                                        mode='a',
+                                                        maxBytes=404857600,
+                                                        backupCount=0)
+    error_rotating_file.setFormatter(log_formatter)
+    error_rotating_file.setLevel(logging.ERROR)
+
+    root_logger.addHandler(error_rotating_file)
+
+
+entity_name_map = {
+    'users': 'username'
+}
+
+config = {}
+
+# URL Templates for Usergrid
+org_management_app_url_template = "{api_url}/management/organizations/{org}/applications?client_id={client_id}&client_secret={client_secret}"
+org_management_url_template = "{api_url}/management/organizations/{org}/applications?client_id={client_id}&client_secret={client_secret}"
+org_url_template = "{api_url}/{org}?client_id={client_id}&client_secret={client_secret}"
+app_url_template = "{api_url}/{org}/{app}?client_id={client_id}&client_secret={client_secret}"
+collection_url_template = "{api_url}/{org}/{app}/{collection}?client_id={client_id}&client_secret={client_secret}"
+collection_query_url_template = "{api_url}/{org}/{app}/{collection}?ql={ql}&client_id={client_id}&client_secret={client_secret}&limit={limit}"
+collection_graph_url_template = "{api_url}/{org}/{app}/{collection}?client_id={client_id}&client_secret={client_secret}&limit={limit}"
+connection_query_url_template = "{api_url}/{org}/{app}/{collection}/{uuid}/{verb}?client_id={client_id}&client_secret={client_secret}"
+connecting_query_url_template = "{api_url}/{org}/{app}/{collection}/{uuid}/connecting/{verb}?client_id={client_id}&client_secret={client_secret}"
+connection_create_by_uuid_url_template = "{api_url}/{org}/{app}/{collection}/{uuid}/{verb}/{target_uuid}?client_id={client_id}&client_secret={client_secret}"
+connection_create_by_name_url_template = "{api_url}/{org}/{app}/{collection}/{uuid}/{verb}/{target_type}/{target_name}?client_id={client_id}&client_secret={client_secret}"
+
+connection_create_by_pairs_url_template = "{api_url}/{org}/{app}/{source_type_id}/{verb}/{target_type_id}?client_id={client_id}&client_secret={client_secret}"
+
+get_entity_url_template = "{api_url}/{org}/{app}/{collection}/{uuid}?client_id={client_id}&client_secret={client_secret}&connections=none"
+get_entity_url_with_connections_template = "{api_url}/{org}/{app}/{collection}/{uuid}?client_id={client_id}&client_secret={client_secret}"
+put_entity_url_template = "{api_url}/{org}/{app}/{collection}/{uuid}?client_id={client_id}&client_secret={client_secret}"
+permissions_url_template = "{api_url}/{org}/{app}/{collection}/{uuid}/permissions?client_id={client_id}&client_secret={client_secret}"
+
+user_credentials_url_template = "{api_url}/{org}/{app}/users/{uuid}/credentials"
+
+ignore_collections = ['activities', 'queues', 'events', 'notifications']
+
+
+class StatusListener(Process):
+    def __init__(self, status_queue, worker_queue):
+        super(StatusListener, self).__init__()
+        self.status_queue = status_queue
+        self.worker_queue = worker_queue
+
+    def run(self):
+        keep_going = True
+
+        org_results = {
+            'name': config.get('org'),
+            'apps': {},
+        }
+
+        empty_count = 0
+
+        status_file_name = os.path.join(config.get('log_dir'),
+                                        '%s-%s-%s-status.json' % (config.get('org'), config.get('migrate'), ECID))
+
+        while keep_going:
+
+            try:
+                app, collection, status_map = self.status_queue.get(timeout=60)
+                status_logger.info('Received status update for app/collection: [%s / %s]' % (app, collection))
+                empty_count = 0
+                org_results['summary'] = {
+                    'max_created': -1,
+                    'max_modified': -1,
+                    'min_created': 1584946416000,
+                    'min_modified': 1584946416000,
+                    'count': 0,
+                    'bytes': 0
+                }
+
+                if app not in org_results['apps']:
+                    org_results['apps'][app] = {
+                        'collections': {}
+                    }
+
+                org_results['apps'][app]['collections'].update(status_map)
+
+                try:
+                    for app, app_data in org_results['apps'].iteritems():
+                        app_data['summary'] = {
+                            'max_created': -1,
+                            'max_modified': -1,
+                            'min_created': 1584946416000,
+                            'min_modified': 1584946416000,
+                            'count': 0,
+                            'bytes': 0
+                        }
+
+                        if 'collections' in app_data:
+                            for collection, collection_data in app_data['collections'].iteritems():
+
+                                app_data['summary']['count'] += collection_data['count']
+                                app_data['summary']['bytes'] += collection_data['bytes']
+
+                                org_results['summary']['count'] += collection_data['count']
+                                org_results['summary']['bytes'] += collection_data['bytes']
+
+                                # APP
+                                if collection_data.get('max_modified') > app_data['summary']['max_modified']:
+                                    app_data['summary']['max_modified'] = collection_data.get('max_modified')
+
+                                if collection_data.get('min_modified') < app_data['summary']['min_modified']:
+                                    app_data['summary']['min_modified'] = collection_data.get('min_modified')
+
+                                if collection_data.get('max_created') > app_data['summary']['max_created']:
+                                    app_data['summary']['max_created'] = collection_data.get('max_created')
+
+                                if collection_data.get('min_created') < app_data['summary']['min_created']:
+                                    app_data['summary']['min_created'] = collection_data.get('min_created')
+
+                                # ORG
+                                if collection_data.get('max_modified') > org_results['summary']['max_modified']:
+                                    org_results['summary']['max_modified'] = collection_data.get('max_modified')
+
+                                if collection_data.get('min_modified') < org_results['summary']['min_modified']:
+                                    org_results['summary']['min_modified'] = collection_data.get('min_modified')
+
+                                if collection_data.get('max_created') > org_results['summary']['max_created']:
+                                    org_results['summary']['max_created'] = collection_data.get('max_created')
+
+                                if collection_data.get('min_created') < org_results['summary']['min_created']:
+                                    org_results['summary']['min_created'] = collection_data.get('min_created')
+
+                        if QSIZE_OK:
+                            status_logger.warn('CURRENT Queue Depth: %s' % self.worker_queue.qsize())
+
+                        status_logger.warn('UPDATED status of org processed: %s' % json.dumps(org_results))
+
+                        try:
+                            logger.info('Writing status to file: %s' % status_file_name)
+
+                            with open(status_file_name, 'w') as f:
+                                json.dump(org_results, f, indent=2)
+                        except:
+                            print traceback.format_exc()
+
+                except KeyboardInterrupt, e:
+                    raise e
+
+                except:
+                    print traceback.format_exc()
+
+            except KeyboardInterrupt, e:
+                status_logger.warn('FINAL status of org processed: %s' % json.dumps(org_results))
+                raise e
+
+            except Empty:
+                if QSIZE_OK:
+                    status_logger.warn('CURRENT Queue Depth: %s' % self.worker_queue.qsize())
+
+                status_logger.warn('CURRENT status of org processed: %s' % json.dumps(org_results))
+
+                status_logger.warning('EMPTY! Count=%s' % empty_count)
+
+                empty_count += 1
+
+                if empty_count >= 120:
+                    keep_going = False
+
+            except:
+                print traceback.format_exc()
+
+        logger.warn('FINAL status of org processed: %s' % json.dumps(org_results))
+
+        try:
+            logger.info('Writing final status to file: %s' % status_file_name)
+            with open(status_file_name, 'w') as f:
+                json.dump(org_results, f, indent=2)
+        except:
+            print traceback.format_exc()
+
+
+class EntityWorker(Process):
+    def __init__(self, queue, handler_function):
+        super(EntityWorker, self).__init__()
+
+        worker_logger.debug('Creating worker!')
+        self.queue = queue
+        self.handler_function = handler_function
+
+    def run(self):
+
+        worker_logger.info('starting run()...')
+        keep_going = True
+
+        count_processed = 0
+        empty_count = 0
+        start_time = int(time.time())
+
+        while keep_going:
+
+            try:
+                # get an entity with the app and collection name
+                app, collection_name, entity = self.queue.get(timeout=120)
+                empty_count = 0
+
+                # if entity.get('type') == 'user':
+                #     entity = confirm_user_entity(app, entity)
+
+                # the handler operation is the specified operation such as migrate_graph
+                if self.handler_function is not None:
+                    try:
+                        message_start_time = int(time.time())
+                        processed = self.handler_function(app, collection_name, entity)
+                        message_end_time = int(time.time())
+
+                        if processed:
+                            count_processed += 1
+
+                            total_time = message_end_time - start_time
+                            avg_time_per_message = total_time / count_processed
+                            message_time = message_end_time - message_start_time
+
+                            worker_logger.debug('Processed [%sth] entity = %s / %s / %s' % (
+                                count_processed, app, collection_name, entity.get('uuid')))
+
+                            if count_processed % 1000 == 1:
+                                worker_logger.info(
+                                        'Processed [%sth] entity = [%s / %s / %s] in [%s]s - avg time/message [%s]' % (
+                                            count_processed, app, collection_name, entity.get('uuid'), message_time,
+                                            avg_time_per_message))
+
+                    except KeyboardInterrupt, e:
+                        raise e
+
+                    except Exception, e:
+                        logger.exception('Error in EntityWorker processing message')
+                        print traceback.format_exc()
+
+            except KeyboardInterrupt, e:
+                raise e
+
+            except Empty:
+                worker_logger.warning('EMPTY! Count=%s' % empty_count)
+
+                empty_count += 1
+
+                if empty_count >= 2:
+                    keep_going = False
+
+            except Exception, e:
+                logger.exception('Error in EntityWorker run()')
+                print traceback.format_exc()
+
+
+class CollectionWorker(Process):
+    def __init__(self, work_queue, entity_queue, response_queue):
+        super(CollectionWorker, self).__init__()
+        collection_worker_logger.debug('Creating worker!')
+        self.work_queue = work_queue
+        self.response_queue = response_queue
+        self.entity_queue = entity_queue
+
+    def run(self):
+
+        collection_worker_logger.info('starting run()...')
+        keep_going = True
+
+        counter = 0
+        # max_created = 0
+        empty_count = 0
+        app = 'ERROR'
+        collection_name = 'NOT SET'
+        status_map = {}
+        sleep_time = 10
+
+        try:
+
+            while keep_going:
+
+                try:
+                    app, collection_name = self.work_queue.get(timeout=30)
+
+                    status_map = {
+                        collection_name: {
+                            'iteration_started': str(datetime.datetime.now()),
+                            'max_created': -1,
+                            'max_modified': -1,
+                            'min_created': 1584946416000,
+                            'min_modified': 1584946416000,
+                            'count': 0,
+                            'bytes': 0
+                        }
+                    }
+
+                    empty_count = 0
+
+                    # added a flag for using graph vs query/index
+                    if config.get('graph', False):
+                        source_collection_url = collection_graph_url_template.format(org=config.get('org'),
+                                                                                     app=app,
+                                                                                     collection=collection_name,
+                                                                                     limit=config.get('limit'),
+                                                                                     **config.get('source_endpoint'))
+                    else:
+                        source_collection_url = collection_query_url_template.format(org=config.get('org'),
+                                                                                     app=app,
+                                                                                     collection=collection_name,
+                                                                                     limit=config.get('limit'),
+                                                                                     ql="select * %s" % config.get(
+                                                                                             'ql'),
+                                                                                     **config.get('source_endpoint'))
+
+                    logger.info('Iterating URL: %s' % source_collection_url)
+
+                    # use the UsergridQuery from the Python SDK to iterate the collection
+                    q = UsergridQueryIterator(source_collection_url,
+                                              page_delay=config.get('page_sleep_time'),
+                                              sleep_time=config.get('error_retry_sleep'))
+
+                    for entity in q:
+
+                        # begin entity loop
+
+                        self.entity_queue.put((app, collection_name, entity))
+                        counter += 1
+
+                        if 'created' in entity:
+
+                            try:
+                                entity_created = long(entity.get('created'))
+
+                                if entity_created > status_map[collection_name]['max_created']:
+                                    status_map[collection_name]['max_created'] = entity_created
+                                    status_map[collection_name]['max_created_str'] = str(
+                                            datetime.datetime.fromtimestamp(entity_created / 1000))
+
+                                if entity_created < status_map[collection_name]['min_created']:
+                                    status_map[collection_name]['min_created'] = entity_created
+                                    status_map[collection_name]['min_created_str'] = str(
+                                            datetime.datetime.fromtimestamp(entity_created / 1000))
+
+                            except ValueError:
+                                pass
+
+                        if 'modified' in entity:
+
+                            try:
+                                entity_modified = long(entity.get('modified'))
+
+                                if entity_modified > status_map[collection_name]['max_modified']:
+                                    status_map[collection_name]['max_modified'] = entity_modified
+                                    status_map[collection_name]['max_modified_str'] = str(
+                                            datetime.datetime.fromtimestamp(entity_modified / 1000))
+
+                                if entity_modified < status_map[collection_name]['min_modified']:
+                                    status_map[collection_name]['min_modified'] = entity_modified
+                                    status_map[collection_name]['min_modified_str'] = str(
+                                            datetime.datetime.fromtimestamp(entity_modified / 1000))
+
+                            except ValueError:
+                                pass
+
+                        status_map[collection_name]['bytes'] += count_bytes(entity)
+                        status_map[collection_name]['count'] += 1
+
+                        if counter % 1000 == 1:
+                            try:
+                                collection_worker_logger.warning(
+                                        'Sending stats for app/collection [%s / %s]: %s' % (
+                                            app, collection_name, status_map))
+
+                                self.response_queue.put((app, collection_name, status_map))
+
+                                if QSIZE_OK:
+                                    collection_worker_logger.info(
+                                            'Counter=%s, collection queue depth=%s' % (
+                                                counter, self.work_queue.qsize()))
+                            except:
+                                pass
+
+                            collection_worker_logger.warn(
+                                    'Current status of collections processed: %s' % json.dumps(status_map))
+
+                        if config.get('entity_sleep_time') > 0:
+                            collection_worker_logger.debug(
+                                    'sleeping for [%s]s per entity...' % (config.get('entity_sleep_time')))
+                            time.sleep(config.get('entity_sleep_time'))
+                            collection_worker_logger.debug(
+                                    'STOPPED sleeping for [%s]s per entity...' % (config.get('entity_sleep_time')))
+
+                    # end entity loop
+
+                    status_map[collection_name]['iteration_finished'] = str(datetime.datetime.now())
+
+                    collection_worker_logger.warning(
+                            'Collection [%s / %s / %s] loop complete!  Max Created entity %s' % (
+                                config.get('org'), app, collection_name, status_map[collection_name]['max_created']))
+
+                    collection_worker_logger.warning(
+                            'Sending FINAL stats for app/collection [%s / %s]: %s' % (app, collection_name, status_map))
+
+                    self.response_queue.put((app, collection_name, status_map))
+
+                    collection_worker_logger.info('Done! Finished app/collection: %s / %s' % (app, collection_name))
+
+                except KeyboardInterrupt, e:
+                    raise e
+
+                except Empty:
+                    collection_worker_logger.warning('EMPTY! Count=%s' % empty_count)
+
+                    empty_count += 1
+
+                    if empty_count >= 2:
+                        keep_going = False
+
+                except Exception as e:
+                    logger.exception('Error in CollectionWorker processing collection [%s]' % collection_name)
+                    print traceback.format_exc()
+
+        finally:
+            self.response_queue.put((app, collection_name, status_map))
+            collection_worker_logger.info('FINISHED!')
+
+
+def use_name_for_collection(collection_name):
+    return collection_name in config.get('use_name_for_collection', [])
+
+
+def include_edge(collection_name, edge_name):
+    include_edges = config.get('include_edge', [])
+
+    if include_edges is None:
+        include_edges = []
+
+    exclude_edges = config.get('exclude_edge', [])
+
+    if exclude_edges is None:
+        exclude_edges = []
+
+    if len(include_edges) > 0 and edge_name not in include_edges:
+        logger.debug(
+                'Skipping edge [%s] since it is not in INCLUDED list: %s' % (edge_name, include_edges))
+        return False
+
+    if edge_name in exclude_edges:
+        logger.debug(
+                'Skipping edge [%s] since it is in EXCLUDED list: %s' % (edge_name, exclude_edges))
+        return False
+
+    if (collection_name in ['users', 'user'] and edge_name in ['followers', 'feed', 'activities']) \
+            or (collection_name in ['receipts', 'receipt'] and edge_name in ['device', 'devices']):
+        # feed and activities are not retrievable...
+        # roles and groups will be more efficiently handled from the role/group -> user
+        # followers will be handled by 'following'
+        # do only this from user -> device
+        return False
+
+    return True
+
+
+def exclude_edge(collection_name, edge_name):
+    exclude_edges = config.get('exclude_edge', [])
+
+    if exclude_edges is None:
+        exclude_edges = []
+
+    if edge_name in exclude_edges:
+        logger.debug('Skipping edge [%s] since it is in EXCLUDED list: %s' % (edge_name, exclude_edges))
+        return True
+
+    if (collection_name in ['users', 'user'] and edge_name in ['followers', 'feed', 'activities']) \
+            or (collection_name in ['receipts', 'receipt'] and edge_name in ['device', 'devices']):
+        # feed and activities are not retrievable...
+        # roles and groups will be more efficiently handled from the role/group -> user
+        # followers will be handled by 'following'
+        # do only this from user -> device
+        return True
+
+    return False
+
+
+def confirm_user_entity(app, source_entity, attempts=0):
+    attempts += 1
+
+    source_entity_url = get_entity_url_template.format(org=config.get('org'),
+                                                       app=app,
+                                                       collection='users',
+                                                       uuid=source_entity.get('username'),
+                                                       **config.get('source_endpoint'))
+
+    if attempts >= 5:
+        logger.warning('Punting after [%s] attempts to confirm user at URL [%s], will use the source entity...' % (
+            attempts, source_entity_url))
+
+        return source_entity
+
+    r = requests.get(url=source_entity_url)
+
+    if r.status_code == 200:
+        retrieved_entity = r.json().get('entities')[0]
+
+        if retrieved_entity.get('uuid') != source_entity.get('uuid'):
+            logger.info(
+                    'UUID of Source Entity [%s] differs from uuid [%s] of retrieved entity at URL=[%s] and will be substituted' % (
+                        source_entity.get('uuid'), retrieved_entity.get('uuid'), source_entity_url))
+
+        return retrieved_entity
+
+    elif 'service_resource_not_found' in r.text:
+
+        logger.warn('Unable to retrieve user at URL [%s], and will use source entity.  status=[%s] response: %s...' % (
+            source_entity_url, r.status_code, r.text))
+
+        return source_entity
+
+    else:
+        logger.error('After [%s] attempts to confirm user at URL [%s], received status [%s] message: %s...' % (
+            attempts, source_entity_url, r.status_code, r.text))
+
+        time.sleep(DEFAULT_RETRY_SLEEP)
+
+        return confirm_user_entity(app, source_entity, attempts)
+
+
+def create_connection(app, collection_name, source_entity, edge_name, target_entity):
+    target_app, target_collection, target_org = get_target_mapping(app, collection_name)
+
+    source_identifier = get_source_identifier(source_entity)
+    target_identifier = get_source_identifier(target_entity)
+
+    source_type_id = '%s/%s' % (source_entity.get('type'), source_identifier)
+    target_type_id = '%s/%s' % (target_entity.get('type'), target_identifier)
+
+    if source_entity.get('type') == 'user':
+        source_type_id = '%s/%s' % ('users', source_entity.get('username'))
+
+    if target_entity.get('type') == 'user':
+        if edge_name == 'users':
+            target_type_id = target_entity.get('uuid')
+        else:
+            target_type_id = '%s/%s' % ('users', target_entity.get('uuid'))
+
+    if target_entity.get('type') == 'device':
+        if edge_name == 'devices':
+            target_type_id = target_entity.get('uuid')
+        else:
+            target_type_id = '%s/%s' % ('devices', target_entity.get('uuid'))
+
+    if target_entity.get('type') == 'receipt':
+        if edge_name == 'receipts':
+            target_type_id = target_entity.get('uuid')
+        else:
+            target_type_id = '%s/%s' % ('receipts', target_entity.get('uuid'))
+
+    create_connection_url = connection_create_by_pairs_url_template.format(
+            org=target_org,
+            app=target_app,
+            source_type_id=source_type_id,
+            verb=edge_name,
+            target_type_id=target_type_id,
+            **config.get('target_endpoint'))
+
+    if not config.get('skip_cache_read', False):
+        processed = cache.get(create_connection_url)
+
+        if processed not in [None, 'None']:
+            logger.debug('Skipping visited Edge: [%s / %s / %s] --[%s]--> [%s / %s / %s]: %s ' % (
+                app, collection_name, source_identifier, edge_name, target_app, target_entity.get('type'),
+                target_entity.get('name'), create_connection_url))
+
+            return True
+
+    logger.info('Connecting entity [%s / %s / %s] --[%s]--> [%s / %s / %s]: %s ' % (
+        app, collection_name, source_identifier, edge_name, target_app, target_entity.get('type'),
+        target_entity.get('name', target_entity.get('uuid')), create_connection_url))
+
+    attempts = 0
+
+    while attempts < 5:
+        attempts += 1
+
+        r_create = session_target.post(create_connection_url)
+
+        if r_create.status_code == 200:
+
+            if not config.get('skip_cache_write', False):
+                cache.set(create_connection_url, 1)
+
+            return True
+        else:
+            if r_create.status_code >= 500:
+
+                if attempts < 5:
+                    logger.warning('FAILED [%s] (will retry) to create connection at URL=[%s]: %s' % (
+                        r_create.status_code, create_connection_url, r_create.text))
+                    time.sleep(DEFAULT_RETRY_SLEEP)
+                else:
+                    logger.critical(
+                            'FAILED [%s] (WILL NOT RETRY - max attempts) to create connection at URL=[%s]: %s' % (
+                                r_create.status_code, create_connection_url, r_create.text))
+                    return False
+
+            elif r_create.status_code in [401, 404]:
+
+                if config.get('repair_data', False):
+                    logger.warning('FAILED [%s] (WILL attempt repair) to create connection at URL=[%s]: %s' % (
+                        r_create.status_code, create_connection_url, r_create.text))
+                    migrate_data(app, source_entity.get('type'), source_entity, force=True)
+                    migrate_data(app, target_entity.get('type'), target_entity, force=True)
+
+                else:
+                    logger.critical('FAILED [%s] (WILL NOT attempt repair) to create connection at URL=[%s]: %s' % (
+                        r_create.status_code, create_connection_url, r_create.text))
+
+            else:
+                logger.warning('FAILED [%s] (will retry) to create connection at URL=[%s]: %s' % (
+                    r_create.status_code, create_connection_url, r_create.text))
+
+    return False
+
+
+def process_edges(app, collection_name, source_entity, edge_name, connection_stack):
+
+    source_identifier = get_source_identifier(source_entity)
+
+    while len(connection_stack) > 0:
+
+        target_entity = connection_stack.pop()
+
+        if exclude_collection(collection_name) or exclude_collection(target_entity.get('type')):
+            logger.debug('EXCLUDING Edge (collection): [%s / %s / %s] --[%s]--> ?' % (
+                app, collection_name, source_identifier, edge_name ))
+            continue
+
+        create_connection(app, collection_name, source_entity, edge_name, target_entity)
+
+
+def migrate_out_graph_edge_type(app, collection_name, source_entity, edge_name, depth=0):
+    if not include_edge(collection_name, edge_name):
+        return True
+
+    source_uuid = source_entity.get('uuid')
+
+    key = '%s:edge:out:%s:%s' % (key_version, source_uuid, edge_name)
+
+    if not config.get('skip_cache_read', False):
+        date_visited = cache.get(key)
+
+        if date_visited not in [None, 'None']:
+            logger.info('Skipping EDGE [%s / %s --%s-->] - visited at %s' % (
+                collection_name, source_uuid, edge_name, date_visited))
+            return True
+        else:
+            cache.delete(key)
+
+    if not config.get('skip_cache_write', False):
+        cache.set(name=key, value=str(int(time.time())), ex=config.get('visit_cache_ttl', 3600 * 2))
+
+    logger.debug('Visiting EDGE [%s / %s (%s) --%s-->] at %s' % (
+        collection_name, source_uuid, get_uuid_time(source_uuid), edge_name, str(datetime.datetime.utcnow())))
+
+    response = True
+
+    source_identifier = get_source_identifier(source_entity)
+
+    count_edges = 0
+
+    logger.debug(
+            'Processing edge type=[%s] of entity [%s / %s / %s]' % (edge_name, app, collection_name, source_identifier))
+
+    target_app, target_collection, target_org = get_target_mapping(app, collection_name)
+
+    connection_query_url = connection_query_url_template.format(
+            org=config.get('org'),
+            app=app,
+            verb=edge_name,
+            collection=collection_name,
+            uuid=source_identifier,
+            limit=config.get('limit'),
+            **config.get('source_endpoint'))
+
+    connection_query = UsergridQueryIterator(connection_query_url, sleep_time=config.get('error_retry_sleep'))
+
+    connection_stack = []
+
+    for target_entity in connection_query:
+        target_connection_collection = config.get('collection_mapping', {}).get(target_entity.get('type'),
+                                                                                target_entity.get('type'))
+
+        target_ok = migrate_graph(app, target_entity.get('type'), source_entity=target_entity, depth=depth)
+
+        if not target_ok:
+            logger.critical(
+                    'Error migrating TARGET entity data for connection [%s / %s / %s] --[%s]--> [%s / %s / %s]' % (
+                        app, collection_name, source_identifier, edge_name, app, target_connection_collection,
+                        target_entity.get('name', target_entity.get('uuid'))))
+
+        count_edges += 1
+        connection_stack.append(target_entity)
+
+    process_edges(app, collection_name, source_entity, edge_name, connection_stack)
+
+    return response
+
+
+def get_source_identifier(source_entity):
+    entity_type = source_entity.get('type')
+
+    source_identifier = source_entity.get('uuid')
+
+    if use_name_for_collection(entity_type):
+
+        if entity_type in ['user']:
+            source_identifier = source_entity.get('username')
+        else:
+            source_identifier = source_entity.get('name')
+
+        if source_identifier is None:
+            source_identifier = source_entity.get('uuid')
+            logger.warn('Using UUID for entity [%s / %s]' % (entity_type, source_identifier))
+
+    return source_identifier
+
+
+def include_collection(collection_name):
+    if collection_name in ['events']:
+        return False
+
+    include = config.get('collection', [])
+
+    if include is not None and len(include) > 0 and collection_name not in include:
+        return False
+
+    exclude = config.get('exclude_collection', [])
+
+    if exclude is not None and collection_name in exclude:
+        return False
+
+    return True
+
+
+def exclude_collection(collection_name):
+    exclude = config.get('exclude_collection', [])
+
+    if exclude is not None and collection_name in exclude:
+        return True
+
+    return False
+
+
+def migrate_in_graph_edge_type(app, collection_name, source_entity, edge_name, depth=0):
+    source_uuid = source_entity.get('uuid')
+    key = '%s:edges:in:%s:%s' % (key_version, source_uuid, edge_name)
+
+    if not config.get('skip_cache_read', False):
+        date_visited = cache.get(key)
+
+        if date_visited not in [None, 'None']:
+            logger.info('Skipping EDGE [--%s--> %s / %s] - visited at %s' % (
+                collection_name, source_uuid, edge_name, date_visited))
+            return True
+        else:
+            cache.delete(key)
+
+    if not config.get('skip_cache_write', False):
+        cache.set(name=key, value=str(int(time.time())), ex=config.get('visit_cache_ttl', 3600 * 2))
+
+    logger.debug('Visiting EDGE [--%s--> %s / %s (%s)] at %s' % (
+        edge_name, collection_name, source_uuid, get_uuid_time(source_uuid), str(datetime.datetime.utcnow())))
+
+    source_identifier = get_source_identifier(source_entity)
+
+    if exclude_collection(collection_name):
+        logger.debug('Excluding (Collection) entity [%s / %s / %s]' % (app, collection_name, source_uuid))
+        return True
+
+    if not include_edge(collection_name, edge_name):
+        return True
+
+    logger.debug(
+            'Processing edge type=[%s] of entity [%s / %s / %s]' % (edge_name, app, collection_name, source_identifier))
+
+    logger.debug('Processing IN edges type=[%s] of entity [ %s / %s / %s]' % (
+        edge_name, app, collection_name, source_uuid))
+
+    connecting_query_url = connecting_query_url_template.format(
+            org=config.get('org'),
+            app=app,
+            collection=collection_name,
+            uuid=source_uuid,
+            verb=edge_name,
+            limit=config.get('limit'),
+            **config.get('source_endpoint'))
+
+    connection_query = UsergridQueryIterator(connecting_query_url, sleep_time=config.get('error_retry_sleep'))
+
+    response = True
+
+    for e_connection in connection_query:
+        logger.debug('Triggering IN->OUT edge migration on entity [%s / %s / %s] ' % (
+            app, e_connection.get('type'), e_connection.get('uuid')))
+
+        response = migrate_graph(app, e_connection.get('type'), e_connection, depth) and response
+
+    return response
+
+
+def migrate_graph(app, collection_name, source_entity, depth=0):
+    depth += 1
+    source_uuid = source_entity.get('uuid')
+
+    # short circuit if the graph depth exceeds what was specified
+    if depth > config.get('graph_depth', 1):
+        logger.debug(
+                'Reached Max Graph Depth, stopping after [%s] on [%s / %s]' % (depth, collection_name, source_uuid))
+        return True
+    else:
+        logger.debug('Processing @ Graph Depth [%s]' % depth)
+
+    if exclude_collection(collection_name):
+        logger.warn('Ignoring entity in filtered collection [%s]' % collection_name)
+        return True
+
+    key = '%s:graph:%s' % (key_version, source_uuid)
+    entity_tag = '[%s / %s / %s (%s)]' % (app, collection_name, source_uuid, get_uuid_time(source_uuid))
+
+    if not config.get('skip_cache_read', False):
+        date_visited = cache.get(key)
+
+        if date_visited not in [None, 'None']:
+            logger.debug('Skipping GRAPH %s at %s' % (entity_tag, date_visited))
+            return True
+        else:
+            cache.delete(key)
+
+    logger.info('Visiting GRAPH %s at %s' % (entity_tag, str(datetime.datetime.utcnow())))
+
+    if not config.get('skip_cache_write', False):
+        cache.set(name=key, value=str(int(time.time())), ex=config.get('visit_cache_ttl', 3600 * 2))
+
+    # first, migrate data for current node
+    response = migrate_data(app, collection_name, source_entity)
+
+    # gather the outbound edge names
+    out_edge_names = [edge_name for edge_name in source_entity.get('metadata', {}).get('collections', [])]
+    out_edge_names += [edge_name for edge_name in source_entity.get('metadata', {}).get('connections', [])]
+
+    logger.debug('Entity %s has [%s] OUT edges' % (entity_tag, len(out_edge_names)))
+
+    # migrate each outbound edge type
+    for edge_name in out_edge_names:
+
+        if not exclude_edge(collection_name, edge_name):
+            response = migrate_out_graph_edge_type(app, collection_name, source_entity, edge_name, depth) and response
+
+        if config.get('prune', False):
+            prune_edge_by_name(edge_name, app, collection_name, source_entity)
+
+    # gather the inbound edge names
+    in_edge_names = [edge_name for edge_name in source_entity.get('metadata', {}).get('connecting', [])]
+
+    logger.debug('Entity %s has [%s] IN edges' % (entity_tag, len(in_edge_names)))
+
+    # migrate each inbound edge type
+    for edge_name in in_edge_names:
+
+        if not exclude_edge(collection_name, edge_name):
+            response = migrate_in_graph_edge_type(app, collection_name, source_entity, edge_name,
+                                                  depth) and response
+
+    return response
+
+
+def collect_entities(q):
+    response = {}
+
+    for e in q:
+        response[e.get('uuid')] = e
+
+    return response
+
+
+def prune_edge_by_name(edge_name, app, collection_name, source_entity):
+    if not include_edge(collection_name, edge_name):
+        return True
+
+    source_identifier = get_source_identifier(source_entity)
+    source_uuid = source_entity.get('uuid')
+
+    entity_tag = '[%s / %s / %s (%s)]' % (app, collection_name, source_uuid, get_uuid_time(source_uuid))
+
+    target_app, target_collection, target_org = get_target_mapping(app, collection_name)
+
+    target_connection_query_url = connection_query_url_template.format(
+            org=target_org,
+            app=target_app,
+            verb=edge_name,
+            collection=target_collection,
+            uuid=source_identifier,
+            limit=config.get('limit'),
+            **config.get('target_endpoint'))
+
+    source_connection_query_url = connection_query_url_template.format(
+            org=config.get('org'),
+            app=app,
+            verb=edge_name,
+            collection=collection_name,
+            uuid=source_identifier,
+            limit=config.get('limit'),
+            **config.get('source_endpoint'))
+
+    source_connections = collect_entities(
+            UsergridQueryIterator(source_connection_query_url, sleep_time=config.get('error_retry_sleep')))
+
+    target_connections = collect_entities(
+            UsergridQueryIterator(target_connection_query_url, sleep_time=config.get('error_retry_sleep')))
+
+    delete_uuids = Set(target_connections.keys()) - Set(source_connections.keys())
+
+    if len(delete_uuids) > 0:
+        logger.info('Found [%s] edges to delete for entity %s' % (len(delete_uuids), entity_tag))
+
+        for delete_uuid in delete_uuids:
+            delete_connection_url = connection_create_by_uuid_url_template.format(
+                    org=target_org,
+                    app=target_app,
+                    verb=edge_name,
+                    collection=target_collection,
+                    uuid=source_identifier,
+                    target_uuid=delete_uuid,
+                    **config.get('target_endpoint'))
+
+            attempts = 0
+
+            while attempts < 5:
+                attempts += 1
+
+                r = session_target.delete(delete_connection_url)
+
+                if not config.get('skip_cache_write'):
+                    cache.delete(delete_connection_url)
+
+                if r.status_code == 200:
+                    logger.info('Pruned edge on attempt [%s] URL=[%s]' % (attempts, delete_connection_url))
+                    break
+                else:
+                    logger.error('Error [%s] on attempt [%s] deleting connection at URL=[%s]: %s' % (
+                        r.status_code, attempts, delete_connection_url, r.text))
+                    time.sleep(DEFAULT_RETRY_SLEEP)
+
+    return True
+
+
+def prune_graph(app, collection_name, source_entity):
+    source_uuid = source_entity.get('uuid')
+    key = '%s:prune_graph:%s' % (key_version, source_uuid)
+    entity_tag = '[%s / %s / %s (%s)]' % (app, collection_name, source_uuid, get_uuid_time(source_uuid))
+
+    if not config.get('skip_cache_read', False):
+        date_visited = cache.get(key)
+
+        if date_visited not in [None, 'None']:
+            logger.debug('Skipping PRUNE %s at %s' % (entity_tag, date_visited))
+            return True
+        else:
+            cache.delete(key)
+
+    logger.debug('pruning GRAPH %s at %s' % (entity_tag, str(datetime.datetime.utcnow())))
+    if not config.get('skip_cache_write', False):
+        cache.set(name=key, value=str(int(time.time())), ex=config.get('visit_cache_ttl', 3600 * 2))
+
+    if collection_name in config.get('exclude_collection', []):
+        logger.debug('Excluding (Collection) entity %s' % entity_tag)
+        return True
+
+    out_edge_names = [edge_name for edge_name in source_entity.get('metadata', {}).get('collections', [])]
+    out_edge_names += [edge_name for edge_name in source_entity.get('metadata', {}).get('connections', [])]
+
+    for edge_name in out_edge_names:
+        prune_edge_by_name(edge_name, app, collection_name, source_entity)
+
+
+def reput(app, collection_name, source_entity, attempts=0):
+    source_identifier = source_entity.get('uuid')
+    target_app, target_collection, target_org = get_target_mapping(app, collection_name)
+
+    try:
+        target_entity_url_by_name = put_entity_url_template.format(org=target_org,
+                                                                   app=target_app,
+                                                                   collection=target_collection,
+                                                                   uuid=source_identifier,
+                                                                   **config.get('target_endpoint'))
+
+        r = session_source.put(target_entity_url_by_name, data=json.dumps({}))
+        if r.status_code != 200:
+            logger.info('HTTP [%s]: %s' % (target_entity_url_by_name, r.status_code))
+        else:
+            logger.debug('HTTP [%s]: %s' % (target_entity_url_by_name, r.status_code))
+
+    except:
+        pass
+
+
+def get_uuid_time(the_uuid_string):
+    return time_uuid.TimeUUID(the_uuid_string).get_datetime()
+
+
+def migrate_permissions(app, collection_name, source_entity, attempts=0):
+    if collection_name not in ['roles', 'role', 'group', 'groups']:
+        return True
+
+    target_app, target_collection, target_org = get_target_mapping(app, collection_name)
+
+    source_identifier = get_source_identifier(source_entity)
+
+    source_permissions_url = permissions_url_template.format(org=config.get('org'),
+                                                             app=app,
+                                                             collection=collection_name,
+                                                             uuid=source_identifier,
+                                                             **config.get('source_endpoint'))
+
+    r = session_source.get(source_permissions_url)
+
+    if r.status_code != 200:
+        logger.error('Unable to get permissions at URL [%s]: %s' % (source_permissions_url, r.text))
+        return False
+
+    perm_response = r.json()
+
+    perms = perm_response.get('data', [])
+
+    logger.info('Migrating [%s / %s] with permissions %s' % (collection_name, source_identifier, perms))
+
+    if len(perms) > 0:
+        target_permissions_url = permissions_url_template.format(org=target_org,
+                                                                 app=target_app,
+                                                                 collection=target_collection,
+                                                                 uuid=source_identifier,
+                                                                 **config.get('target_endpoint'))
+
+        for permission in perms:
+            data = {'permission': permission}
+
+            logger.info('Posting permission %s to %s' % (json.dumps(data), target_permissions_url))
+
+            r = session_target.post(target_permissions_url, json.dumps(data))
+
+            if r.status_code != 200:
+                logger.error(
+                        'ERROR posting permission %s to URL=[%s]: %s' % (
+                            json.dumps(data), target_permissions_url, r.text))
+
+    return True
+
+
+def migrate_data(app, collection_name, source_entity, attempts=0, force=False):
+    if config.get('skip_data') and not force:
+        return True
+
+    # check the cache to see if this entity has changed
+    if not config.get('skip_cache_read', False) and not force:
+        try:
+            str_modified = cache.get(source_entity.get('uuid'))
+
+            if str_modified not in [None, 'None']:
+
+                modified = long(str_modified)
+
+                logger.debug('FOUND CACHE: %s = %s ' % (source_entity.get('uuid'), modified))
+
+                if modified <= source_entity.get('modified'):
+
+                    modified_date = datetime.datetime.utcfromtimestamp(modified / 1000)
+                    e_uuid = source_entity.get('uuid')
+
+                    uuid_datetime = time_uuid.TimeUUID(e_uuid).get_datetime()
+
+                    logger.debug('Skipping ENTITY: %s / %s / %s / %s (%s) / %s (%s)' % (
+                        config.get('org'), app, collection_name, e_uuid, uuid_datetime, modified, modified_date))
+                    return True
+                else:
+                    logger.debug('DELETING CACHE: %s ' % (source_entity.get('uuid')))
+                    cache.delete(source_entity.get('uuid'))
+        except:
+            logger.error('Error on checking cache for uuid=[%s]' % source_entity.get('uuid'))
+            logger.error(traceback.format_exc())
+
+    if exclude_collection(collection_name):
+        logger.warn('Excluding entity in filtered collection [%s]' % collection_name)
+        return True
+
+    # handle duplicate user case
+    if collection_name in ['users', 'user']:
+        source_entity = confirm_user_entity(app, source_entity)
+
+    source_identifier = get_source_identifier(source_entity)
+
+    logger.info('Visiting ENTITY data [%s / %s (%s) ] at %s' % (
+        collection_name, source_identifier, get_uuid_time(source_entity.get('uuid')), str(datetime.datetime.utcnow())))
+
+    entity_copy = source_entity.copy()
+
+    if 'metadata' in entity_copy:
+        entity_copy.pop('metadata')
+
+    target_app, target_collection, target_org = get_target_mapping(app, collection_name)
+
+    try:
+        target_entity_url_by_name = put_entity_url_template.format(org=target_org,
+                                                                   app=target_app,
+                                                                   collection=target_collection,
+                                                                   uuid=source_identifier,
+                                                                   **config.get('target_endpoint'))
+
+        r = session_target.put(url=target_entity_url_by_name, data=json.dumps(entity_copy))
+
+        if attempts > 1:
+            logger.warn('Attempt [%s] to migrate entity [%s / %s] at URL [%s]' % (
+                attempts, collection_name, source_identifier, target_entity_url_by_name))
+        else:
+            logger.debug('Attempt [%s] to migrate entity [%s / %s] at URL [%s]' % (
+                attempts, collection_name, source_identifier, target_entity_url_by_name))
+
+        if r.status_code == 200:
+            # Worked => WE ARE DONE
+            logger.info(
+                    'migrate_data | success=[%s] | attempts=[%s] | entity=[%s / %s / %s] | created=[%s] | modified=[%s]' % (
+                        True, attempts, config.get('org'), app, source_identifier, source_entity.get('created'),
+                        source_entity.get('modified'),))
+
+            if not config.get('skip_cache_write', False):
+                logger.debug('SETTING CACHE | uuid=[%s] | modified=[%s]' % (
+                    source_entity.get('uuid'), str(source_entity.get('modified'))))
+
+                cache.set(source_entity.get('uuid'), str(source_entity.get('modified')))
+
+            if collection_name in ['role', 'group', 'roles', 'groups']:
+                migrate_permissions(app, collection_name, source_entity, attempts=0)
+
+            if collection_name in ['users', 'user']:
+                migrate_user_credentials(app, collection_name, source_entity, attempts=0)
+
+            return True
+
+        else:
+            logger.error('Failure [%s] on attempt [%s] to PUT url=[%s], entity=[%s] response=[%s]' % (
+                r.status_code, attempts, target_entity_url_by_name, json.dumps(source_entity), r.text))
+
+            if attempts >= 5:
+                logger.critical(
+                        'ABORT migrate_data | success=[%s] | attempts=[%s] | created=[%s] | modified=[%s] %s / %s / %s' % (
+                            True, attempts, source_entity.get('created'), source_entity.get('modified'), app,
+                            collection_name, source_identifier))
+
+                return False
+
+            if r.status_code == 400:
+
+                if target_collection in ['roles', 'role']:
+                    return repair_user_role(app, collection_name, source_entity)
+
+                elif target_collection in ['users', 'user']:
+                    return handle_user_migration_conflict(app, collection_name, source_entity)
+
+                elif 'duplicate_unique_property_exists' in r.text:
+                    logger.error(
+                            'WILL NOT RETRY (duplicate) [%s] attempts to PUT url=[%s], entity=[%s] response=[%s]' % (
+                                attempts, target_entity_url_by_name, json.dumps(source_entity), r.text))
+
+                    return False
+
+            elif r.status_code == 403:
+                logger.critical(
+                        'ABORT migrate_data | success=[%s] | attempts=[%s] | created=[%s] | modified=[%s] %s / %s / %s' % (
+                            False, attempts, source_entity.get('created'), source_entity.get('modified'), app,
+                            collection_name, source_identifier))
+                return False
+
+    except:
+        logger.error(traceback.format_exc())
+        logger.error('error in migrate_data on entity: %s' % json.dumps(source_entity))
+
+    logger.warn(
+            'UNSUCCESSFUL migrate_data | success=[%s] | attempts=[%s] | entity=[%s / %s / %s] | created=[%s] | modified=[%s]' % (
+                True, attempts, config.get('org'), app, source_identifier, source_entity.get('created'),
+                source_entity.get('modified'),))
+
+    return migrate_data(app, collection_name, source_entity, attempts=attempts + 1)
+
+
+def handle_user_migration_conflict(app, collection_name, source_entity, attempts=0, depth=0):
+    if collection_name in ['users', 'user']:
+        return False
+
+    username = source_entity.get('username')
+    target_app, target_collection, target_org = get_target_mapping(app, collection_name)
+
+    target_entity_url = get_entity_url_template.format(org=target_org,
+                                                       app=target_app,
+                                                       collection=target_collection,
+                                                       uuid=username,
+                                                       **config.get('target_endpoint'))
+
+    # There is retry build in, here is the short circuit
+    if attempts >= 5:
+        logger.critical(
+                'Aborting after [%s] attempts to audit user [%s] at URL [%s]' % (attempts, username, target_entity_url))
+
+        return False
+
+    r = session_target.get(url=target_entity_url)
+
+    if r.status_code == 200:
+        target_entity = r.json().get('entities')[0]
+
+        if source_entity.get('created') < target_entity.get('created'):
+            return repair_user_role(app, collection_name, source_entity)
+
+    elif r.status_code / 100 == 5:
+        audit_logger.warning(
+                'CONFLICT: handle_user_migration_conflict failed attempt [%s] GET [%s] on TARGET URL=[%s] - : %s' % (
+                    attempts, r.status_code, target_entity_url, r.text))
+
+        time.sleep(DEFAULT_RETRY_SLEEP)
+
+        return handle_user_migration_conflict(app, collection_name, source_entity, attempts)
+
+    else:
+        audit_logger.error(
+                'CONFLICT: Failed handle_user_migration_conflict attempt [%s] GET [%s] on TARGET URL=[%s] - : %s' % (
+                    attempts, r.status_code, target_entity_url, r.text))
+
+        return False
+
+
+def get_best_source_entity(app, collection_name, source_entity, depth=0):
+    target_app, target_collection, target_org = get_target_mapping(app, collection_name)
+
+    target_pk = 'uuid'
+
+    if target_collection in ['users', 'user']:
+        target_pk = 'username'
+    elif target_collection in ['roles', 'role']:
+        target_pk = 'name'
+
+    target_name = source_entity.get(target_pk)
+
+    # there should be no target entity now, we just need to decide which one from the source to use
+    source_entity_url_by_name = get_entity_url_template.format(org=config.get('org'),
+                                                               app=app,
+                                                               collection=collection_name,
+                                                               uuid=target_name,
+                                                               **config.get('source_endpoint'))
+
+    r_get_source_entity = session_source.get(source_entity_url_by_name)
+
+    # if we are able to get at the source by PK...
+    if r_get_source_entity.status_code == 200:
+
+        # extract the entity from the response
+        entity_from_get = r_get_source_entity.json().get('entities')[0]
+
+        return entity_from_get
+
+    elif r_get_source_entity.status_code / 100 == 4:
+        # wasn't found, get by QL and sort
+        source_entity_query_url = collection_query_url_template.format(org=config.get('org'),
+                                                                       app=app,
+                                                                       collection=collection_name,
+                                                                       ql='select * where %s=\'%s\' order by created asc' % (
+                                                                           target_pk, target_name),
+                                                                       limit=config.get('limit'),
+                                                                       **config.get('source_endpoint'))
+
+        logger.info('Attempting to determine best entity from query on URL %s' % source_entity_query_url)
+
+        q = UsergridQueryIterator(source_entity_query_url, sleep_time=config.get('error_retry_sleep'))
+
+        desired_entity = None
+
+        entity_counter = 0
+
+        for e in q:
+            entity_counter += 1
+
+            if desired_entity is None:
+                desired_entity = e
+
+            elif e.get('created') < desired_entity.get('created'):
+                desired_entity = e
+
+        if desired_entity is None:
+            logger.warn('Unable to determine best of [%s] entities from query on URL %s' % (
+                entity_counter, source_entity_query_url))
+
+            return source_entity
+
+        else:
+            return desired_entity
+
+    else:
+        return source_entity
+
+
+def repair_user_role(app, collection_name, source_entity, attempts=0, depth=0):
+    target_app, target_collection, target_org = get_target_mapping(app, collection_name)
+
+    # For the users collection, there seemed to be cases where a USERNAME was created/existing with the a
+    # different UUID which caused a 'collision' - so the point is to delete the entity with the differing
+    # UUID by UUID and then do a recursive call to migrate the data - now that the collision has been cleared
+
+    target_pk = 'uuid'
+
+    if target_collection in ['users', 'user']:
+        target_pk = 'username'
+    elif target_collection in ['roles', 'role']:
+        target_pk = 'name'
+
+    target_name = source_entity.get(target_pk)
+
+    target_entity_url_by_name = get_entity_url_template.format(org=target_org,
+                                                               app=target_app,
+                                                               collection=target_collection,
+                                                               uuid=target_name,
+                                                               **config.get('target_endpoint'))
+
+    logger.warning('Repairing: Deleting name=[%s] entity at URL=[%s]' % (target_name, target_entity_url_by_name))
+
+    r = session_target.delete(target_entity_url_by_name)
+
+    if r.status_code == 200 or (r.status_code in [404, 401] and 'service_resource_not_found' in r.text):
+        logger.info('Deletion of entity at URL=[%s] was [%s]' % (target_entity_url_by_name, r.status_code))
+
+        best_source_entity = get_best_source_entity(app, collection_name, source_entity)
+
+        target_entity_url_by_uuid = get_entity_url_template.format(org=target_org,
+                                                                   app=target_app,
+                                                                   collection=target_collection,
+                                                                   uuid=best_source_entity.get('uuid'),
+                                                                   **config.get('target_endpoint'))
+
+        r = session_target.put(target_entity_url_by_uuid, data=json.dumps(best_source_entity))
+
+        if r.status_code == 200:
+            logger.info('Successfully repaired user at URL=[%s]' % target_entity_url_by_uuid)
+            return True
+
+        else:
+            logger.critical('Failed to PUT [%s] the desired entity  at URL=[%s]: %s' % (
+                r.status_code, target_entity_url_by_name, r.text))
+            return False
+
+    else:
+        # log an error and keep going if we cannot delete the entity at the specified URL.  Unlikely, but if so
+        # then this entity is borked
+        logger.critical(
+                'Deletion of entity at URL=[%s] FAILED [%s]: %s' % (target_entity_url_by_name, r.status_code, r.text))
+        return False
+
+
+def get_target_mapping(app, collection_name):
+    target_org = config.get('org_mapping', {}).get(config.get('org'), config.get('org'))
+    target_app = config.get('app_mapping', {}).get(app, app)
+    target_collection = config.get('collection_mapping', {}).get(collection_name, collection_name)
+    return target_app, target_collection, target_org
+
+
+def parse_args():
+    parser = argparse.ArgumentParser(description='Usergrid Org/App Migrator')
+
+    parser.add_argument('--log_dir',
+                        help='path to the place where logs will be written',
+                        default='./',
+                        type=str,
+                        required=False)
+
+    parser.add_argument('--log_level',
+                        help='log level - DEBUG, INFO, WARN, ERROR, CRITICAL',
+                        default='INFO',
+                        type=str,
+                        required=False)
+
+    parser.add_argument('-o', '--org',
+                        help='Name of the org to migrate',
+                        type=str,
+                        required=True)
+
+    parser.add_argument('-a', '--app',
+                        help='Name of one or more apps to include, specify none to include all apps',
+                        required=False,
+                        action='append')
+
+    parser.add_argument('-e', '--include_edge',
+                        help='Name of one or more edges/connection types to INCLUDE, specify none to include all edges',
+                        required=False,
+                        action='append')
+
+    parser.add_argument('--exclude_edge',
+                        help='Name of one or more edges/connection types to EXCLUDE, specify none to include all edges',
+                        required=False,
+                        action='append')
+
+    parser.add_argument('--exclude_collection',
+                        help='Name of one or more collections to EXCLUDE, specify none to include all collections',
+                        required=False,
+                        action='append')
+
+    parser.add_argument('-c', '--collection',
+                        help='Name of one or more collections to include, specify none to include all collections',
+                        default=[],
+                        action='append')
+
+    parser.add_argument('--force_app',
+                        help='Necessary for using 2.0 as a source at times due to API issues.  Forces the specified app(s) to be processed, even if they are not returned from the list of apps in the API call',
+                        default=[],
+                        action='append')
+
+    parser.add_argument('--use_name_for_collection',
+                        help='Name of one or more collections to use [name] instead of [uuid] for creating entities and edges',
+                        default=[],
+                        action='append')
+
+    parser.add_argument('-m', '--migrate',
+                        help='Specifies what to migrate: data, connections, credentials, audit or none (just iterate '
+                             'the apps/collections)',
+                        type=str,
+                        choices=[
+                            'data',
+                            'prune',
+                            'none',
+                            'reput',
+                            'credentials',
+                            'graph',
+                            'permissions'
+                        ],
+                        default='data')
+
+    parser.add_argument('-s', '--source_config',
+                        help='The path to the source endpoint/org configuration file',
+                        type=str,
+                        default='source.json')
+
+    parser.add_argument('-d', '--target_config',
+                        help='The path to the target endpoint/org configuration file',
+                        type=str,
+                        default='destination.json')
+
+    parser.add_argument('--redis_socket',
+                        help='The path to the socket for redis to use',
+                        type=str)
+
+    parser.add_argument('--limit',
+                        help='The number of entities to return per query request',
+                        type=int,
+                        default=100)
+
+    parser.add_argument('-w', '--entity_workers',
+                        help='The number of worker processes to do the migration',
+                        type=int,
+                        default=16)
+
+    parser.add_argument('--visit_cache_ttl',
+                        help='The TTL of the cache of visiting nodes in the graph for connections',
+                        type=int,
+                        default=3600 * 2)
+
+    parser.add_argument('--error_retry_sleep',
+                        help='The number of seconds to wait between retrieving after an error',
+                        type=float,
+                        default=30)
+
+    parser.add_argument('--page_sleep_time',
+                        help='The number of seconds to wait between retrieving pages from the UsergridQueryIterator',
+                        type=float,
+                        default=0)
+
+    parser.add_argument('--entity_sleep_time',
+                        help='The number of seconds to wait between retrieving pages from the UsergridQueryIterator',
+                        type=float,
+                        default=0)
+
+    parser.add_argument('--collection_workers',
+                        help='The number of worker processes to do the migration',
+                        type=int,
+                        default=2)
+
+    parser.add_argument('--queue_size_max',
+                        help='The max size of entities to allow in the queue',
+                        type=int,
+                        default=100000)
+
+    parser.add_argument('--graph_depth',
+                        help='The graph depth to traverse to copy',
+                        type=int,
+                        default=3)
+
+    parser.add_argument('--queue_watermark_high',
+                        help='The point at which publishing to the queue will PAUSE until it is at or below low watermark',
+                        type=int,
+                        default=25000)
+
+    parser.add_argument('--min_modified',
+                        help='Break when encountering a modified date before this, per collection',
+                        type=int,
+                        default=0)
+
+    parser.add_argument('--max_modified',
+                        help='Break when encountering a modified date after this, per collection',
+                        type=long,
+                        default=3793805526000)
+
+    parser.add_argument('--queue_watermark_low',
+                        help='The point at which publishing to the queue will RESUME after it has reached the high watermark',
+                        type=int,
+                        default=5000)
+
+    parser.add_argument('--ql',
+                        help='The QL to use in the filter for reading data from collections',
+                        type=str,
+                        default='select * order by created asc')
+    # default='select * order by created asc')
+
+    parser.add_argument('--repair_data',
+                        help='Repair data when iterating/migrating graph but skipping data',
+                        action='store_true')
+
+    parser.add_argument('--prune',
+                        help='Prune the graph while processing (instead of the prune operation)',
+                        action='store_true')
+
+    parser.add_argument('--skip_data',
+                        help='Skip migrating data (useful for connections only)',
+                        action='store_true')
+
+    parser.add_argument('--skip_credentials',
+                        help='Skip migrating credentials',
+                        action='store_true')
+
+    parser.add_argument('--skip_cache_read',
+                        help='Skip reading the cache (modified timestamps and graph edges)',
+                        dest='skip_cache_read',
+                        action='store_true')
+
+    parser.add_argument('--skip_cache_write',
+                        help='Skip updating the cache with modified timestamps of entities and graph edges',
+                        dest='skip_cache_write',
+                        action='store_true')
+
+    parser.add_argument('--create_apps',
+                        help='Create apps at the target if they do not exist',
+                        dest='create_apps',
+                        action='store_true')
+
+    parser.add_argument('--nohup',
+                        help='specifies not to use stdout for logging',
+                        action='store_true')
+
+    parser.add_argument('--graph',
+                        help='Use GRAPH instead of Query',
+                        dest='graph',
+                        action='store_true')
+
+    parser.add_argument('--su_username',
+                        help='Superuser username',
+                        required=False,
+                        type=str)
+
+    parser.add_argument('--su_password',
+                        help='Superuser Password',
+                        required=False,
+                        type=str)
+
+    parser.add_argument('--inbound_connections',
+                        help='Name of the org to migrate',
+                        action='store_true')
+
+    parser.add_argument('--map_app',
+                        help="Multiple allowed: A colon-separated string such as 'apples:oranges' which indicates to"
+                             " put data from the app named 'apples' from the source endpoint into app named 'oranges' "
+                             "in the target endpoint",
+                        default=[],
+                        action='append')
+
+    parser.add_argument('--map_collection',
+                        help="One or more colon-separated string such as 'cats:dogs' which indicates to put data from "
+                             "collections named 'cats' from the source endpoint into a collection named 'dogs' in the "
+                             "target endpoint, applicable globally to all apps",
+                        default=[],
+                        action='append')
+
+    parser.add_argument('--map_org',
+                        help="One or more colon-separated strings such as 'red:blue' which indicates to put data from "
+                             "org named 'red' from the source endpoint into a collection named 'blue' in the target "
+                             "endpoint",
+                        default=[],
+                        action='append')
+
+    my_args = parser.parse_args(sys.argv[1:])
+
+    return vars(my_args)
+
+
+def init():
+    global config
+
+    if config.get('migrate') == 'credentials':
+
+        if config.get('su_password') is None or config.get('su_username') is None:
+            message = 'ABORT: In order to migrate credentials, Superuser parameters (su_password, su_username) are required'
+            print message
+            logger.critical(message)
+            exit()
+
+    config['collection_mapping'] = {}
+    config['app_mapping'] = {}
+    config['org_mapping'] = {}
+
+    for mapping in config.get('map_collection', []):
+        parts = mapping.split(':')
+
+        if len(parts) == 2:
+            config['collection_mapping'][parts[0]] = parts[1]
+        else:
+            logger.warning('Skipping Collection mapping: [%s]' % mapping)
+
+    for mapping in config.get('map_app', []):
+        parts = mapping.split(':')
+
+        if len(parts) == 2:
+            config['app_mapping'][parts[0]] = parts[1]
+        else:
+            logger.warning('Skipping App mapping: [%s]' % mapping)
+
+    for mapping in config.get('map_org', []):
+        parts = mapping.split(':')
+
+        if len(parts) == 2:
+            config['org_mapping'][parts[0]] = parts[1]
+            logger.info('Mapping Org [%s] to [%s] from mapping [%s]' % (parts[0], parts[1], mapping))
+        else:
+            logger.warning('Skipping Org mapping: [%s]' % mapping)
+
+    with open(config.get('source_config'), 'r') as f:
+        config['source_config'] = json.load(f)
+
+    with open(config.get('target_config'), 'r') as f:
+        config['target_config'] = json.load(f)
+
+    if config['exclude_collection'] is None:
+        config['exclude_collection'] = []
+
+    config['source_endpoint'] = config['source_config'].get('endpoint').copy()
+    config['source_endpoint'].update(config['source_config']['credentials'][config['org']])
+
+    target_org = config.get('org_mapping', {}).get(config.get('org'), config.get('org'))
+
+    config['target_endpoint'] = config['target_config'].get('endpoint').copy()
+    config['target_endpoint'].update(config['target_config']['credentials'][target_org])
+
+
+def wait_for(threads, label, sleep_time=60):
+    wait = True
+
+    logger.info('Starting to wait for [%s] threads with sleep time=[%s]' % (len(threads), sleep_time))
+
+    while wait:
+        wait = False
+        alive_count = 0
+
+        for t in threads:
+
+            if t.is_alive():
+                alive_count += 1
+                logger.info('Thread [%s] is still alive' % t.name)
+
+        if alive_count > 0:
+            wait = True
+            logger.info('Continuing to wait for [%s] threads with sleep time=[%s]' % (alive_count, sleep_time))
+            time.sleep(sleep_time)
+
+    logger.warn('All workers [%s] done!' % label)
+
+
+def count_bytes(entity):
+    entity_copy = entity.copy()
+
+    if 'metadata' in entity_copy:
+        del entity_copy['metadata']
+
+    entity_str = json.dumps(entity_copy)
+
+    return len(entity_str)
+
+
+def migrate_user_credentials(app, collection_name, source_entity, attempts=0):
+    # this only applies to users
+    if collection_name not in ['users', 'user'] \
+            or config.get('skip_credentials', False):
+        return False
+
+    source_identifier = get_source_identifier(source_entity)
+
+    target_app, target_collection, target_org = get_target_mapping(app, collection_name)
+
+    # get the URLs for the source and target users
+
+    source_url = user_credentials_url_template.format(org=config.get('org'),
+                                                      app=app,
+                                                      uuid=source_identifier,
+                                                      **config.get('source_endpoint'))
+
+    target_url = user_credentials_url_template.format(org=target_org,
+                                                      app=target_app,
+                                                      uuid=source_identifier,
+                                                      **config.get('target_endpoint'))
+
+    # this endpoint for some reason uses basic auth...
+    r = requests.get(source_url, auth=HTTPBasicAuth(config.get('su_username'), config.get('su_password')))
+
+    if r.status_code != 200:
+        logger.error('Unable to migrate credentials due to HTTP [%s] on GET URL [%s]: %s' % (
+            r.status_code, source_url, r.text))
+
+        return False
+
+    source_credentials = r.json()
+
+    logger.info('Putting credentials to [%s]...' % target_url)
+
+    r = requests.put(target_url,
+                     data=json.dumps(source_credentials),
+                     auth=HTTPBasicAuth(config.get('su_username'), config.get('su_password')))
+
+    if r.status_code != 200:
+        logger.error(
+                'Unable to migrate credentials due to HTTP [%s] on PUT URL [%s]: %s' % (
+                    r.status_code, target_url, r.text))
+        return False
+
+    logger.info('migrate_user_credentials | success=[%s] | app/collection/name = %s/%s/%s' % (
+        True, app, collection_name, source_entity.get('uuid')))
+
+    return True
+
+
+def check_response_status(r, url, exit_on_error=True):
+    if r.status_code != 200:
+        logger.critical('HTTP [%s] on URL=[%s]' % (r.status_code, url))
+        logger.critical('Response: %s' % r.text)
+
+        if exit_on_error:
+            exit()
+
+
+def do_operation(apps_and_collections, operation):
+    status_map = {}
+
+    logger.info('Creating queues...')
+
+    # Mac, for example, does not support the max_size for a queue in Python
+    if _platform == "linux" or _platform == "linux2":
+        entity_queue = Queue(maxsize=config.get('queue_size_max'))
+        collection_queue = Queue(maxsize=config.get('queue_size_max'))
+        collection_response_queue = Queue(maxsize=config.get('queue_size_max'))
+    else:
+        entity_queue = Queue()
+        collection_queue = Queue()
+        collection_response_queue = Queue()
+
+    logger.info('Starting entity_workers...')
+
+    collection_count = 0
+    # create the entity workers, but only start them (later) if there is work to do
+    entity_workers = [EntityWorker(entity_queue, operation) for x in xrange(config.get('entity_workers'))]
+
+    # create the collection workers, but only start them (later) if there is work to do
+    collection_workers = [CollectionWorker(collection_queue, entity_queue, collection_response_queue) for x in
+                          xrange(config.get('collection_workers'))]
+
+    status_listener = StatusListener(collection_response_queue, entity_queue)
+
+    try:
+        # for each app, publish the (app_name, collection_name) to the queue.
+        # this is received by a collection worker who iterates the collection and publishes
+        # entities into a queue.  These are received by an individual entity worker which
+        # executes the specified operation on the entity
+
+        for app, app_data in apps_and_collections.get('apps', {}).iteritems():
+            logger.info('Processing app=[%s]' % app)
+
+            status_map[app] = {
+                'iteration_started': str(datetime.datetime.now()),
+                'max_created': -1,
+                'max_modified': -1,
+                'min_created': 1584946416000,
+                'min_modified': 1584946416000,
+                'count': 0,
+                'bytes': 0,
+                'collections': {}
+            }
+
+            # iterate the collections which are returned.
+            for collection_name in app_data.get('collections'):
+                logger.info('Publishing app / collection: %s / %s' % (app, collection_name))
+
+                collection_count += 1
+                collection_queue.put((app, collection_name))
+
+            logger.info('Finished publishing [%s] collections for app [%s] !' % (collection_count, app))
+
+        # only start the threads if there is work to do
+        if collection_count > 0:
+            status_listener.start()
+
+            # start the worker processes which will iterate the collections
+            [w.start() for w in collection_workers]
+
+            # start the worker processes which will do the work of migrating
+            [w.start() for w in entity_workers]
+
+            # allow collection workers to finish
+            wait_for(collection_workers, label='collection_workers', sleep_time=60)
+
+            # allow entity workers to finish
+            wait_for(entity_workers, label='entity_workers', sleep_time=60)
+
+            status_listener.terminate()
+
+    except KeyboardInterrupt:
+        logger.warning('Keyboard Interrupt, aborting...')
+        entity_queue.close()
+        collection_queue.close()
+        collection_response_queue.close()
+
+        [os.kill(super(EntityWorker, p).pid, signal.SIGINT) for p in entity_workers]
+        [os.kill(super(CollectionWorker, p).pid, signal.SIGINT) for p in collection_workers]
+        os.kill(super(StatusListener, status_listener).pid, signal.SIGINT)
+
+        [w.terminate() for w in entity_workers]
+        [w.terminate() for w in collection_workers]
+        status_listener.terminate()
+
+    logger.info('entity_workers DONE!')
+
+
+def filter_apps_and_collections(org_apps):
+    app_collecitons = {
+        'apps': {
+
+        }
+    }
+
+    try:
+        selected_apps = config.get('app')
+
+        # iterate the apps retrieved from the org
+        for org_app in sorted(org_apps.keys()):
+            logger.info('Found SOURCE App: %s' % org_app)
+
+        time.sleep(3)
+
+        for org_app in sorted(org_apps.keys()):
+            parts = org_app.split('/')
+            app = parts[1]
+
+            # if apps are specified and the current app is not in the list, skip it
+            if selected_apps and len(selected_apps) > 0 and app not in selected_apps:
+                logger.warning('Skipping app [%s] not included in process list [%s]' % (app, selected_apps))
+                continue
+
+            app_collecitons['apps'][app] = {
+                'collections': []
+            }
+
+            # get the list of collections from the source org/app
+            source_app_url = app_url_template.format(org=config.get('org'),
+                                                     app=app,
+                                                     **config.get('source_endpoint'))
+            logger.info('GET %s' % source_app_url)
+
+            r_collections = session_source.get(source_app_url)
+
+            collection_attempts = 0
+
+            # sometimes this call was not working so I put it in a loop to force it...
+            while r_collections.status_code != 200 and collection_attempts < 5:
+                collection_attempts += 1
+                logger.warning('FAILED: GET (%s) [%s] URL: %s' % (r_collections.elapsed, r_collections.status_code,
+                                                                  source_app_url))
+                time.sleep(DEFAULT_RETRY_SLEEP)
+                r_collections = session_source.get(source_app_url)
+
+            if collection_attempts >= 5:
+                logger.critical('Unable to get collections at URL %s, skipping app' % source_app_url)
+                continue
+
+            app_response = r_collections.json()
+
+            logger.info('App Response: ' + json.dumps(app_response))
+
+            app_entities = app_response.get('entities', [])
+
+            if len(app_entities) > 0:
+                app_entity = app_entities[0]
+                collections = app_entity.get('metadata', {}).get('collections', {})
+                logger.info('App=[%s] starting Collections=[%s]' % (app, collections))
+
+                app_collecitons['apps'][app]['collections'] = [c for c in collections if include_collection(c)]
+                logger.info('App=[%s] filtered Collections=[%s]' % (app, collections))
+
+    except:
+        print(traceback.format_exc())
+
+    return app_collecitons
+
+
+def confirm_target_org_apps(apps_and_collections):
+    for app in apps_and_collections.get('apps'):
+
+        # it is possible to map source orgs and apps to differently named targets.  This gets the
+        # target names for each
+        target_org = config.get('org_mapping', {}).get(config.get('org'), config.get('org'))
+        target_app = config.get('app_mapping', {}).get(app, app)
+
+        # Check that the target Org/App exists.  If not, move on to the next
+        target_app_url = app_url_template.format(org=target_org,
+                                                 app=target_app,
+                                                 **config.get('target_endpoint'))
+        logger.info('GET %s' % target_app_url)
+        r_target_apps = session_target.get(target_app_url)
+
+        if r_target_apps.status_code != 200:
+
+            if config.get('create_apps', DEFAULT_CREATE_APPS):
+                create_app_url = org_management_app_url_template.format(org=target_org,
+                                                                        app=target_app,
+                                                                        **config.get('target_endpoint'))
+                app_request = {'name': target_app}
+                r = session_target.post(create_app_url, data=json.dumps(app_request))
+
+                if r.status_code != 200:
+                    logger.critical('--create_apps specified and unable to create app [%s] at URL=[%s]: %s' % (
+                        target_app, create_app_url, r.text))
+                    logger.critical('Process will now exit')
+                    exit()
+                else:
+                    logger.warning('Created app=[%s] at URL=[%s]: %s' % (target_app, create_app_url, r.text))
+            else:
+                logger.critical('Target application DOES NOT EXIST at [%s] URL=%s' % (
+                    r_target_apps.status_code, target_app_url))
+                continue
+
+
+def main():
+    global config, cache
+
+    config = parse_args()
+    init()
+    init_logging()
+
+    logger.warn('Script starting')
+
+    try:
+        if config.get('redis_socket') is not None:
+            cache = redis.Redis(unix_socket_path=config.get('redis_socket'))
+
+        else:
+            # this does not try to connect to redis
+            cache = redis.StrictRedis(host='localhost', port=6379, db=0)
+
+        # this is necessary to test the connection to redis
+        cache.get('usergrid')
+
+    except:
+        logger.error(
+                'Error connecting to Redis cache, consider using Redis to be able to optimize the migration process...')
+        logger.error(
+                'Error connecting to Redis cache, consider using Redis to be able to optimize the migration process...')
+
+        time.sleep(3)
+
+        config['use_cache'] = False
+        config['skip_cache_read'] = True
+        config['skip_cache_write'] = True
+
+    org_apps = {
+    }
+
+    force_apps = config.get('force_app', [])
+
+    if force_apps is not None and len(force_apps) > 0:
+        logger.warn('Forcing only the following apps to be processed: %s' % force_apps)
+
+        for app in force_apps:
+            key = '%s/%s' % (app, app)
+            org_apps[key] = app
+
+    if len(org_apps) == 0:
+        source_org_mgmt_url = org_management_url_template.format(org=config.get('org'),
+                                                                 limit=config.get('limit'),
+                                                                 **config.get('source_endpoint'))
+
+        print('Retrieving apps from [%s]' % source_org_mgmt_url)
+        logger.info('Retrieving apps from [%s]' % source_org_mgmt_url)
+
+        try:
+            # list the apps for the SOURCE org
+            logger.info('GET %s' % source_org_mgmt_url)
+            r = session_source.get(source_org_mgmt_url)
+
+            if r.status_code != 200:
+                logger.critical(
+                        'Abort processing: Unable to retrieve apps from [%s]: %s' % (source_org_mgmt_url, r.text))
+                exit()
+
+            logger.info(json.dumps(r.text))
+
+            org_apps = r.json().get('data')
+
+        except Exception:
+            logger.exception('ERROR Retrieving apps from [%s]' % source_org_mgmt_url)
+            print(traceback.format_exc())
+            logger.critical('Unable to retrieve apps from [%s] and will exit' % source_org_mgmt_url)
+            exit()
+
+    # Check the specified configuration for what to migrate/audit
+    if config.get('migrate') == 'graph':
+        operation = migrate_graph
+
+    elif config.get('migrate') == 'data':
+        operation = migrate_data
+
+    elif config.get('migrate') == 'prune':
+        operation = prune_graph
+
+    elif config.get('migrate') == 'permissions':
+        operation = migrate_permissions
+        config['collection'] = ['roles', 'groups']
+        logger.warn(
+                'Since permissions migration was specified, overwriting included collections to be %s...' % config[
+                    'collection'])
+
+    elif config.get('migrate') == 'credentials':
+        operation = migrate_user_credentials
+        config['collection'] = ['users']
+        logger.warn('Since credential migration was specified, overwriting included collections to be %s' % config[
+            'collection'])
+
+    elif config.get('migrate') == 'reput':
+        operation = reput
+
+    else:
+        operation = None
+
+    # filter out the apps and collections based on the -c and --exclude_collection directives
+    apps_and_collections = filter_apps_and_collections(org_apps)
+
+    logger.warn('The following apps/collections will be processed: %s' % json.dumps(apps_and_collections))
+
+    # confirm the apps exist at the target/destination org
+    confirm_target_org_apps(apps_and_collections)
+
+    # execute the operation over apps and collections
+    do_operation(apps_and_collections, operation)
+
+    logger.warn('Script finished')
+
+
+if __name__ == "__main__":
+    main()
diff --git a/utils/usergrid-util-python/usergrid_tools/parse_importer/README.md b/utils/usergrid-util-python/usergrid_tools/parse_importer/README.md
new file mode 100644
index 0000000..3f75025
--- /dev/null
+++ b/utils/usergrid-util-python/usergrid_tools/parse_importer/README.md
@@ -0,0 +1,90 @@
+# Data Importer for Parse.com Application Data Export
+
+## Overview
+
+This Python script uses the Usergrid Python SDK to iterate a data export from Parse.com to import it into a Usergrid instance
+
+## Usage
+
+```
+usage: parse_data_importer.py [-h] -o ORG -a APP --url URL -f FILE --tmp_dir
+                              TMP_DIR [--client_id CLIENT_ID]
+                              [--client_secret CLIENT_SECRET]
+
+Parse.com Data Importer for Usergrid
+
+optional arguments:
+  -h, --help            show this help message and exit
+  -o ORG, --org ORG     Name of the org to import data into
+  -a APP, --app APP     Name of the app to import data into
+  --url URL             The URL of the Usergrid Instance to import data into
+  -f FILE, --file FILE  Full or relative path of the data file to import
+  --tmp_dir TMP_DIR     Directory where data file will be unzipped
+  --client_id CLIENT_ID
+                        The Client ID for using OAuth Tokens - necessary if
+                        app is secured
+  --client_secret CLIENT_SECRET
+                        The Client Secret for using OAuth Tokens - necessary
+                        if app is secured
+```
+
+## Features
+
+Support for:
+* Roles -> Users
+* Roles -> Roles
+* Custom entities
+* Joins implemented as Graph Edges with the name of 'joins' - in both directions
+* Pointers implemented as Graph Edges with the name of 'pointers' - in both directions on an object
+
+No Support for:
+* Products - In-App Purchases
+* Installations - Will map to 'Devices' at some point - important for Push Notifications perhaps
+* Binary Assets (Images) - Work in Progress to complete
+
+## Graph Edges in Usergrid
+
+Usergrid is a Graph Datastore and implements the concept of a Graph Edge in the form of a 'connection'.  Pointers, when found on an object, are implemented as follows:
+
+Source Entity --[Edge Name]--> Target Entity
+
+This is represented as a URL as follows: /{source_collection}/{source_entity_id}/pointers/{optional:target_type}.  A GET on this URL would return a list of entities which have this graph edge.  If a `{target_type}` is specified the results will be limited to entities of that type. 
+
+Examples: 
+* `GET /pets/max/pointers` - get the list of entities of all entity types which have a 'pointers' edge to them from the 'pet' 'max'
+* `GET /pets/max/pointers/owners` - get the list of entities of owners which have a 'pointers' edge to them from the 'pet' 'max'
+* `GET /pets/max/pointers/owners/jeff` - get the owner 'jeff' which has a 'pointers' edge to them from the 'pet' 'max'
+
+## Pointers
+
+Parse.com has support for pointers from one object to another.  For example, for a Pointer from a Pet to an Owner, the object might look as follows:
+ 
+```
+{
+  "fields" : "...",
+  "objectId": "A7Hdad8HD3",
+  "owner": {
+      "__type": "Pointer",
+      "className": "Owner",
+      "objectId": "QC41NHJJlU"
+  }
+}
+```
+
+
+## Joins
+Parse.com has support for the concept of a Join as well.  At the moment, Joining Users and Roles is supported and an attempt has been made to support arbitrary Joins based on the format of the `_Join:users:_Role.json` file found in my exported data.  The from/to types appear to be found in the filename.
+
+An example of the Join file is below:
+
+```
+{ "results": [
+	{
+        "owningId": "lxhMWzbeXa",
+        "relatedId": "MCU2Cv9nuk"
+    }
+] }
+```
+
+
+Joins are implemented as Graph Edges with the name of 'joins' - in both directions from the objects where the Join was found
diff --git a/utils/usergrid-util-python/usergrid_tools/parse_importer/__init__.py b/utils/usergrid-util-python/usergrid_tools/parse_importer/__init__.py
new file mode 100644
index 0000000..3b2a4e0
--- /dev/null
+++ b/utils/usergrid-util-python/usergrid_tools/parse_importer/__init__.py
@@ -0,0 +1,21 @@
+# */
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *   http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing,
+# * software distributed under the License is distributed on an
+# * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# * KIND, either express or implied.  See the License for the
+#    * specific language governing permissions and limitations
+# * under the License.
+# */
+
+__author__ = 'Jeff.West@yahoo.com'
+
diff --git a/utils/usergrid-util-python/usergrid_tools/parse_importer/parse_importer.py b/utils/usergrid-util-python/usergrid_tools/parse_importer/parse_importer.py
new file mode 100644
index 0000000..ed89116
--- /dev/null
+++ b/utils/usergrid-util-python/usergrid_tools/parse_importer/parse_importer.py
@@ -0,0 +1,404 @@
+# */
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *   http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing,
+# * software distributed under the License is distributed on an
+# * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# * KIND, either express or implied.  See the License for the
+#    * specific language governing permissions and limitations
+# * under the License.
+# */
+
+import json
+import logging
+from logging.handlers import RotatingFileHandler
+import os
+from os import listdir
+import zipfile
+from os.path import isfile
+import sys
+import argparse
+import traceback
+
+from usergrid import Usergrid
+from usergrid.UsergridClient import UsergridEntity
+
+__author__ = 'Jeff.West@yahoo.com'
+
+logger = logging.getLogger('UsergridParseImporter')
+
+parse_id_to_uuid_map = {}
+global_connections = {}
+config = {}
+
+
+def init_logging(stdout_enabled=True):
+    root_logger = logging.getLogger()
+    log_file_name = './usergrid_parse_importer.log'
+    log_formatter = logging.Formatter(fmt='%(asctime)s | %(name)s | %(processName)s | %(levelname)s | %(message)s',
+                                      datefmt='%m/%d/%Y %I:%M:%S %p')
+
+    rotating_file = logging.handlers.RotatingFileHandler(filename=log_file_name,
+                                                         mode='a',
+                                                         maxBytes=2048576000,
+                                                         backupCount=10)
+    rotating_file.setFormatter(log_formatter)
+    rotating_file.setLevel(logging.INFO)
+
+    root_logger.addHandler(rotating_file)
+    root_logger.setLevel(logging.INFO)
+
+    logging.getLogger('urllib3.connectionpool').setLevel(logging.WARN)
+    logging.getLogger('requests.packages.urllib3.connectionpool').setLevel(logging.WARN)
+
+    if stdout_enabled:
+        stdout_logger = logging.StreamHandler(sys.stdout)
+        stdout_logger.setFormatter(log_formatter)
+        stdout_logger.setLevel(logging.INFO)
+        root_logger.addHandler(stdout_logger)
+
+
+def convert_parse_entity(collection, parse_entity):
+    parse_entity['type'] = collection
+
+    if 'name' not in parse_entity and collection.lower() != 'users':
+        parse_entity['name'] = parse_entity['objectId']
+
+    connections = {}
+
+    for name, value in parse_entity.iteritems():
+        if isinstance(value, dict):
+            if value.get('__type') == 'Pointer':
+                class_name = value.get('className') if value.get('className')[0] != '_' else value.get('className')[1:]
+                connections[value.get('objectId')] = class_name
+
+                logger.info('Connection found from [%s: %s] to entity [%s: %s]' % (
+                    collection, parse_entity['name'], class_name, value.get('objectId')))
+
+    return UsergridEntity(parse_entity), connections
+
+
+def build_usergrid_entity(collection, entity_uuid, data=None):
+    identifier = {'type': collection, 'uuid': entity_uuid}
+    data = {} if data is None else data
+    data.update(identifier)
+    return UsergridEntity(data)
+
+
+def load_users_and_roles(working_directory):
+    with open(os.path.join(working_directory, '_User.json'), 'r') as f:
+        users = json.load(f).get('results', [])
+        logger.info('Loaded [%s] Users' % len(users))
+
+    for i, parse_user in enumerate(users):
+        logger.info('Loading user [%s]: [%s / %s]' % (i, parse_user['username'], parse_user['objectId']))
+        usergrid_user, connections = convert_parse_entity('users', parse_user)
+        res = usergrid_user.save()
+
+        if res.ok:
+            logger.info('Saved user [%s]: [%s / %s]' % (i, parse_user['username'], parse_user['objectId']))
+
+            if 'uuid' in usergrid_user.entity_data:
+                parse_id_to_uuid_map[parse_user['objectId']] = usergrid_user.get('uuid')
+        else:
+            logger.error(
+                    'Error saving user [%s]: [%s / %s] - %s' % (i, parse_user['username'], parse_user['objectId'], res))
+
+    with open(os.path.join(working_directory, '_Role.json'), 'r') as f:
+        roles = json.load(f).get('results', [])
+        logger.info('Loaded [%s] Roles' % len(roles))
+
+    for i, parse_role in enumerate(roles):
+        logger.info('Loading role [%s]: [%s / %s]' % (i, parse_role['name'], parse_role['objectId']))
+        usergrid_role, connections = convert_parse_entity('roles', parse_role)
+        res = usergrid_role.save()
+
+        if res.ok:
+            logger.info('Saved role [%s]: [%s / %s]' % (i, parse_role['name'], parse_role['objectId']))
+
+            if 'uuid' in usergrid_role.entity_data:
+                parse_id_to_uuid_map[parse_role['objectId']] = usergrid_role.get('uuid')
+
+        else:
+            logger.error(
+                    'Error saving role [%s]: [%s / %s] - %s' % (i, parse_role['name'], parse_role['objectId'], res))
+
+    join_file = os.path.join(working_directory, '_Join:users:_Role.json')
+
+    if os.path.isfile(join_file) and os.path.getsize(join_file) > 0:
+        with open(join_file, 'r') as f:
+            users_to_roles = json.load(f).get('results', [])
+            logger.info('Loaded [%s] User->Roles' % len(users_to_roles))
+
+            for user_to_role in users_to_roles:
+                role_id = user_to_role['owningId']
+                role_uuid = parse_id_to_uuid_map.get(role_id)
+
+                target_role_id = user_to_role['relatedId']
+                target_role_uuid = parse_id_to_uuid_map.get(target_role_id)
+
+                if role_uuid is None or target_role_uuid is None:
+                    logger.error('Failed on assigning role [%s] to user [%s]' % (role_uuid, target_role_uuid))
+                    continue
+
+                target_role_entity = build_usergrid_entity('user', target_role_uuid)
+
+                res = Usergrid.assign_role(role_uuid, target_role_entity)
+
+                if res.ok:
+                    logger.info('Assigned role [%s] to user [%s]' % (role_uuid, target_role_uuid))
+                else:
+                    logger.error('Failed on assigning role [%s] to user [%s]' % (role_uuid, target_role_uuid))
+
+    else:
+        logger.info('No Users -> Roles to load')
+
+    join_file = os.path.join(working_directory, '_Join:roles:_Role.json')
+
+    if os.path.isfile(join_file) and os.path.getsize(join_file) > 0:
+        with open(join_file, 'r') as f:
+            users_to_roles = json.load(f).get('results', [])
+            logger.info('Loaded [%s] Roles->Roles' % len(users_to_roles))
+
+            for user_to_role in users_to_roles:
+                role_id = user_to_role['owningId']
+                role_uuid = parse_id_to_uuid_map.get(role_id)
+
+                target_role_id = user_to_role['relatedId']
+                target_role_uuid = parse_id_to_uuid_map.get(target_role_id)
+
+                if role_uuid is None or target_role_uuid is None:
+                    logger.error('Failed on assigning role [%s] to role [%s]' % (role_uuid, target_role_uuid))
+                    continue
+
+                target_role_entity = build_usergrid_entity('role', target_role_uuid)
+
+                res = Usergrid.assign_role(role_uuid, target_role_entity)
+
+                if res.ok:
+                    logger.info('Assigned role [%s] to role [%s]' % (role_uuid, target_role_uuid))
+                else:
+                    logger.error('Failed on assigning role [%s] to role [%s]' % (role_uuid, target_role_uuid))
+
+    else:
+        logger.info('No Roles -> Roles to load')
+
+
+def process_join_file(working_directory, join_file):
+    file_path = os.path.join(working_directory, join_file)
+
+    logger.warn('Processing Join file: %s' % file_path)
+
+    parts = join_file.split(':')
+
+    if len(parts) != 3:
+        logger.warn('Did not find expected 3 parts in JOIN filename: %s' % join_file)
+        return
+
+    related_type = parts[1]
+    owning_type = parts[2].split('.')[0]
+
+    owning_type = owning_type[1:] if owning_type[0] == '_' else owning_type
+
+    with open(file_path, 'r') as f:
+        try:
+            json_data = json.load(f)
+
+        except ValueError, e:
+            print traceback.format_exc(e)
+            logger.error('Unable to process file: %s' % file_path)
+            return
+
+        entities = json_data.get('results')
+
+        for join in entities:
+            owning_id = join.get('owningId')
+            related_id = join.get('relatedId')
+
+            owning_entity = build_usergrid_entity(owning_type, parse_id_to_uuid_map.get(owning_id))
+            related_entity = build_usergrid_entity(related_type, parse_id_to_uuid_map.get(related_id))
+
+            connect_entities(owning_entity, related_entity, 'joins')
+            connect_entities(related_entity, owning_entity, 'joins')
+
+
+def load_entities(working_directory):
+    files = [
+        f for f in listdir(working_directory)
+
+        if isfile(os.path.join(working_directory, f))
+        and os.path.getsize(os.path.join(working_directory, f)) > 0
+        and f not in ['_Join:roles:_Role.json',
+                      '_Join:users:_Role.json',
+                      '_User.json',
+                      '_Product.json',
+                      '_Installation.json',
+                      '_Role.json']
+        ]
+
+    # sort to put join files last...
+    for data_file in sorted(files):
+        if data_file[0:6] == '_Join:':
+            process_join_file(working_directory, data_file)
+            continue
+
+        file_path = os.path.join(working_directory, data_file)
+        collection = data_file.split('.')[0]
+
+        if collection[0] == '_':
+            logger.warn('Found internal type: [%s]' % collection)
+            collection = collection[1:]
+
+        if collection not in global_connections:
+            global_connections[collection] = {}
+
+        with open(file_path, 'r') as f:
+
+            try:
+                json_data = json.load(f)
+
+            except ValueError, e:
+                print traceback.format_exc(e)
+                logger.error('Unable to process file: %s' % file_path)
+                continue
+
+            entities = json_data.get('results')
+
+            logger.info('Found [%s] entities of type [%s]' % (len(entities), collection))
+
+            for parse_entity in entities:
+                usergrid_entity, connections = convert_parse_entity(collection, parse_entity)
+                response = usergrid_entity.save()
+
+                global_connections[collection][usergrid_entity.get('uuid')] = connections
+
+                if response.ok:
+                    logger.info('Saved Entity: %s' % parse_entity)
+                else:
+                    logger.info('Error saving entity %s: %s' % (parse_entity, response))
+
+
+def connect_entities(from_entity, to_entity, connection_name):
+    connect_response = from_entity.connect(connection_name, to_entity)
+
+    if connect_response.ok:
+        logger.info('Successfully connected [%s / %s]--[%s]-->[%s / %s]' % (
+            from_entity.get('type'), from_entity.get('uuid'), connection_name, to_entity.get('type'),
+            to_entity.get('uuid')))
+    else:
+        logger.error('Unable to connect [%s / %s]--[%s]-->[%s / %s]: %s' % (
+            from_entity.get('type'), from_entity.get('uuid'), connection_name, to_entity.get('type'),
+            to_entity.get('uuid'), connect_response))
+
+
+def create_connections():
+    for from_collection, entity_map in global_connections.iteritems():
+
+        for from_entity_uuid, entity_connections in entity_map.iteritems():
+            from_entity = build_usergrid_entity(from_collection, from_entity_uuid)
+
+            for to_entity_id, to_entity_collection in entity_connections.iteritems():
+                to_entity = build_usergrid_entity(to_entity_collection, parse_id_to_uuid_map.get(to_entity_id))
+
+                connect_entities(from_entity, to_entity, 'pointers')
+                connect_entities(to_entity, from_entity, 'pointers')
+
+
+def parse_args():
+    parser = argparse.ArgumentParser(description='Parse.com Data Importer for Usergrid')
+
+    parser.add_argument('-o', '--org',
+                        help='Name of the Usergrid Org to import data into - must already exist',
+                        type=str,
+                        required=True)
+
+    parser.add_argument('-a', '--app',
+                        help='Name of the Usergrid Application to import data into - must already exist',
+                        type=str,
+                        required=True)
+
+    parser.add_argument('--url',
+                        help='The URL of the Usergrid Instance',
+                        type=str,
+                        required=True)
+
+    parser.add_argument('-f', '--file',
+                        help='Full or relative path of the data file to import',
+                        required=True,
+                        type=str)
+
+    parser.add_argument('--tmp_dir',
+                        help='Directory where data file will be unzipped',
+                        required=True,
+                        type=str)
+
+    parser.add_argument('--client_id',
+                        help='The Client ID for using OAuth Tokens - necessary if app is secured',
+                        required=False,
+                        type=str)
+
+    parser.add_argument('--client_secret',
+                        help='The Client Secret for using OAuth Tokens - necessary if app is secured',
+                        required=False,
+                        type=str)
+
+    my_args = parser.parse_args(sys.argv[1:])
+
+    return vars(my_args)
+
+
+def main():
+    global config
+    config = parse_args()
+
+    init_logging()
+
+    Usergrid.init(org_id=config.get('org'),
+                  app_id=config.get('app'),
+                  base_url=config.get('url'),
+                  client_id=config.get('client_id'),
+                  client_secret=config.get('client_secret'))
+
+    tmp_dir = config.get('tmp_dir')
+    file_path = config.get('file')
+
+    if not os.path.isfile(file_path):
+        logger.critical('File path specified [%s] is not a file!' % file_path)
+        logger.critical('Unable to continue')
+        exit(1)
+
+    if not os.path.isdir(tmp_dir):
+        logger.critical('Temp Directory path specified [%s] is not a directory!' % tmp_dir)
+        logger.critical('Unable to continue')
+        exit(1)
+
+    file_name = os.path.basename(file_path).split('.')[0]
+    working_directory = os.path.join(tmp_dir, file_name)
+
+    try:
+        with zipfile.ZipFile(file_path, 'r') as z:
+            logger.warn('Extracting files to directory: %s' % working_directory)
+            z.extractall(working_directory)
+            logger.warn('Extraction complete')
+
+    except Exception, e:
+        logger.critical(traceback.format_exc(e))
+        logger.critical('Extraction failed')
+        logger.critical('Unable to continue')
+        exit(1)
+
+    load_users_and_roles(working_directory)
+    load_entities(working_directory)
+    create_connections()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/utils/usergrid-util-python/usergrid_tools/permissions/README.md b/utils/usergrid-util-python/usergrid_tools/permissions/README.md
new file mode 100644
index 0000000..8398de6
--- /dev/null
+++ b/utils/usergrid-util-python/usergrid_tools/permissions/README.md
@@ -0,0 +1,3 @@
+# Usergrid Tools (in Python)
+
+THIS WAS USED TO SET THE PERMISISONS for /devices because it is different from /device in Jersey 2.0 and how we use Shiro
\ No newline at end of file
diff --git a/utils/usergrid-util-python/usergrid_tools/permissions/permissions.py b/utils/usergrid-util-python/usergrid_tools/permissions/permissions.py
new file mode 100644
index 0000000..3da722b
--- /dev/null
+++ b/utils/usergrid-util-python/usergrid_tools/permissions/permissions.py
@@ -0,0 +1,168 @@
+# */
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *   http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing,
+# * software distributed under the License is distributed on an
+# * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# * KIND, either express or implied.  See the License for the
+#    * specific language governing permissions and limitations
+# * under the License.
+# */
+
+import json
+from multiprocessing import Pool
+
+import requests
+
+__author__ = 'Jeff.West@yahoo.com'
+
+
+# URL Templates for Usergrid
+import time
+
+org_management_app_url_template = "{api_url}/management/organizations/{org}/applications?client_id={client_id}&client_secret={client_secret}"
+org_management_url_template = "{api_url}/management/organizations/{org}/applications?client_id={client_id}&client_secret={client_secret}"
+org_url_template = "{api_url}/{org}?client_id={client_id}&client_secret={client_secret}"
+app_url_template = "{api_url}/{org}/{app}?client_id={client_id}&client_secret={client_secret}"
+collection_url_template = "{api_url}/{org}/{app}/{collection}?client_id={client_id}&client_secret={client_secret}"
+collection_query_url_template = "{api_url}/{org}/{app}/{collection}?ql={ql}&client_id={client_id}&client_secret={client_secret}&limit={limit}"
+collection_graph_url_template = "{api_url}/{org}/{app}/{collection}?client_id={client_id}&client_secret={client_secret}&limit={limit}"
+connection_query_url_template = "{api_url}/{org}/{app}/{collection}/{uuid}/{verb}?client_id={client_id}&client_secret={client_secret}"
+connecting_query_url_template = "{api_url}/{org}/{app}/{collection}/{uuid}/connecting/{verb}?client_id={client_id}&client_secret={client_secret}"
+connection_create_by_uuid_url_template = "{api_url}/{org}/{app}/{collection}/{uuid}/{verb}/{target_uuid}?client_id={client_id}&client_secret={client_secret}"
+connection_create_by_name_url_template = "{api_url}/{org}/{app}/{collection}/{uuid}/{verb}/{target_type}/{target_name}?client_id={client_id}&client_secret={client_secret}"
+get_entity_url_template = "{api_url}/{org}/{app}/{collection}/{uuid}?client_id={client_id}&client_secret={client_secret}&connections=none"
+get_entity_url_with_connections_template = "{api_url}/{org}/{app}/{collection}/{uuid}?client_id={client_id}&client_secret={client_secret}"
+put_entity_url_template = "{api_url}/{org}/{app}/{collection}/{uuid}?client_id={client_id}&client_secret={client_secret}"
+permissions_url_template = "{api_url}/{org}/{app}/{collection}/{uuid}/permissions?client_id={client_id}&client_secret={client_secret}"
+
+user_credentials_url_template = "{api_url}/{org}/{app}/users/{uuid}/credentials"
+
+org = 'myOrg'
+
+config = {
+    "endpoint": {
+        "api_url": "https://host",
+    },
+    "credentials": {
+        "myOrg": {
+            "client_id": "foo-zw",
+            "client_secret": "bar"
+        }
+    }
+}
+
+api_url = config.get('endpoint').get('api_url')
+
+all_creds = config.get('credentials')
+
+creds = config.get('credentials').get(org)
+
+
+def post(**kwargs):
+    # print kwargs
+    # print "curl -X POST \"%s\" -d '%s'" % (kwargs.get('url'), kwargs.get('data'))
+    return requests.post(**kwargs)
+
+
+def build_role(name, title):
+    role = {
+        'name': name,
+        'roleName': name,
+        'inactivity': 0,
+        'title': title
+    }
+
+    return role
+
+
+def set_default_role(app):
+    print app
+    role_name = 'guest'
+    role = build_role('guest', 'Guest')
+
+    # # put default role
+    role_url = put_entity_url_template.format(org=org,
+                                              app=app,
+                                              uuid=role_name,
+                                              collection='roles',
+                                              api_url=api_url,
+                                              **creds)
+    print 'DELETE ' + role_url
+
+    # # r = requests.delete(role_url)
+    # #
+    # # if r.status_code != 200:
+    # #     print 'ERROR ON DELETE'
+    # #     print r.text
+    # #
+    # # time.sleep(3)
+    #
+    # # # put default role
+    # role_collection_url = collection_url_template.format(org=org,
+    #                                                      app=app,
+    #                                                      collection='roles',
+    #                                                      api_url=api_url,
+    #                                                      **creds)
+    # print 'POST ' + role_collection_url
+    #
+    # r = post(url=role_collection_url, data=json.dumps(role))
+    #
+    # if r.status_code != 200:
+    #     print r.text
+
+    permissions_url = permissions_url_template.format(org=org,
+                                                      limit=1000,
+                                                      app=app,
+                                                      collection='roles',
+                                                      uuid=role_name,
+                                                      api_url=api_url,
+                                                      **creds)
+
+    r = post(url=permissions_url, data=json.dumps({'permission': 'post:/users'}))
+
+    r = post(url=permissions_url, data=json.dumps({'permission': 'put:/devices/*'}))
+    r = post(url=permissions_url, data=json.dumps({'permission': 'put,post:/devices'}))
+
+    r = post(url=permissions_url, data=json.dumps({'permission': 'put:/device/*'}))
+    r = post(url=permissions_url, data=json.dumps({'permission': 'put,post:/device'}))
+
+    if r.status_code != 200:
+        print r.text
+
+
+def list_apps():
+    apps = []
+    source_org_mgmt_url = org_management_url_template.format(org=org,
+                                                             limit=1000,
+                                                             api_url=api_url,
+                                                             **creds)
+
+    r = requests.get(source_org_mgmt_url)
+
+    print r.text
+
+    data = r.json().get('data')
+
+    for app_uuid in data:
+
+        if 'self-care' in app_uuid:
+            parts = app_uuid.split('/')
+            apps.append(parts[1])
+
+    return apps
+
+
+apps = list_apps()
+
+pool = Pool(12)
+
+pool.map(set_default_role, apps)
diff --git a/utils/usergrid-util-python/usergrid_tools/queue/README.md b/utils/usergrid-util-python/usergrid_tools/queue/README.md
new file mode 100644
index 0000000..fee63f7
--- /dev/null
+++ b/utils/usergrid-util-python/usergrid_tools/queue/README.md
@@ -0,0 +1 @@
+dlq_requeue - used for taking messages out of one queue and putting them in another.  useful for reading Deadletter messages and reprocessing them.  You could also add filtering to the logic if you wanted.
\ No newline at end of file
diff --git a/utils/usergrid-util-python/usergrid_tools/queue/dlq-iterator-checker.py b/utils/usergrid-util-python/usergrid_tools/queue/dlq-iterator-checker.py
new file mode 100644
index 0000000..2e3abf4
--- /dev/null
+++ b/utils/usergrid-util-python/usergrid_tools/queue/dlq-iterator-checker.py
@@ -0,0 +1,162 @@
+# */
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *   http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing,
+# * software distributed under the License is distributed on an
+# * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# * KIND, either express or implied.  See the License for the
+#    * specific language governing permissions and limitations
+# * under the License.
+# */
+
+from multiprocessing.pool import Pool
+import argparse
+import json
+import datetime
+import os
+import time
+import sys
+
+import boto
+from boto import sqs
+import requests
+
+__author__ = 'Jeff.West@yahoo.com'
+
+sqs_conn = None
+sqs_queue = None
+
+# THIS WAS USED TO TAKE MESSAGES OUT OF THE DEAD LETTER AND TEST WHETHER THEY EXISTED OR NOT
+
+def total_seconds(td):
+    return (td.microseconds + (td.seconds + td.days * 24.0 * 3600) * 10.0 ** 6) / 10.0 ** 6
+
+
+def total_milliseconds(td):
+    return (td.microseconds + td.seconds * 1000000) / 1000
+
+
+def get_time_remaining(count, rate):
+    if rate == 0:
+        return 'NaN'
+
+    seconds = count * 1.0 / rate
+
+    m, s = divmod(seconds, 60)
+    h, m = divmod(m, 60)
+
+    return "%d:%02d:%02d" % (h, m, s)
+
+
+def parse_args():
+    parser = argparse.ArgumentParser(description='Usergrid Loader - Queue Monitor')
+
+    parser.add_argument('-c', '--config',
+                        help='The queue to load into',
+                        type=str,
+                        default='4g.json')
+
+    my_args = parser.parse_args(sys.argv[1:])
+
+    print str(my_args)
+
+    return vars(my_args)
+
+
+def check_exists(sqs_message):
+    # checks whether an entity is deleted.  if the entity is found then it prints an error message.
+    # this was used when there were many messages going to DLQ and the reason was because the entity had been deleted
+    try:
+        message = json.loads(sqs_message.get_body())
+    except ValueError:
+        print 'Unable to decode JSON: %s' % sqs_message.get_body()
+        return
+    try:
+        for event_name, event_data in message.iteritems():
+            entity_id_scope = event_data.get('entityIdScope')
+            app_id = entity_id_scope.get('applicationScope', {}).get('application', {}).get('uuid')
+            entity_id_scope = entity_id_scope.get('id')
+            entity_id = entity_id_scope.get('uuid')
+            entity_type = entity_id_scope.get('type')
+
+            url = 'http://localhost:8080/{app_id}/{entity_type}/{entity_id}'.format(
+                app_id=app_id,
+                entity_id=entity_id,
+                entity_type=entity_type
+            )
+
+            url = 'https://{host}/{basepath}/{app_id}/{entity_type}/{entity_id}'.format(
+                host='REPLACE',
+                basepath='REPLACE',
+                app_id=app_id,
+                entity_id=entity_id,
+                entity_type=entity_type
+            )
+
+            r = requests.get(url=url,
+                             headers={
+                                 'Authorization': 'Bearer XCA'
+                             })
+
+            if r.status_code != 404:
+                print 'ERROR/FOUND [%s]: %s' % (r.status_code, url)
+            else:
+                print '[%s]: %s' % (r.status_code, url)
+                deleted = sqs_conn.delete_message_from_handle(sqs_queue, sqs_message.receipt_handle)
+                if not deleted:
+                    print 'no delete!'
+
+    except KeyboardInterrupt, e:
+        raise e
+
+
+def main():
+    global sqs_conn, sqs_queue
+    args = parse_args()
+
+    start_time = datetime.datetime.utcnow()
+    first_start_time = start_time
+
+    print "first start: %s" % first_start_time
+
+    with open(args.get('config'), 'r') as f:
+        config = json.load(f)
+
+    sqs_config = config.get('sqs')
+
+    sqs_conn = boto.sqs.connect_to_region(**sqs_config)
+    queue_name = 'baas20sr_usea_baas20sr_usea_index_all_dead'
+    sqs_queue = sqs_conn.get_queue(queue_name)
+
+    last_size = sqs_queue.count()
+
+    print 'Last Size: ' + str(last_size)
+
+    pool = Pool(10)
+
+    keep_going = True
+
+    while keep_going:
+        sqs_messages = sqs_queue.get_messages(
+            num_messages=10,
+            visibility_timeout=10,
+            wait_time_seconds=10)
+
+        if len(sqs_messages) > 0:
+            pool.map(check_exists, sqs_messages)
+        else:
+            print 'DONE!'
+            pool.terminate()
+            keep_going = False
+
+
+if __name__ == '__main__':
+    main()
diff --git a/utils/usergrid-util-python/usergrid_tools/queue/dlq_requeue.py b/utils/usergrid-util-python/usergrid_tools/queue/dlq_requeue.py
new file mode 100644
index 0000000..139d9a6
--- /dev/null
+++ b/utils/usergrid-util-python/usergrid_tools/queue/dlq_requeue.py
@@ -0,0 +1,192 @@
+# */
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *   http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing,
+# * software distributed under the License is distributed on an
+# * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# * KIND, either express or implied.  See the License for the
+#    * specific language governing permissions and limitations
+# * under the License.
+# */
+
+import argparse
+import json
+import datetime
+import os
+import time
+import sys
+import uuid
+from Queue import Empty
+
+import boto
+from boto import sqs
+from multiprocessing import Process, Queue
+
+from boto.sqs.message import RawMessage
+
+__author__ = 'Jeff.West@yahoo.com'
+
+
+def total_seconds(td):
+    return (td.microseconds + (td.seconds + td.days * 24.0 * 3600) * 10.0 ** 6) / 10.0 ** 6
+
+
+def total_milliseconds(td):
+    return (td.microseconds + td.seconds * 1000000) / 1000
+
+
+def get_time_remaining(count, rate):
+    if rate == 0:
+        return 'NaN'
+
+    seconds = count * 1.0 / rate
+
+    m, s = divmod(seconds, 60)
+    h, m = divmod(m, 60)
+
+    return "%d:%02d:%02d" % (h, m, s)
+
+
+def parse_args():
+    parser = argparse.ArgumentParser(description='Usergrid Loader - Queue Monitor')
+
+    parser.add_argument('--readers',
+                        help='The queue to load into',
+                        type=int,
+                        default=10)
+
+    parser.add_argument('--writers',
+                        help='The queue to load into',
+                        type=int,
+                        default=10)
+
+    parser.add_argument('-c', '--config',
+                        help='The queue to load into',
+                        type=str,
+                        default='%s/.usergrid/queue_monitor.json' % os.getenv("HOME"))
+
+    parser.add_argument('--source_queue_name',
+                        help='The queue name to send messages to.  If not specified the filename is used',
+                        default='entities',
+                        type=str)
+
+    parser.add_argument('--target_queue_name',
+                        help='The queue name to send messages to.  If not specified the filename is used',
+                        default='entities',
+                        type=str)
+
+    my_args = parser.parse_args(sys.argv[1:])
+
+    print str(my_args)
+
+    return vars(my_args)
+
+
+class Writer(Process):
+    def __init__(self, queue_name, sqs_config, work_queue):
+        super(Writer, self).__init__()
+        self.queue_name = queue_name
+        self.sqs_config = sqs_config
+        self.work_queue = work_queue
+
+    def run(self):
+        sqs_conn = boto.sqs.connect_to_region(**self.sqs_config)
+
+        sqs_queue = sqs_conn.get_queue(self.queue_name)
+        sqs_queue.set_message_class(RawMessage)
+        counter = 0
+
+        # note that there is a better way but this way works.  update would be to use the batch interface
+
+        batch = []
+
+        while True:
+            try:
+                body = self.work_queue.get(timeout=10)
+                counter += 1
+
+                if counter % 100 == 1:
+                    print 'WRITER %s' % counter
+
+                batch.append((str(uuid.uuid1()), body, 0))
+
+                if len(batch) >= 10:
+                    print 'WRITING BATCH'
+                    sqs_queue.write_batch(batch, delay_seconds=300)
+                    batch = []
+
+            except Empty:
+
+                if len(batch) > 0:
+                    print 'WRITING BATCH'
+                    sqs_queue.write_batch(batch, delay_seconds=300)
+                    batch = []
+
+
+class Reader(Process):
+    def __init__(self, queue_name, sqs_config, work_queue):
+        super(Reader, self).__init__()
+        self.queue_name = queue_name
+        self.sqs_config = sqs_config
+        self.work_queue = work_queue
+
+    def run(self):
+
+        sqs_conn = boto.sqs.connect_to_region(**self.sqs_config)
+
+        sqs_queue = sqs_conn.get_queue(self.queue_name)
+        sqs_queue.set_message_class(RawMessage)
+
+        message_counter = 0
+
+        while True:
+
+            messages = sqs_queue.get_messages(num_messages=10)
+            print 'Read %s messages' % (len(messages))
+            for message in messages:
+                message_counter += 1
+
+                if message_counter % 100 == 1:
+                    print 'READ: %s' % message_counter
+
+                body = message.get_body()
+                self.work_queue.put(body)
+
+            sqs_queue.delete_message_batch(messages)
+
+
+def main():
+    args = parse_args()
+
+    source_queue_name = args.get('source_queue_name')
+    target_queue_name = args.get('target_queue_name')
+
+    start_time = datetime.datetime.utcnow()
+    first_start_time = start_time
+
+    print "first start: %s" % first_start_time
+
+    with open(args.get('config'), 'r') as f:
+        config = json.load(f)
+
+    sqs_config = config.get('sqs')
+
+    work_queue = Queue()
+
+    readers = [Reader(source_queue_name, sqs_config, work_queue) for r in xrange(args.get('readers'))]
+    [r.start() for r in readers]
+
+    writers = [Writer(target_queue_name, sqs_config, work_queue) for r in xrange(args.get('writers'))]
+    [w.start() for w in writers]
+
+
+if __name__ == '__main__':
+    main()
diff --git a/utils/usergrid-util-python/usergrid_tools/queue/queue-config-sample.json b/utils/usergrid-util-python/usergrid_tools/queue/queue-config-sample.json
new file mode 100644
index 0000000..2b20d71
--- /dev/null
+++ b/utils/usergrid-util-python/usergrid_tools/queue/queue-config-sample.json
@@ -0,0 +1,22 @@
+{
+  "ug_base_url": "<your usergrid api endpoint>",
+
+  "sqs": {
+    "region_name": "<aws region for using SQS>",
+    "aws_access_key_id": "<aws key for: creating queue, writing messages>",
+    "aws_secret_access_key": "<aws secret>"
+  },
+
+  "credential_map": {
+    "example1":{''
+      "comments": "for each org you want to load/publish entities to there should be an entry in this map with the org name as the key and the client_id and secret to use when publishing entities",
+      "client_id": "<client_id>",
+      "client_secret": "<client_secret>"
+    },
+    "example2":{
+      "comments": "for each org you want to load/publish entities to there should be an entry in this map with the org name as the key and the client_id and secret to use when publishing entities",
+      "client_id": "<client_id>",
+      "client_secret": "<client_secret>"
+    }
+  }
+}
\ No newline at end of file
diff --git a/utils/usergrid-util-python/usergrid_tools/queue/queue_cleaner.py b/utils/usergrid-util-python/usergrid_tools/queue/queue_cleaner.py
new file mode 100644
index 0000000..1e0983a
--- /dev/null
+++ b/utils/usergrid-util-python/usergrid_tools/queue/queue_cleaner.py
@@ -0,0 +1,174 @@
+# */
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *   http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing,
+# * software distributed under the License is distributed on an
+# * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# * KIND, either express or implied.  See the License for the
+#    * specific language governing permissions and limitations
+# * under the License.
+# */
+
+import argparse
+import json
+import datetime
+import os
+import time
+import sys
+
+import boto
+from boto import sqs
+from multiprocessing import Process, Queue
+
+__author__ = 'Jeff.West@yahoo.com'
+
+
+def total_seconds(td):
+    return (td.microseconds + (td.seconds + td.days * 24.0 * 3600) * 10.0 ** 6) / 10.0 ** 6
+
+
+def total_milliseconds(td):
+    return (td.microseconds + td.seconds * 1000000) / 1000
+
+
+def get_time_remaining(count, rate):
+    if rate == 0:
+        return 'NaN'
+
+    seconds = count * 1.0 / rate
+
+    m, s = divmod(seconds, 60)
+    h, m = divmod(m, 60)
+
+    return "%d:%02d:%02d" % (h, m, s)
+
+
+def parse_args():
+    parser = argparse.ArgumentParser(description='Usergrid Loader - Queue Monitor')
+
+    parser.add_argument('-c', '--config',
+                        help='The queue to load into',
+                        type=str,
+                        default='%s/.usergrid/queue_monitor.json' % os.getenv("HOME"))
+
+    parser.add_argument('-q', '--queue_name',
+                        help='The queue name to send messages to.  If not specified the filename is used',
+                        default='entities',
+                        type=str)
+
+    my_args = parser.parse_args(sys.argv[1:])
+
+    print str(my_args)
+
+    return vars(my_args)
+
+
+class Deleter(Process):
+    def __init__(self, queue_name, sqs_config, work_queue):
+        super(Deleter, self).__init__()
+        self.queue_name = queue_name
+        self.sqs_config = sqs_config
+        self.work_queue = work_queue
+
+    def run(self):
+        sqs_conn = boto.sqs.connect_to_region(**self.sqs_config)
+
+        # queue = sqs_conn.get_queue(self.queue_name)
+
+        while True:
+                delete_me = self.work_queue.get()
+                delete_me.delete()
+                print 'foo'
+
+
+class Worker(Process):
+    def __init__(self, queue_name, sqs_config, delete_queue):
+        super(Worker, self).__init__()
+        self.queue_name = queue_name
+        self.sqs_config = sqs_config
+        self.delete_queue = delete_queue
+
+    def run(self):
+
+        sqs_conn = boto.sqs.connect_to_region(**self.sqs_config)
+
+        queue = sqs_conn.get_queue(self.queue_name)
+
+        last_size = queue.count()
+
+        print 'Starting Size: %s' % last_size
+
+        delete_counter = 0
+        message_counter = 0
+
+        while True:
+
+            messages = queue.get_messages(num_messages=10, visibility_timeout=300)
+
+            for message in messages:
+                message_counter += 1
+                body = message.get_body()
+
+                try:
+
+                    msg = json.loads(body)
+
+                    if 'entityDeleteEvent' in msg:
+                        if msg['entityDeleteEvent']['entityIdScope']['id']['type'] == 'stock':
+
+                            self.delete_queue.put(message)
+                            delete_counter += 1
+
+                            if delete_counter % 100 == 0:
+                                print 'Deleted %s of %s' % (delete_counter, message_counter)
+                    else:
+                        # set it eligible to be read again
+                        message.change_visibility(0)
+
+                        print json.dumps(msg)
+
+                except:
+                    pass
+
+
+
+
+def main():
+    args = parse_args()
+
+    queue_name = args.get('queue_name')
+
+    print 'queue_name=%s' % queue_name
+
+    start_time = datetime.datetime.utcnow()
+    first_start_time = start_time
+
+    print "first start: %s" % first_start_time
+
+    with open(args.get('config'), 'r') as f:
+        config = json.load(f)
+
+    sqs_config = config.get('sqs')
+    last_time = datetime.datetime.utcnow()
+
+    work_queue = Queue()
+
+    deleters = [Deleter(queue_name, sqs_config, work_queue) for x in xrange(100)]
+    [w.start() for w in deleters]
+
+    workers = [Worker(queue_name, sqs_config, work_queue) for x in xrange(100)]
+
+    [w.start() for w in workers]
+
+    time.sleep(60)
+
+if __name__ == '__main__':
+    main()
diff --git a/utils/usergrid-util-python/usergrid_tools/redis/redis_iterator.py b/utils/usergrid-util-python/usergrid_tools/redis/redis_iterator.py
new file mode 100644
index 0000000..6ccdd50
--- /dev/null
+++ b/utils/usergrid-util-python/usergrid_tools/redis/redis_iterator.py
@@ -0,0 +1,52 @@
+# */
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *   http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing,
+# * software distributed under the License is distributed on an
+# * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# * KIND, either express or implied.  See the License for the
+#    * specific language governing permissions and limitations
+# * under the License.
+# */
+
+import json
+from collections import defaultdict
+
+import redis
+import time
+
+__author__ = 'Jeff.West@yahoo.com'
+
+
+cache = redis.StrictRedis(host='localhost', port=6379, db=0)
+# cache.flushall()
+
+ecid_counter = defaultdict(int)
+counter = 0
+
+for key in cache.scan_iter(match='*visited'):
+
+    # print key
+    parts = key.split(':')
+    ecid = parts[0]
+
+    if ecid != 'd22a6f10-d3ef-47e3-bbe3-e1ccade5a241':
+        cache.delete(key)
+        ecid_counter[ecid] += 1
+        counter +=1
+
+        if counter % 100000 == 0 and counter != 0:
+            print json.dumps(ecid_counter, indent=2)
+            print 'Sleeping...'
+            time.sleep(60)
+            print 'AWAKE'
+
+print json.dumps(ecid_counter, indent=2)
diff --git a/utils/usergrid-util-python/usergrid_tools/redis/redisscan.py b/utils/usergrid-util-python/usergrid_tools/redis/redisscan.py
new file mode 100644
index 0000000..c97a28a
--- /dev/null
+++ b/utils/usergrid-util-python/usergrid_tools/redis/redisscan.py
@@ -0,0 +1,37 @@
+# */
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *   http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing,
+# * software distributed under the License is distributed on an
+# * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# * KIND, either express or implied.  See the License for the
+#    * specific language governing permissions and limitations
+# * under the License.
+# */
+
+import redis
+
+__author__ = 'Jeff.West@yahoo.com'
+
+
+r = redis.Redis("localhost", 6379)
+for key in r.scan_iter():
+    # print '%s: %s' % (r.ttl(key), key)
+
+    if key[0:4] == 'http':
+        r.set(key, 1)
+        # print 'set value'
+
+    if r.ttl(key) > 3600 \
+            or key[0:3] in ['v3:', 'v2', 'v1'] \
+            or ':visited' in key:
+        r.delete(key)
+        print 'delete %s' % key