Merge branch 'pr/225' into two-dot-o

Conflicts:
	stack/config/src/main/resources/usergrid-default.properties
	stack/core/src/main/resources/usergrid-core-context.xml
diff --git a/stack/config/src/main/resources/usergrid-default.properties b/stack/config/src/main/resources/usergrid-default.properties
index d42d12c..53b6445 100644
--- a/stack/config/src/main/resources/usergrid-default.properties
+++ b/stack/config/src/main/resources/usergrid-default.properties
@@ -97,11 +97,23 @@
 #The maximum number of pending mutations allowed in ram before it is flushed to cassandra
 cassandra.mutation.flushsize=2000
 
-#Keyspace to use for locking
-#Note that if this is deployed in a production cluster, the RF on the keyspace MUST be updated to use an odd number for it's replication Factor.
-#Even numbers for RF can potentially case the locks to fail, via "split brain" when read at QUORUM on lock verification
+# Keyspace to use for locking
+# Note that if this is deployed in a production cluster, the RF on the keyspace
+# MUST be updated to use an odd number for its replication factor. Even numbers
+# for RF can potentially cause the locks to fail, via "split brain" when read at
+# QUORUM on lock verification.
 cassandra.lock.keyspace=Locks
 
+# locking read & write policies
+cassandra.lock.readcl=LOCAL_QUORUM
+cassandra.lock.writecl=LOCAL_QUORUM
+
+# Timeout in ms before Hector considers a thrift socket dead
+cassandra.thriftSocketTimeout=0
+# Whether Hector should use the host's TCP keepalive settings
+cassandra.useSocketKeepalive=false
+
+
 
 ###############################################################################
 #
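The two lock consistency-level properties above are consumed by the
hlockConsistencyLevelPolicy bean added to usergrid-core-context.xml further
down. A minimal Java sketch of the equivalent programmatic Hector setup
(class and method names here are illustrative, hard-coding the LOCAL_QUORUM
defaults):

    import me.prettyprint.cassandra.model.ConfigurableConsistencyLevel;
    import me.prettyprint.hector.api.HConsistencyLevel;

    public class LockConsistencyExample {
        // Builds the same policy the Spring bean wires from
        // cassandra.lock.readcl / cassandra.lock.writecl.
        public static ConfigurableConsistencyLevel lockPolicy() {
            ConfigurableConsistencyLevel policy = new ConfigurableConsistencyLevel();
            policy.setDefaultReadConsistencyLevel(HConsistencyLevel.LOCAL_QUORUM);
            policy.setDefaultWriteConsistencyLevel(HConsistencyLevel.LOCAL_QUORUM);
            return policy;
        }
    }

LOCAL_QUORUM keeps lock reads and writes within the local datacenter, which
pairs with the odd-replication-factor requirement noted above.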
@@ -185,17 +197,22 @@
 #
 # Scheduler setup
 
-#Time in milliseconds that a job can be started without a heartbeat before being considered dead.
-#Note that this must be high enough so that jobs that are iteration based can run an iteration and update the heartbeat
+# Time in milliseconds that a started job can go without a heartbeat before being considered dead.
+# Note that this must be high enough that iteration-based jobs can complete an iteration and update the heartbeat.
 usergrid.scheduler.job.timeout=120000
-#The path to the queue in the managment app to get jobs from
+
+# The path to the queue in the management app to get jobs from
 usergrid.scheduler.job.queueName=/jobs
-#The number of executor threads to allow
+
+# The number of executor threads to allow
 usergrid.scheduler.job.workers=4
-#Poll interval to check for new jobs in millseconds.  5 seconds is the default.  It will run all jobs up to current so this won't limit throughput
+
+# Poll interval to check for new jobs in milliseconds.  5 seconds is the default.
+# All jobs up to the current time are run, so this won't limit throughput.
 usergrid.scheduler.job.interval=5000
-#The max number of times a job can fail before removing it permanently. Note that this count is INCLUSIVE.
-#If the value is 10, the 11th fail will mark the job as dead
+
+# The max number of times a job can fail before it is removed permanently. Note that this count is INCLUSIVE.
+# If the value is 10, the 11th failure will mark the job as dead.
 usergrid.scheduler.job.maxfail=10
 
 # Zookeeper instances
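The INCLUSIVE wording for usergrid.scheduler.job.maxfail is easy to misread,
so here is a minimal sketch of the intended check (names are illustrative,
not the scheduler's actual code):

    public class MaxFailExample {
        // Inclusive semantics: with maxFail = 10, failures 1..10 still retry;
        // the 11th failure marks the job dead.
        static boolean isDead(int failCount, int maxFail) {
            return failCount > maxFail;
        }

        public static void main(String[] args) {
            System.out.println(isDead(10, 10)); // false - job is retried
            System.out.println(isDead(11, 10)); // true  - job is marked dead
        }
    }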
diff --git a/stack/core/src/main/resources/usergrid-core-context.xml b/stack/core/src/main/resources/usergrid-core-context.xml
index cd40d6d..4424896 100644
--- a/stack/core/src/main/resources/usergrid-core-context.xml
+++ b/stack/core/src/main/resources/usergrid-core-context.xml
@@ -43,25 +43,24 @@
 
 	<!-- The Time Resolution used for the cluster -->
 	<bean id="microsecondsTimeResolution" class="me.prettyprint.cassandra.service.clock.MicrosecondsClockResolution" />
-  <bean id="traceTagManager" class="org.apache.usergrid.persistence.cassandra.util.TraceTagManager"/>
-  <bean id="traceTagReporter" class="org.apache.usergrid.persistence.cassandra.util.Slf4jTraceTagReporter"/>
+    <bean id="traceTagManager" class="org.apache.usergrid.persistence.cassandra.util.TraceTagManager"/>
+    <bean id="traceTagReporter" class="org.apache.usergrid.persistence.cassandra.util.Slf4jTraceTagReporter"/>
 
-  <bean id="taggedOpTimer" class="org.apache.usergrid.persistence.cassandra.util.TaggedOpTimer">
-    <constructor-arg ref="traceTagManager"/>
-  </bean>
+    <bean id="taggedOpTimer" class="org.apache.usergrid.persistence.cassandra.util.TaggedOpTimer">
+      <constructor-arg ref="traceTagManager"/>
+    </bean>
 
 	<bean id="cassandraHostConfigurator" class="me.prettyprint.cassandra.service.CassandraHostConfigurator">
 		<constructor-arg value="${cassandra.url}" />
-        <!-- set the pool size if it's available.  If not go with 50 -->
-        <property name="maxActive" value="${cassandra.connections:50}"/>
-        <!--<property orgAppName="clockResolution" ref="microsecondsTimeResolution" />-->
+        <!-- set the pool size if it's available.  If not go with 20 -->
+        <property name="maxActive" value="${cassandra.connections:20}"/>
+        <property name="cassandraThriftSocketTimeout" value="${cassandra.thriftSocketTimeout:0}" />
+        <property name="useSocketKeepalive" value="${cassandra.useSocketKeepalive:false}" />
+        <!-- <property name="clockResolution" ref="microsecondsTimeResolution" /> -->
         <property name="opTimer" ref="taggedOpTimer"/>
         <property name="loadBalancingPolicy" ref="loadBalancingPolicy"/>
 	</bean>
 
-
-
-
 	<bean id="cassandraCluster" class="me.prettyprint.cassandra.service.ThriftCluster">
 		<constructor-arg value="${cassandra.cluster}" />
 		<constructor-arg ref="cassandraHostConfigurator" />
@@ -70,20 +69,25 @@
     <bean id="loadBalancingPolicy" class="me.prettyprint.cassandra.connection.DynamicLoadBalancingPolicy"/>
 
 	<!--  locking for a single node -->
-<!--	<bean orgAppName="lockManager"
-        class="org.apache.usergrid.locking.singlenode.SingleNodeLockManagerImpl" />-->
+
+    <!-- <bean name="lockManager" class="org.apache.usergrid.locking.singlenode.SingleNodeLockManagerImpl" /> -->
 
 	<!--  hector based locks -->
-	<!-- Note that if this is deployed in a production cluster, the RF on the keyspace
-    MUST be updated to use an odd number for it's replication Factor.  Even numbers can
-    potentially case the locks to fail, via "split brain" when read at QUORUM on lock verification-->
+	<!-- Note that if this is deployed in a production cluster, the RF on the keyspace MUST
+	     be updated to use an odd number for its replication factor. Even numbers can potentially
+	     cause the locks to fail, via "split brain" when read at QUORUM on lock verification -->
 
-	<bean name="lockManager" class="org.apache.usergrid.locking.cassandra.HectorLockManagerImpl" >
+	<bean name="lockManager" class="org.apache.usergrid.locking.cassandra.HectorLockManagerImpl">
 		<property name="cluster" ref="cassandraCluster"/>
 		<property name="keyspaceName" value="${cassandra.lock.keyspace}"/>
-		<property name="consistencyLevelPolicy" ref="consistencyLevelPolicy"/>
+		<property name="consistencyLevelPolicy" ref="hlockConsistencyLevelPolicy" />
 	</bean>
 
+    <bean name="hlockConsistencyLevelPolicy" class="me.prettyprint.cassandra.model.ConfigurableConsistencyLevel">
+        <property name="defaultReadConsistencyLevel" value="${cassandra.lock.readcl}"/>
+        <property name="defaultWriteConsistencyLevel" value="${cassandra.lock.writecl}"/>
+    </bean>
+
 	<!--  zookeeper locks -->
 	<!--
-	<bean orgAppName="lockManager" class="org.apache.usergrid.locking.zookeeper.ZooKeeperLockManagerImpl" >
+	<bean name="lockManager" class="org.apache.usergrid.locking.zookeeper.ZooKeeperLockManagerImpl" >
@@ -92,7 +96,6 @@
-		<property orgAppName="maxAttempts" value="10"/>
+		<property name="maxAttempts" value="10"/>
 	</bean>  -->
 
-
     <bean id="injector"
    		class="org.apache.usergrid.corepersistence.GuiceFactory">
    		<constructor-arg ref="cassandraHostConfigurator" />
@@ -162,6 +165,8 @@
     	<constructor-arg value="${usergrid.index.defaultbucketsize}"/>
     </bean>
 
+    <bean id="entityManager" class="org.apache.usergrid.persistence.cassandra.EntityManagerImpl" scope="prototype"/>
+
     <bean id="mailUtils" class="org.apache.usergrid.utils.MailUtils" />
 
     <bean id="traceTagAspect" class="org.apache.usergrid.persistence.cassandra.util.TraceTagAspect"/>