[maven-release-plugin]  copy for tag jackrabbit-oak-1.5.6

git-svn-id: https://svn.apache.org/repos/asf/jackrabbit/oak/tags/jackrabbit-oak-1.5.6@1753665 13f79535-47bb-0310-9956-ffa450edef68
diff --git a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentMK.java b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentMK.java
index 8269fc5..f3ad83f 100644
--- a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentMK.java
+++ b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentMK.java
@@ -17,6 +17,7 @@
 package org.apache.jackrabbit.oak.plugins.document;
 
 import static com.google.common.base.Preconditions.checkArgument;
+import static org.apache.jackrabbit.oak.commons.PathUtils.concat;
 
 import java.io.InputStream;
 import java.net.UnknownHostException;
@@ -52,6 +53,7 @@
 import org.apache.jackrabbit.oak.commons.json.JsopReader;
 import org.apache.jackrabbit.oak.commons.json.JsopStream;
 import org.apache.jackrabbit.oak.commons.json.JsopTokenizer;
+import org.apache.jackrabbit.oak.commons.json.JsopWriter;
 import org.apache.jackrabbit.oak.json.JsopDiff;
 import org.apache.jackrabbit.oak.plugins.blob.BlobStoreStats;
 import org.apache.jackrabbit.oak.plugins.blob.CachingBlobStore;
@@ -197,7 +199,6 @@
         final DocumentNodeState before = nodeStore.getNode(path, fromRev);
         final DocumentNodeState after = nodeStore.getNode(path, toRev);
         if (before == null || after == null) {
-            // TODO implement correct behavior if the node doesn't/didn't exist
             String msg = String.format("Diff is only supported if the node exists in both cases. " +
                             "Node [%s], fromRev [%s] -> %s, toRev [%s] -> %s",
                     path, fromRev, before != null, toRev, after != null);
@@ -242,7 +243,7 @@
             boolean includeId = filter != null && filter.contains(":id");
             includeId |= filter != null && filter.contains(":hash");
             json.object();
-            n.append(json, includeId);
+            append(n, json, includeId);
             int max;
             if (maxChildNodes == -1) {
                 max = Integer.MAX_VALUE;
@@ -261,7 +262,6 @@
                 json.key(name).object().endObject();
             }
             if (c.hasMore) {
-                // TODO use a better way to notify there are more children
                 json.key(":childNodeCount").value(Long.MAX_VALUE);
             } else {
                 json.key(":childNodeCount").value(c.children.size());
@@ -306,7 +306,6 @@
 
     public String merge(String branchRevisionId, String message)
             throws DocumentStoreException {
-        // TODO improve implementation if needed
         RevisionVector revision = RevisionVector.fromString(branchRevisionId);
         if (!revision.isBranch()) {
             throw new DocumentStoreException("Not a branch: " + branchRevisionId);
@@ -408,7 +407,7 @@
                         throw new DocumentStoreException("Node not found: " + path + " in revision " + baseRevId);
                     }
                     commit.removeNode(path, toRemove);
-                    nodeStore.markAsDeleted(toRemove, commit, true);
+                    markAsDeleted(toRemove, commit, true);
                     break;
                 case '^':
                     t.read(':');
@@ -426,7 +425,6 @@
                     commit.updateProperty(p, propertyName, value);
                     break;
                 case '>': {
-                    // TODO support moving nodes that were modified within this commit
                     t.read(':');
                     String targetPath = t.readString();
                     if (!PathUtils.isAbsolute(targetPath)) {
@@ -438,11 +436,10 @@
                     } else if (nodeExists(targetPath, baseRevId)) {
                         throw new DocumentStoreException("Node already exists: " + targetPath + " in revision " + baseRevId);
                     }
-                    nodeStore.moveNode(source, targetPath, commit);
+                    moveNode(source, targetPath, commit);
                     break;
                 }
                 case '*': {
-                    // TODO support copying nodes that were modified within this commit
                     t.read(':');
                     String targetPath = t.readString();
                     if (!PathUtils.isAbsolute(targetPath)) {
@@ -454,7 +451,7 @@
                     } else if (nodeExists(targetPath, baseRevId)) {
                         throw new DocumentStoreException("Node already exists: " + targetPath + " in revision " + baseRevId);
                     }
-                    nodeStore.copyNode(source, targetPath, commit);
+                    copyNode(source, targetPath, commit);
                     break;
                 }
                 default:
@@ -483,6 +480,55 @@
         commit.addNode(n);
     }
 
+    private void copyNode(DocumentNodeState source, String targetPath, Commit commit) {
+        moveOrCopyNode(false, source, targetPath, commit);
+    }
+
+    private void moveNode(DocumentNodeState source, String targetPath, Commit commit) {
+        moveOrCopyNode(true, source, targetPath, commit);
+    }
+
+    private void markAsDeleted(DocumentNodeState node, Commit commit, boolean subTreeAlso) {
+        commit.removeNode(node.getPath(), node);
+
+        if (subTreeAlso) {
+            // recurse down the tree
+            for (DocumentNodeState child : nodeStore.getChildNodes(node, null, Integer.MAX_VALUE)) {
+                markAsDeleted(child, commit, true);
+            }
+        }
+    }
+
+    private void moveOrCopyNode(boolean move,
+                                DocumentNodeState source,
+                                String targetPath,
+                                Commit commit) {
+        RevisionVector destRevision = commit.getBaseRevision().update(commit.getRevision());
+        DocumentNodeState newNode = new DocumentNodeState(nodeStore, targetPath, destRevision);
+        source.copyTo(newNode);
+
+        commit.addNode(newNode);
+        if (move) {
+            markAsDeleted(source, commit, false);
+        }
+        for (DocumentNodeState child : nodeStore.getChildNodes(source, null, Integer.MAX_VALUE)) {
+            String childName = PathUtils.getName(child.getPath());
+            String destChildPath = concat(targetPath, childName);
+            moveOrCopyNode(move, child, destChildPath, commit);
+        }
+    }
+
+    private static void append(DocumentNodeState node,
+                               JsopWriter json,
+                               boolean includeId) {
+        if (includeId) {
+            json.key(":id").value(node.getId());
+        }
+        for (String name : node.getPropertyNames()) {
+            json.key(name).encodedValue(node.getPropertyAsString(name));
+        }
+    }
+
     //----------------------------< Builder >-----------------------------------
 
     /**
diff --git a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeState.java b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeState.java
index 2deb5f0..6ee9ec1 100644
--- a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeState.java
+++ b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeState.java
@@ -45,8 +45,6 @@
 import org.apache.jackrabbit.oak.spi.state.ChildNodeEntry;
 import org.apache.jackrabbit.oak.spi.state.NodeBuilder;
 import org.apache.jackrabbit.oak.spi.state.NodeState;
-import org.apache.jackrabbit.oak.util.PerfLogger;
-import org.slf4j.LoggerFactory;
 
 import com.google.common.base.Function;
 import com.google.common.collect.Iterables;
@@ -392,15 +390,6 @@
         return path + "@" + lastRevision;
     }
 
-    void append(JsopWriter json, boolean includeId) {
-        if (includeId) {
-            json.key(":id").value(getId());
-        }
-        for (String p : properties.keySet()) {
-            json.key(p).encodedValue(getPropertyAsString(p));
-        }
-    }
-
     void setLastRevision(RevisionVector lastRevision) {
         this.lastRevision = lastRevision;
     }
diff --git a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStore.java b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStore.java
index 85da170..e9a6871 100644
--- a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStore.java
+++ b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStore.java
@@ -845,26 +845,6 @@
         splitCandidates.put(id, id);
     }
 
-    void copyNode(DocumentNodeState source, String targetPath, Commit commit) {
-        moveOrCopyNode(false, source, targetPath, commit);
-    }
-
-    void moveNode(DocumentNodeState source, String targetPath, Commit commit) {
-        moveOrCopyNode(true, source, targetPath, commit);
-    }
-
-    void markAsDeleted(DocumentNodeState node, Commit commit, boolean subTreeAlso) {
-        commit.removeNode(node.getPath(), node);
-
-        if (subTreeAlso) {
-            // recurse down the tree
-            // TODO causes issue with large number of children
-            for (DocumentNodeState child : getChildNodes(node, null, Integer.MAX_VALUE)) {
-                markAsDeleted(child, commit, true);
-            }
-        }
-    }
-
     @CheckForNull
     AbstractDocumentNodeState getSecondaryNodeState(@Nonnull final String path,
                               @Nonnull final RevisionVector rootRevision,
@@ -2378,33 +2358,6 @@
         return System.currentTimeMillis();
     }
 
-    private void moveOrCopyNode(boolean move,
-                                DocumentNodeState source,
-                                String targetPath,
-                                Commit commit) {
-        // TODO Optimize - Move logic would not work well with very move of very large subtrees
-        // At minimum we can optimize by traversing breadth wise and collect node id
-        // and fetch them via '$in' queries
-
-        // TODO Transient Node - Current logic does not account for operations which are part
-        // of this commit i.e. transient nodes. If its required it would need to be looked
-        // into
-
-        RevisionVector destRevision = commit.getBaseRevision().update(commit.getRevision());
-        DocumentNodeState newNode = new DocumentNodeState(this, targetPath, destRevision);
-        source.copyTo(newNode);
-
-        commit.addNode(newNode);
-        if (move) {
-            markAsDeleted(source, commit, false);
-        }
-        for (DocumentNodeState child : getChildNodes(source, null, Integer.MAX_VALUE)) {
-            String childName = PathUtils.getName(child.getPath());
-            String destChildPath = concat(targetPath, childName);
-            moveOrCopyNode(move, child, destChildPath, commit);
-        }
-    }
-
     /**
      * Creates and returns a MarkSweepGarbageCollector if the current BlobStore
      * supports garbage collection
diff --git a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/mongo/MongoDocumentStore.java b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/mongo/MongoDocumentStore.java
index 2592ae7..038b557 100644
--- a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/mongo/MongoDocumentStore.java
+++ b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/mongo/MongoDocumentStore.java
@@ -43,6 +43,7 @@
 
 import com.google.common.base.Stopwatch;
 import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.ImmutableSet;
 import com.google.common.collect.Iterators;
 import com.google.common.collect.Lists;
 import com.google.common.util.concurrent.UncheckedExecutionException;
@@ -133,6 +134,11 @@
 
     public static final int IN_CLAUSE_BATCH_SIZE = 500;
 
+    private static final ImmutableSet<String> SERVER_DETAIL_FIELD_NAMES
+            = ImmutableSet.<String>builder()
+            .add("host", "process", "connections", "repl", "storageEngine", "mem")
+            .build();
+
     private final DBCollection nodes;
     private final DBCollection clusterNodes;
     private final DBCollection settings;
@@ -226,7 +232,8 @@
     private boolean hasModifiedIdCompoundIndex = true;
 
     public MongoDocumentStore(DB db, DocumentMK.Builder builder) {
-        String version = checkVersion(db);
+        CommandResult serverStatus = db.command("serverStatus");
+        String version = checkVersion(serverStatus);
         metadata = ImmutableMap.<String,String>builder()
                 .put("type", "mongo")
                 .put("version", version)
@@ -284,14 +291,17 @@
         this.nodeLocks = new StripedNodeDocumentLocks();
         this.nodesCache = builder.buildNodeDocumentCache(this, nodeLocks);
 
-        LOG.info("Configuration maxReplicationLagMillis {}, " +
-                "maxDeltaForModTimeIdxSecs {}, disableIndexHint {}, {}",
-                maxReplicationLagMillis, maxDeltaForModTimeIdxSecs,
-                disableIndexHint, db.getWriteConcern());
+        LOG.info("Connected to MongoDB {} with maxReplicationLagMillis {}, " +
+                "maxDeltaForModTimeIdxSecs {}, disableIndexHint {}, " +
+                "{}, serverStatus {}",
+                version, maxReplicationLagMillis, maxDeltaForModTimeIdxSecs,
+                disableIndexHint, db.getWriteConcern(),
+                serverDetails(serverStatus));
     }
 
-    private static String checkVersion(DB db) {
-        String version = db.command("buildInfo").getString("version");
+    @Nonnull
+    private static String checkVersion(CommandResult serverStatus) {
+        String version = serverStatus.getString("version");
         Matcher m = Pattern.compile("^(\\d+)\\.(\\d+)\\..*").matcher(version);
         if (!m.matches()) {
             throw new IllegalArgumentException("Malformed MongoDB version: " + version);
@@ -310,6 +320,18 @@
         return version;
     }
 
+    @Nonnull
+    private static String serverDetails(CommandResult serverStatus) {
+        Map<String, Object> details = Maps.newHashMap();
+        for (String key : SERVER_DETAIL_FIELD_NAMES) {
+            Object value = serverStatus.get(key);
+            if (value != null) {
+                details.put(key, value);
+            }
+        }
+        return details.toString();
+    }
+
     @Override
     public void finalize() throws Throwable {
         super.finalize();
diff --git a/oak-core/src/main/java/org/apache/jackrabbit/oak/security/user/UserConfigurationImpl.java b/oak-core/src/main/java/org/apache/jackrabbit/oak/security/user/UserConfigurationImpl.java
index e12928b..eddba3a 100644
--- a/oak-core/src/main/java/org/apache/jackrabbit/oak/security/user/UserConfigurationImpl.java
+++ b/oak-core/src/main/java/org/apache/jackrabbit/oak/security/user/UserConfigurationImpl.java
@@ -95,6 +95,10 @@
                 label = "Hash Salt Size",
                 description = "Salt size to generate the password hash.",
                 intValue = PasswordUtil.DEFAULT_SALT_SIZE),
+        @Property(name = UserConstants.PARAM_OMIT_ADMIN_PW,
+                label = "Omit Admin Password",
+                description = "Boolean flag to prevent the administrator account to be created with a password upon repository initialization. Please note that changing this option after the initial repository setup will have no effect.",
+                boolValue = false),
         @Property(name = UserConstants.PARAM_SUPPORT_AUTOSAVE,
                 label = "Autosave Support",
                 description = "Configuration option to enable autosave behavior. Note: this config option is present for backwards compatibility with Jackrabbit 2.x and should only be used for broken code that doesn't properly verify the autosave behavior (see Jackrabbit API). If this option is turned on autosave will be enabled by default; otherwise autosave is not supported.",
diff --git a/oak-doc/src/site/markdown/plugins/blobstore.md b/oak-doc/src/site/markdown/plugins/blobstore.md
index f37fa41..9731263 100644
--- a/oak-doc/src/site/markdown/plugins/blobstore.md
+++ b/oak-doc/src/site/markdown/plugins/blobstore.md
@@ -144,6 +144,9 @@
 
 * `MarkSweepGarbageCollector#collectGarbage()` (Oak 1.0.x)
 * `MarkSweepGarbageCollector#collectGarbage(false)` (Oak 1.2.x)
+* If the MBeans are registered in the MBeanServer then the following can also be used to trigger GC:
+    * `BlobGC#startBlobGC()` which takes in a `markOnly` boolean parameter to indicate mark only or complete gc
+
  
 #### Shared DataStore Blob Garbage Collection (Since 1.2.0)
 
@@ -175,6 +178,105 @@
 * SharedS3DataStore - Extends the S3DataStore to enable sharing of the data store with
                         multiple repositories                        
  
+##### Checking GC status for Shared DataStore Garbage Collection
+
+The status of the GC operations on all the repositories connected to the DataStore can be checked by calling:
+
+* `MarkSweepGarbageCollector#getStats()` which returns a list of `GarbageCollectionRepoStats` objects having the following fields:
+    * repositoryId - The repositoryId of the repository
+    * local - Indicates whether the repositoryId is of local instance where the operation ran
+    * startTime - Start time of the mark operation on the repository
+    * endTime - End time of the mark operation on the repository
+    * length - Size of the references file created
+    * numLines - Number of references available
+* If the MBeans are registered in the MBeanServer then the following can also be used to retrieve the status:
+    * `BlobGC#getBlobGCStatus()` which returns a CompositeData with the above fields.
+    
+This operation can also be used to ascertain when the 'Mark' phase has executed successfully on all the repositories, as part of the steps to automate the GC in the Shared DataStore configuration.
+It should be a sufficient condition to check that the references file is available on all repositories.
+If the server running Oak has remote JMX connection enabled the following code example can be used to connect remotely and check if the mark phase has concluded on all repository instances.
+
+
+```java
+import java.util.Hashtable;
+
+import javax.management.openmbean.TabularData;
+import javax.management.MBeanServerConnection;
+import javax.management.MBeanServerInvocationHandler;
+import javax.management.ObjectName;
+import javax.management.remote.JMXConnectorFactory;
+import javax.management.remote.JMXServiceURL;
+import javax.management.openmbean.CompositeData;
+
+
+/**
+ * Checks the status of the mark operation on all instances sharing the DataStore.
+ */
+public class GetGCStats {
+
+    public static void main(String[] args) throws Exception {
+        String userid = "<user>";
+        String password = "<password>";
+        String serverUrl = "service:jmx:rmi:///jndi/rmi://<host:port>/jmxrmi";
+        String OBJECT_NAME = "org.apache.jackrabbit.oak:name=Document node store blob garbage collection,type=BlobGarbageCollection";
+        String[] buffer = new String[] {userid, password};
+        Hashtable<String, String[]> attributes = new Hashtable<String, String[]>();
+        attributes.put("jmx.remote.credentials", buffer);
+        MBeanServerConnection server = JMXConnectorFactory
+            .connect(new JMXServiceURL(serverUrl), attributes).getMBeanServerConnection();
+        ObjectName name = new ObjectName(OBJECT_NAME);
+        BlobGCMBean gcBean = MBeanServerInvocationHandler
+            .newProxyInstance(server, name, BlobGCMBean.class, false);
+
+        boolean markDone = checkMarkDone("GlobalMarkStats", gcBean.getGlobalMarkStats());
+        System.out.println("Mark done on all instances - " + markDone);
+    }
+
+    public static boolean checkMarkDone(String operation, TabularData data) {
+        System.out.println("-----Operation " + operation + "--------------");
+
+        boolean markDoneOnOthers = true;
+        try {
+            System.out.println("Number of instances " + data.size());
+
+            for (Object o : data.values()) {
+                CompositeData row = (CompositeData) o;
+                String repositoryId = row.get("repositoryId").toString();
+                System.out.println("Repository  " + repositoryId);
+
+                if ((!row.containsKey("markEndTime")
+                        || row.get("markEndTime") == null
+                        || row.get("markEndTime").toString().length() == 0)) {
+                    markDoneOnOthers = false;
+                    System.out.println("Mark not done on repository : " + repositoryId);
+                }
+            }
+        } catch (Exception e) {
+            System.out.println(
+                "-----Error during operation " + operation + "--------------" + e.getMessage());
+        }
+        System.out.println("-----Completed " + operation + "--------------");
+
+        return markDoneOnOthers;
+    }
+}
+```
+
+#### Consistency Check
+The data store consistency check will report any data store binaries that are missing but are still referenced. The consistency check can be triggered by:
+
+* `MarkSweepGarbageCollector#checkConsistency` 
+* If the MBeans are registered in the MBeanServer then the following can also be used:
+    * `BlobGCMbean#checkConsistency`
+
+After the consistency check is complete, a message will show the number of binaries reported as missing. If the number is greater than 0, check the logs configured for `org.apache.jackrabbit.oak.plugins.blob.MarkSweepGarbageCollector` for more details on the missing binaries. 
+
+Below is an example of how the missing binaries are reported in the logs:
+>
+> 11:32:39.673 INFO [main] MarkSweepGarbageCollector.java:600 Consistency check found [1] missing blobs
+> 11:32:39.673 WARN [main] MarkSweepGarbageCollector.java:602 Consistency check failure in the the blob store : DataStore backed BlobStore [org.apache.jackrabbit.oak.plugins.blob.datastore.OakFileDataStore], check missing candidates in file /tmp/gcworkdir-1467352959243/gccand-1467352959243
+
+
 
 [1]: http://serverfault.com/questions/52861/how-does-dropbox-version-upload-large-files
 [2]: http://wiki.apache.org/jackrabbit/DataStore
diff --git a/oak-pojosr/src/test/groovy/org/apache/jackrabbit/oak/run/osgi/SecurityProviderRegistrationTest.groovy b/oak-pojosr/src/test/groovy/org/apache/jackrabbit/oak/run/osgi/SecurityProviderRegistrationTest.groovy
index 5ec615c..dd34ee3 100644
--- a/oak-pojosr/src/test/groovy/org/apache/jackrabbit/oak/run/osgi/SecurityProviderRegistrationTest.groovy
+++ b/oak-pojosr/src/test/groovy/org/apache/jackrabbit/oak/run/osgi/SecurityProviderRegistrationTest.groovy
@@ -50,7 +50,7 @@
     protected PojoServiceRegistry getRegistry() {
         return registry
     }
-/**
+    /**
      * Test that, without any additional configuration, a SecurityProvider
      * service is registered by default.
      */
diff --git a/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/CachingSegmentReader.java b/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/CachingSegmentReader.java
index 0f45f1b..a23fd89 100644
--- a/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/CachingSegmentReader.java
+++ b/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/CachingSegmentReader.java
@@ -20,7 +20,6 @@
 package org.apache.jackrabbit.oak.segment;
 
 import static com.google.common.base.Preconditions.checkNotNull;
-import static java.lang.Long.getLong;
 
 import javax.annotation.CheckForNull;
 import javax.annotation.Nonnull;
@@ -38,10 +37,7 @@
  */
 public class CachingSegmentReader implements SegmentReader {
     public static final int DEFAULT_STRING_CACHE_MB = 256;
-    public static final String STRING_CACHE_MB = "oak.segment.stringCacheMB";
-
     public static final int DEFAULT_TEMPLATE_CACHE_MB = 64;
-    public static final String TEMPLATE_CACHE_MB = "oak.segment.templateCacheMB";
 
     @Nonnull
     private final Supplier<SegmentWriter> writer;
@@ -78,8 +74,8 @@
             long templateCacheMB) {
         this.writer = checkNotNull(writer);
         this.blobStore = blobStore;
-        stringCache = new StringCache(getLong(STRING_CACHE_MB, stringCacheMB) * 1024 * 1024);
-        templateCache = new TemplateCache(getLong(TEMPLATE_CACHE_MB, templateCacheMB * 1024 * 1024));
+        stringCache = new StringCache(stringCacheMB * 1024 * 1024);
+        templateCache = new TemplateCache(templateCacheMB * 1024 * 1024);
     }
 
     /**
diff --git a/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/ReaderCache.java b/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/ReaderCache.java
index 7139a70..3dbe4d3 100644
--- a/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/ReaderCache.java
+++ b/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/ReaderCache.java
@@ -70,24 +70,13 @@
      */
     protected ReaderCache(long maxWeight, int averageWeight, @Nonnull String name) {
         this.name = checkNotNull(name);
-        if (maxWeight >= 0) {
-            fastCache = new FastCache<>();
-            cache = CacheLIRS.<CacheKey<T>, T>newBuilder()
-                    .module(name)
-                    .maximumWeight(maxWeight)
-                    .averageWeight(averageWeight)
-                    .weigher(weigher)
-                    .build();
-        } else {
-            fastCache = null;
-            // dummy cache to prevent NPE on the getStats() call
-            cache = CacheLIRS.<CacheKey<T>, T> newBuilder()
-                    .module(name)
-                    .maximumWeight(1)
-                    .averageWeight(averageWeight)
-                    .weigher(weigher)
-                    .build();
-        }
+        fastCache = new FastCache<>();
+        cache = CacheLIRS.<CacheKey<T>, T>newBuilder()
+                .module(name)
+                .maximumWeight(maxWeight)
+                .averageWeight(averageWeight)
+                .weigher(weigher)
+                .build();
     }
 
     @Nonnull
diff --git a/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/SegmentCache.java b/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/SegmentCache.java
index f29a599..a29a5ee 100644
--- a/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/SegmentCache.java
+++ b/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/SegmentCache.java
@@ -24,6 +24,7 @@
 
 import javax.annotation.Nonnull;
 
+import com.google.common.cache.RemovalCause;
 import com.google.common.cache.Weigher;
 import org.apache.jackrabbit.oak.cache.CacheLIRS;
 import org.apache.jackrabbit.oak.cache.CacheLIRS.EvictionCallback;
@@ -31,14 +32,25 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.google.common.cache.RemovalCause;
-
+// FIXME OAK-4474: Finalise SegmentCache: document, add monitoring, management, tests, logging
 /**
- * FIXME OAK-4474: Finalise SegmentCache: document, add monitoring, management, tests, logging
+ * A cache for {@link Segment} instances by their {@link SegmentId}.
+ * <p>
+ * Conceptually this cache serves as a 2nd level cache for segments. The 1st level cache is
+ * implemented by memoising the segment in its id (see {@link SegmentId#segment}). Every time
+ * a segment is evicted from this cache the memoised segment is discarded (see
+ * {@link SegmentId#unloaded()}).
+ * <p>
+ * As a consequence this cache is actually only queried for segments it does not contain,
+ * which are then loaded through the loader passed to {@link #getSegment(SegmentId, Callable)}.
+ * This behaviour is eventually reflected in the cache statistics (see {@link #getCacheStats()}),
+ * which always reports a {@link CacheStats#getMissRate() miss rate} of 1.
  */
 public class SegmentCache {
     private static final Logger LOG = LoggerFactory.getLogger(SegmentCache.class);
 
+    public static final int DEFAULT_SEGMENT_CACHE_MB = 256;
+
     private final Weigher<SegmentId, Segment> weigher = new Weigher<SegmentId, Segment>() {
         @Override
         public int weigh(SegmentId id, Segment segment) {
@@ -52,6 +64,10 @@
     @Nonnull
     private final CacheLIRS<SegmentId, Segment> cache;
 
+    /**
+     * Create a new segment cache of the given size.
+     * @param cacheSizeMB  size of the cache in megabytes.
+     */
     public SegmentCache(long cacheSizeMB) {
         this.cache = CacheLIRS.<SegmentId, Segment>newBuilder()
             .module("SegmentCache")
@@ -68,21 +84,39 @@
             .build();
     }
 
+    /**
+     * Retrieve a segment from the cache or load it and cache it if not yet in the cache.
+     * @param id        the id of the segment
+     * @param loader    the loader to load the segment if not yet in the cache
+     * @return          the segment identified by {@code id}
+     * @throws ExecutionException  when {@code loader} failed to load a segment
+     */
     @Nonnull
     public Segment getSegment(@Nonnull SegmentId id, @Nonnull Callable<Segment> loader)
     throws ExecutionException {
         return cache.get(id, loader);
     }
 
+    /**
+     * Put a segment into the cache
+     * @param segment  the segment to cache
+     */
     public void putSegment(@Nonnull Segment segment) {
         cache.put(segment.getSegmentId(), segment);
         segment.getSegmentId().loaded(segment);
     }
 
+    /**
+     * Clear all segments from the cache
+     */
     public void clear() {
         cache.invalidateAll();
     }
 
+    /**
+     * See the class comment regarding some peculiarities of this cache's statistics
+     * @return  statistics for this cache.
+     */
     @Nonnull
     public CacheStats getCacheStats() {
         return new CacheStats(cache, "Segment Cache", weigher, cache.getMaxMemory());
diff --git a/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/SegmentNodeStoreService.java b/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/SegmentNodeStoreService.java
index b8199c2..2fb3f2e 100644
--- a/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/SegmentNodeStoreService.java
+++ b/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/SegmentNodeStoreService.java
@@ -135,10 +135,24 @@
 
     @Property(
             intValue = 256,
-            label = "Cache size (MB)",
-            description = "Cache size for storing most recently used Segments"
+            label = "Segment cache size (MB)",
+            description = "Cache size for storing most recently used segments"
     )
-    public static final String CACHE = "cache";
+    public static final String SEGMENT_CACHE_SIZE = "segmentCache.size";
+
+    @Property(
+            intValue = 256,
+            label = "String cache size (MB)",
+            description = "Cache size for storing most recently used strings"
+    )
+    public static final String STRING_CACHE_SIZE = "stringCache.size";
+
+    @Property(
+            intValue = 64,
+            label = "Template cache size (MB)",
+            description = "Cache size for storing most recently used templates"
+    )
+    public static final String TEMPLATE_CACHE_SIZE = "templateCache.size";
 
     @Property(
             byteValue = MEMORY_THRESHOLD_DEFAULT,
@@ -335,7 +349,9 @@
 
         // Build the FileStore
         FileStoreBuilder builder = fileStoreBuilder(getDirectory())
-                .withCacheSize(getCacheSize())
+                .withSegmentCacheSize(getSegmentCacheSize())
+                .withStringCacheSize(getStringCacheSize())
+                .withTemplateCacheSize(getTemplateCacheSize())
                 .withMaxFileSize(getMaxFileSize())
                 .withMemoryMapping(getMode().equals("64"))
                 .withGCMonitor(gcMonitor)
@@ -608,18 +624,26 @@
         return System.getProperty(MODE, System.getProperty("sun.arch.data.model", "32"));
     }
 
-    private String getCacheSizeProperty() {
-        String cache = property(CACHE);
+    private String getCacheSize(String propertyName) {
+        String cacheSize = property(propertyName);
 
-        if (cache != null) {
-            return cache;
+        if (cacheSize != null) {
+            return cacheSize;
         }
 
-        return System.getProperty(CACHE);
+        return System.getProperty(propertyName);
     }
 
-    private int getCacheSize() {
-        return Integer.parseInt(getCacheSizeProperty());
+    private int getSegmentCacheSize() {
+        return Integer.parseInt(getCacheSize(SEGMENT_CACHE_SIZE));
+    }
+
+    private int getStringCacheSize() {
+        return Integer.parseInt(getCacheSize(STRING_CACHE_SIZE));
+    }
+
+    private int getTemplateCacheSize() {
+        return Integer.parseInt(getCacheSize(TEMPLATE_CACHE_SIZE));
     }
 
     private String getMaxFileSizeProperty() {
diff --git a/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/file/FileStore.java b/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/file/FileStore.java
index 7a07d19..92fefa1 100644
--- a/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/file/FileStore.java
+++ b/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/file/FileStore.java
@@ -36,8 +36,6 @@
 import static java.util.concurrent.TimeUnit.SECONDS;
 import static org.apache.jackrabbit.oak.commons.IOUtils.humanReadableByteCount;
 import static org.apache.jackrabbit.oak.plugins.memory.EmptyNodeState.EMPTY_NODE;
-import static org.apache.jackrabbit.oak.segment.CachingSegmentReader.DEFAULT_STRING_CACHE_MB;
-import static org.apache.jackrabbit.oak.segment.CachingSegmentReader.DEFAULT_TEMPLATE_CACHE_MB;
 import static org.apache.jackrabbit.oak.segment.SegmentId.isDataSegmentId;
 import static org.apache.jackrabbit.oak.segment.SegmentWriterBuilder.segmentWriterBuilder;
 import static org.apache.jackrabbit.oak.segment.file.GCListener.Status.FAILURE;
@@ -238,16 +236,9 @@
         this.revisions = builder.getRevisions();
         this.blobStore = builder.getBlobStore();
 
-        // FIXME OAK-4451 refactor cache size configurations
         // FIXME OAK-4277: Finalise de-duplication caches: inject caches
         // from the outside so we can get rid of the cache stat accessors
-        if (builder.getCacheSize() < 0) {
-            this.segmentCache = new SegmentCache(0);
-        } else if (builder.getCacheSize() > 0) {
-            this.segmentCache = new SegmentCache(builder.getCacheSize());
-        } else {
-            this.segmentCache = new SegmentCache(DEFAULT_STRING_CACHE_MB);
-        }
+        this.segmentCache = new SegmentCache(builder.getSegmentCacheSize());
         Supplier<SegmentWriter> getWriter = new Supplier<SegmentWriter>() {
             @Override
             public SegmentWriter get() {
@@ -255,18 +246,10 @@
             }
         };
 
-        // FIXME OAK-4451 refactor cache size configurations
         // FIXME OAK-4451: Implement a proper template cache: inject caches
         // from the outside so we can get rid of the cache stat accessors
-        if (builder.getCacheSize() < 0) {
-            this.segmentReader = new CachingSegmentReader(getWriter, blobStore, 0, 0);
-        } else if (builder.getCacheSize() > 0) {
-            this.segmentReader = new CachingSegmentReader(getWriter, blobStore,
-                    (long) builder.getCacheSize(), (long) builder.getCacheSize());
-        } else {
-            this.segmentReader = new CachingSegmentReader(getWriter, blobStore,
-                    (long) DEFAULT_STRING_CACHE_MB, (long) DEFAULT_TEMPLATE_CACHE_MB);
-        }
+        this.segmentReader = new CachingSegmentReader(getWriter, blobStore,
+                builder.getStringCacheSize(), builder.getTemplateCacheSize());
 
         Supplier<Integer> getGeneration = new Supplier<Integer>() {
             @Override
diff --git a/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/file/FileStoreBuilder.java b/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/file/FileStoreBuilder.java
index 76a1463..6120ff3 100644
--- a/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/file/FileStoreBuilder.java
+++ b/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/file/FileStoreBuilder.java
@@ -21,7 +21,9 @@
 
 import static com.google.common.base.Preconditions.checkNotNull;
 import static com.google.common.base.Preconditions.checkState;
-import static java.util.Collections.singleton;
+import static org.apache.jackrabbit.oak.segment.CachingSegmentReader.DEFAULT_STRING_CACHE_MB;
+import static org.apache.jackrabbit.oak.segment.CachingSegmentReader.DEFAULT_TEMPLATE_CACHE_MB;
+import static org.apache.jackrabbit.oak.segment.SegmentCache.DEFAULT_SEGMENT_CACHE_MB;
 import static org.apache.jackrabbit.oak.segment.compaction.SegmentGCOptions.defaultGCOptions;
 
 import java.io.File;
@@ -56,7 +58,11 @@
 
     private int maxFileSize = 256;
 
-    private int cacheSize;   // 0 -> DEFAULT_MEMORY_CACHE_SIZE
+    private int segmentCacheSize = DEFAULT_SEGMENT_CACHE_MB;
+
+    private int stringCacheSize = DEFAULT_STRING_CACHE_MB;
+
+    private int templateCacheSize = DEFAULT_TEMPLATE_CACHE_MB;
 
     private boolean memoryMapping;
 
@@ -176,23 +182,35 @@
     }
 
     /**
-     * Size of the cache in MB.
-     * @param cacheSize
+     * Size of the segment cache in MB.
+     * @param segmentCacheSize  Non-negative cache size
      * @return this instance
      */
     @Nonnull
-    public FileStoreBuilder withCacheSize(int cacheSize) {
-        this.cacheSize = cacheSize;
+    public FileStoreBuilder withSegmentCacheSize(int segmentCacheSize) {
+        this.segmentCacheSize = segmentCacheSize;
         return this;
     }
 
     /**
-     * Turn caching off
+     * Size of the string cache in MB.
+     * @param stringCacheSize  Non-negative cache size
      * @return this instance
      */
     @Nonnull
-    public FileStoreBuilder withNoCache() {
-        this.cacheSize = -1;
+    public FileStoreBuilder withStringCacheSize(int stringCacheSize) {
+        this.stringCacheSize = stringCacheSize;
+        return this;
+    }
+
+    /**
+     * Size of the template cache in MB.
+     * @param templateCacheSize  Non-negative cache size
+     * @return this instance
+     */
+    @Nonnull
+    public FileStoreBuilder withTemplateCacheSize(int templateCacheSize) {
+        this.templateCacheSize = templateCacheSize;
         return this;
     }
 
@@ -314,8 +332,16 @@
         return maxFileSize;
     }
 
-    int getCacheSize() {
-        return cacheSize;
+    int getSegmentCacheSize() {
+        return segmentCacheSize;
+    }
+
+    int getStringCacheSize() {
+        return stringCacheSize;
+    }
+
+    int getTemplateCacheSize() {
+        return templateCacheSize;
     }
 
     boolean getMemoryMapping() {
@@ -354,7 +380,9 @@
                 "directory=" + directory +
                 ", blobStore=" + blobStore +
                 ", maxFileSize=" + maxFileSize +
-                ", cacheSize=" + cacheSize +
+                ", segmentCacheSize=" + segmentCacheSize +
+                ", stringCacheSize=" + stringCacheSize +
+                ", templateCacheSize=" + templateCacheSize +
                 ", memoryMapping=" + memoryMapping +
                 ", gcOptions=" + gcOptions +
                 '}';
diff --git a/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/SegmentDataStoreBlobGCIT.java b/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/SegmentDataStoreBlobGCIT.java
index d75ca72..e9d00d4 100644
--- a/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/SegmentDataStoreBlobGCIT.java
+++ b/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/SegmentDataStoreBlobGCIT.java
@@ -102,7 +102,6 @@
             FileStoreBuilder builder = fileStoreBuilder(getWorkDir())
                     .withBlobStore(blobStore)
                     .withMaxFileSize(256)
-                    .withCacheSize(64)
                     .withMemoryMapping(false)
                     .withGCOptions(gcOptions);
             store = builder.build();
diff --git a/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/file/JournalEntryTest.java b/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/file/JournalEntryTest.java
index 253ce63..36f5144 100644
--- a/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/file/JournalEntryTest.java
+++ b/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/file/JournalEntryTest.java
@@ -46,8 +46,13 @@
 
     @Test
     public void timestampInJournalEntry() throws Exception{
-        FileStore fileStore = fileStoreBuilder(tempFolder.getRoot()).withMaxFileSize(5)
-                .withNoCache().withMemoryMapping(true).build();
+        FileStore fileStore = fileStoreBuilder(tempFolder.getRoot())
+                .withMaxFileSize(5)
+                .withSegmentCacheSize(0)
+                .withStringCacheSize(0)
+                .withTemplateCacheSize(0)
+                .withMemoryMapping(true)
+                .build();
 
         SegmentNodeStore nodeStore = SegmentNodeStoreBuilders.builder(fileStore).build();
 
diff --git a/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/file/LargeNumberOfPropertiesTestIT.java b/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/file/LargeNumberOfPropertiesTestIT.java
index 781f1ab..e2d1d0e 100644
--- a/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/file/LargeNumberOfPropertiesTestIT.java
+++ b/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/file/LargeNumberOfPropertiesTestIT.java
@@ -69,8 +69,13 @@
 
     @Test
     public void corruption() throws Exception {
-        FileStore fileStore = fileStoreBuilder(getFileStoreFolder()).withMaxFileSize(5)
-                .withNoCache().withMemoryMapping(true).build();
+        FileStore fileStore = fileStoreBuilder(getFileStoreFolder())
+                .withMaxFileSize(5)
+                .withSegmentCacheSize(0)
+                .withStringCacheSize(0)
+                .withTemplateCacheSize(0)
+                .withMemoryMapping(true)
+                .build();
         SegmentNodeStore nodeStore = SegmentNodeStoreBuilders.builder(fileStore).build();
 
         NodeBuilder root = nodeStore.getRoot().builder();
diff --git a/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/file/SegmentReferenceLimitTestIT.java b/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/file/SegmentReferenceLimitTestIT.java
index 5e5d476..761d6c9 100644
--- a/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/file/SegmentReferenceLimitTestIT.java
+++ b/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/file/SegmentReferenceLimitTestIT.java
@@ -76,8 +76,13 @@
 
     @Test
     public void corruption() throws IOException, CommitFailedException, ExecutionException, InterruptedException {
-        FileStore fileStore = fileStoreBuilder(getFileStoreFolder()).withMaxFileSize(1)
-                .withNoCache().withMemoryMapping(true).build();
+        FileStore fileStore = fileStoreBuilder(getFileStoreFolder())
+                .withMaxFileSize(1)
+                .withSegmentCacheSize(0)
+                .withStringCacheSize(0)
+                .withTemplateCacheSize(0)
+                .withMemoryMapping(true)
+                .build();
         SegmentNodeStore nodeStore = SegmentNodeStoreBuilders.builder(fileStore).build();
 
         NodeBuilder root = nodeStore.getRoot().builder();