Merge pull request #530 from sebastian-nagel/NUTCH-2789

NUTCH-2789 Documentation: update links to point to cwiki
diff --git a/src/bin/crawl b/src/bin/crawl
index 9b77ce4..23a2940 100755
--- a/src/bin/crawl
+++ b/src/bin/crawl
@@ -370,10 +370,19 @@
   echo "CrawlDB update"
   __bin_nutch updatedb "${commonOptions[@]}" "$CRAWL_PATH"/crawldb  "$CRAWL_PATH"/segments/$SEGMENT
 
-  # note that the link inversion - indexing routine can be done within the main loop
-  # on a per segment basis
+  echo "HostDB update"
+  if $HOSTDBUPDATE; then
+    __update_hostdb
+  fi
+
+  # Note that all steps below in this loop (link inversion, deduplication,
+  # indexing) can be done
+  # - either inside the loop on a per segment basis
+  # - or after the loop over all segments created in all loop iterations
+  #   (both invertlinks and index accept multiple segments as input)
+  # The latter is more efficient but the index is then updated later.
   echo "Link inversion"
-  __bin_nutch invertlinks "${commonOptions[@]}" "$CRAWL_PATH"/linkdb "$CRAWL_PATH"/segments/$SEGMENT
+  __bin_nutch invertlinks "${commonOptions[@]}" "$CRAWL_PATH"/linkdb "$CRAWL_PATH"/segments/$SEGMENT -noNormalize -noFilter
 
   echo "Dedup on crawldb"
   __bin_nutch dedup "${commonOptions[@]}" "$CRAWL_PATH"/crawldb
@@ -385,30 +394,25 @@
       echo "Skipping indexing ..."
   fi
 
-  echo "HostDB update"
-  if $HOSTDBUPDATE; then
-  __update_hostdb
-  fi
-
-  #######################################################
-  # The following commands fall into WebGraph territory
-  # and should be uncommented based on your requirements
-  #######################################################
-  #echo "Building WebGraph within $CRAWL_PATH on all segments in $CRAWL_PATH/segments/"
-  #__bin_nutch webgraph "${commonOptions[@]}" -filter -normalize -segmentDir "$CRAWL_PATH"/segments/ -webgraphdb "$CRAWL_PATH"
-
-  #echo "Running Loops Job on WebGraph within $CRAWL_PATH"
-  #__bin_nutch org.apache.nutch.scoring.webgraph.Loops "${commonOptions[@]}" -webgraphdb "$CRAWL_PATH"
-
-  #echo "Running LinkRank Algorithm on WebGraph within $CRAWL_PATH"
-  #__bin_nutch linkrank "${commonOptions[@]}" -webgraphdb "$CRAWL_PATH"
-
-  #echo "Running ScoreUpdater Job with $CRAWL_PATH/crawldb and  WebGraph within $CRAWL_PATH"
-  #__bin_nutch scoreupdater "${commonOptions[@]}" -crawldb "$CRAWL_PATH"/crawldb -webgraphdb "$CRAWL_PATH"
-
-  #echo "Running NodeDumper on WebGraph within $CRAWL_PATH and dumping output to $CRAWL_PATH/dump/scores"
-  #__bin_nutch nodedumper "${commonOptions[@]}" -scores -topn 1000 -webgraphdb "$CRAWL_PATH" -output "$CRAWL_PATH"/dump/scores
-
 done
 
+#######################################################
+# The following commands fall into WebGraph territory
+# and should be uncommented based on your requirements
+#######################################################
+#echo "Building WebGraph within $CRAWL_PATH on all segments in $CRAWL_PATH/segments/"
+#__bin_nutch webgraph "${commonOptions[@]}" -filter -normalize -segmentDir "$CRAWL_PATH"/segments/ -webgraphdb "$CRAWL_PATH"
+
+#echo "Running Loops Job on WebGraph within $CRAWL_PATH"
+#__bin_nutch org.apache.nutch.scoring.webgraph.Loops "${commonOptions[@]}" -webgraphdb "$CRAWL_PATH"
+
+#echo "Running LinkRank Algorithm on WebGraph within $CRAWL_PATH"
+#__bin_nutch linkrank "${commonOptions[@]}" -webgraphdb "$CRAWL_PATH"
+
+#echo "Running ScoreUpdater Job with $CRAWL_PATH/crawldb and  WebGraph within $CRAWL_PATH"
+#__bin_nutch scoreupdater "${commonOptions[@]}" -crawldb "$CRAWL_PATH"/crawldb -webgraphdb "$CRAWL_PATH"
+
+#echo "Running NodeDumper on WebGraph within $CRAWL_PATH and dumping output to $CRAWL_PATH/dump/scores"
+#__bin_nutch nodedumper "${commonOptions[@]}" -scores -topn 1000 -webgraphdb "$CRAWL_PATH" -output "$CRAWL_PATH"/dump/scores
+
 exit 0
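
Note on the per-segment vs. batch comment above: invertlinks (and index) accept
several segments in one call, so link inversion could also run once after the
loop. A minimal sketch of that batch invocation through the LinkDb tool's Java
entry point, assuming its usual CLI contract (linkdb directory followed by one
or more segment directories plus -noNormalize/-noFilter); the paths are
hypothetical:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.util.ToolRunner;
    import org.apache.nutch.crawl.LinkDb;
    import org.apache.nutch.util.NutchConfiguration;

    public class InvertAllSegments {
      public static void main(String[] args) throws Exception {
        Configuration conf = NutchConfiguration.create();
        // Several segments inverted in a single LinkDb run (paths are made up).
        int res = ToolRunner.run(conf, new LinkDb(), new String[] {
            "crawl/linkdb",
            "crawl/segments/20200101000000",
            "crawl/segments/20200102000000",
            "-noNormalize", "-noFilter" });
        System.exit(res);
      }
    }
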
diff --git a/src/java/org/apache/nutch/crawl/CrawlDbReader.java b/src/java/org/apache/nutch/crawl/CrawlDbReader.java
index 603b2e3..1bb8160 100644
--- a/src/java/org/apache/nutch/crawl/CrawlDbReader.java
+++ b/src/java/org/apache/nutch/crawl/CrawlDbReader.java
@@ -79,8 +79,11 @@
 import com.fasterxml.jackson.core.JsonGenerationException;
 import com.fasterxml.jackson.core.JsonGenerator;
 import com.fasterxml.jackson.core.util.MinimalPrettyPrinter;
+import com.fasterxml.jackson.databind.JsonSerializer;
 import com.fasterxml.jackson.databind.ObjectMapper;
 import com.fasterxml.jackson.databind.ObjectWriter;
+import com.fasterxml.jackson.databind.SerializerProvider;
+import com.fasterxml.jackson.databind.module.SimpleModule;
 
 /**
  * Read utility for the CrawlDB.
@@ -243,6 +246,9 @@
         this.out = out;
         jsonMapper.getFactory()
             .configure(JsonGenerator.Feature.ESCAPE_NON_ASCII, true);
+        SimpleModule module = new SimpleModule();
+        module.addSerializer(Writable.class, new WritableSerializer());
+        jsonMapper.registerModule(module);
         jsonWriter = jsonMapper.writer(new JsonIndenter());
       }
 
@@ -295,6 +301,36 @@
       DataOutputStream fileOut = fs.create(new Path(dir, name), context);
       return new LineRecordWriter(fileOut);
     }
+
+    public static class WritableSerializer extends JsonSerializer<Writable> {
+      @Override
+      public void serialize(Writable obj, JsonGenerator jgen,
+          SerializerProvider provider) throws IOException {
+        if (obj instanceof org.apache.hadoop.io.NullWritable) {
+          jgen.writeNull();
+        } else if (obj instanceof org.apache.hadoop.io.BooleanWritable) {
+          jgen.writeBoolean(((org.apache.hadoop.io.BooleanWritable) obj).get());
+        } else if (obj instanceof org.apache.hadoop.io.IntWritable) {
+          jgen.writeNumber(((org.apache.hadoop.io.IntWritable) obj).get());
+        } else if (obj instanceof org.apache.hadoop.io.VIntWritable) {
+          jgen.writeNumber(((org.apache.hadoop.io.VIntWritable) obj).get());
+        } else if (obj instanceof org.apache.hadoop.io.LongWritable) {
+          jgen.writeNumber(((org.apache.hadoop.io.LongWritable) obj).get());
+        } else if (obj instanceof org.apache.hadoop.io.VLongWritable) {
+          jgen.writeNumber(((org.apache.hadoop.io.VLongWritable) obj).get());
+        } else if (obj instanceof org.apache.hadoop.io.ByteWritable) {
+          jgen.writeNumber(((org.apache.hadoop.io.ByteWritable) obj).get());
+        } else if (obj instanceof org.apache.hadoop.io.FloatWritable) {
+          jgen.writeNumber(((org.apache.hadoop.io.FloatWritable) obj).get());
+        } else if (obj instanceof org.apache.hadoop.io.DoubleWritable) {
+          jgen.writeNumber(((org.apache.hadoop.io.DoubleWritable) obj).get());
+        } else if (obj instanceof org.apache.hadoop.io.BytesWritable) {
+          jgen.writeBinary(((org.apache.hadoop.io.BytesWritable) obj).copyBytes());
+        } else {
+          jgen.writeString(obj.toString());
+        }
+      }
+    }
   }
 
   public static class CrawlDbStatMapper
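
The SimpleModule registered above is what makes the CrawlDb JSON dump render
Hadoop Writable values as native JSON types instead of falling back on
toString(). A self-contained sketch of the same registration pattern (the
serializer below is a trimmed-down stand-in for the one added to
CrawlDbReader, not the class itself):

    import java.io.IOException;
    import java.util.LinkedHashMap;
    import java.util.Map;

    import org.apache.hadoop.io.IntWritable;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.io.Writable;

    import com.fasterxml.jackson.core.JsonGenerator;
    import com.fasterxml.jackson.databind.JsonSerializer;
    import com.fasterxml.jackson.databind.ObjectMapper;
    import com.fasterxml.jackson.databind.SerializerProvider;
    import com.fasterxml.jackson.databind.module.SimpleModule;

    public class WritableJsonDemo {

      static class DemoWritableSerializer extends JsonSerializer<Writable> {
        @Override
        public void serialize(Writable obj, JsonGenerator jgen,
            SerializerProvider provider) throws IOException {
          if (obj instanceof IntWritable) {
            jgen.writeNumber(((IntWritable) obj).get());
          } else {
            jgen.writeString(obj.toString());
          }
        }
      }

      public static void main(String[] args) throws IOException {
        ObjectMapper mapper = new ObjectMapper();
        SimpleModule module = new SimpleModule();
        module.addSerializer(Writable.class, new DemoWritableSerializer());
        mapper.registerModule(module);

        Map<String, Writable> record = new LinkedHashMap<>();
        record.put("fetchInterval", new IntWritable(2592000));
        record.put("status", new Text("db_fetched"));
        // Prints: {"fetchInterval":2592000,"status":"db_fetched"}
        System.out.println(mapper.writeValueAsString(record));
      }
    }
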
diff --git a/src/java/org/apache/nutch/indexer/IndexerMapReduce.java b/src/java/org/apache/nutch/indexer/IndexerMapReduce.java
index 3e9bc15..42093b7 100644
--- a/src/java/org/apache/nutch/indexer/IndexerMapReduce.java
+++ b/src/java/org/apache/nutch/indexer/IndexerMapReduce.java
@@ -19,6 +19,8 @@
 import java.io.IOException;
 import java.lang.invoke.MethodHandles;
 import java.util.Collection;
+import java.util.Locale;
+
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.commons.codec.binary.Base64;
@@ -274,11 +276,11 @@
           // Handle robots meta? https://issues.apache.org/jira/browse/NUTCH-1434
           if (deleteRobotsNoIndex) {
             // Get the robots meta data
-            String robotsMeta = parseData.getMeta("robots");
+            String robotsMeta = parseData.getMeta(Nutch.ROBOTS_METATAG);
 
             // Has it a noindex for this url?
-            if (robotsMeta != null
-                && robotsMeta.toLowerCase().indexOf("noindex") != -1) {
+            if (robotsMeta != null && robotsMeta.toLowerCase(Locale.ROOT)
+                .indexOf("noindex") != -1) {
               // Delete it!
               context.write(key, DELETE_ACTION);
               context.getCounter("IndexerStatus", "deleted (robots=noindex)").increment(1);
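
The switch to toLowerCase(Locale.ROOT) here (and in the parse-tika changes
below) avoids locale-dependent lowercasing: under a Turkish default locale,
"NOINDEX" lowercases to "noındex" (dotless i) and the match silently fails.
A small illustration; the Turkish locale is only used to show the failure
mode:

    import java.util.Locale;

    public class LocaleLowercaseDemo {
      public static void main(String[] args) {
        String robotsMeta = "NOINDEX,NOFOLLOW";
        // Turkish 'I' lowercases to dotless 'ı', so the directive is missed.
        System.out.println(robotsMeta.toLowerCase(new Locale("tr", "TR"))
            .contains("noindex"));   // false
        // Locale.ROOT lowercases the same way on every JVM.
        System.out.println(robotsMeta.toLowerCase(Locale.ROOT)
            .contains("noindex"));   // true
      }
    }
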
diff --git a/src/java/org/apache/nutch/metadata/Metadata.java b/src/java/org/apache/nutch/metadata/Metadata.java
index 6eb27c9..38724d4 100644
--- a/src/java/org/apache/nutch/metadata/Metadata.java
+++ b/src/java/org/apache/nutch/metadata/Metadata.java
@@ -232,13 +232,28 @@
     return true;
   }
 
+  @Override
   public String toString() {
-    StringBuffer buf = new StringBuffer();
+    return toString("=", " ");
+  }
+
+  /**
+   * @param separator
+   *          separator between Metadata's key-value pairs
+   * @param keyValueSeparator
+   *          separator between key and value
+   * @return list of all key-value pairs in Metadata using the provided
+   *         separators
+   */
+  public String toString(String separator, String keyValueSeparator) {
+    StringBuilder buf = new StringBuilder();
     String[] names = names();
     for (int i = 0; i < names.length; i++) {
       String[] values = _getValues(names[i]);
       for (int j = 0; j < values.length; j++) {
-        buf.append(names[i]).append("=").append(values[j]).append(" ");
+        if (buf.length() > 0)
+          buf.append(separator);
+        buf.append(names[i]).append(keyValueSeparator).append(values[j]);
       }
     }
     return buf.toString();
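
The new two-argument Metadata.toString lets callers choose both separators;
ParseData below uses it to print metadata one key-value pair per indented
line. A short usage sketch (the metadata entries are made up):

    import org.apache.nutch.metadata.Metadata;

    public class MetadataToStringDemo {
      public static void main(String[] args) {
        Metadata meta = new Metadata();
        meta.add("Content-Type", "text/html");
        meta.add("Content-Length", "4096");
        // Default form, e.g. "Content-Type=text/html Content-Length=4096"
        // (pair order is not guaranteed).
        System.out.println(meta.toString());
        // One pair per indented line, as ParseData.toString() does:
        System.out.println("  " + meta.toString("\n  ", " = "));
      }
    }
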
diff --git a/src/java/org/apache/nutch/metadata/Nutch.java b/src/java/org/apache/nutch/metadata/Nutch.java
index d28808d..0cfb263 100644
--- a/src/java/org/apache/nutch/metadata/Nutch.java
+++ b/src/java/org/apache/nutch/metadata/Nutch.java
@@ -52,6 +52,12 @@
 
 	public static final String FETCH_STATUS_KEY = "_fst_";
 
+  /**
+   * Name to store the <a href="https://www.robotstxt.org/meta.html">robots
+   * metatag</a> in {@link org.apache.nutch.parse.ParseData}'s metadata.
+   */
+  public static final String ROBOTS_METATAG = "robots";
+
 	/**
 	 * Sites may request that search engines don't provide access to cached
 	 * documents.
diff --git a/src/java/org/apache/nutch/parse/ParseData.java b/src/java/org/apache/nutch/parse/ParseData.java
index e88c7ac..36d0daa 100644
--- a/src/java/org/apache/nutch/parse/ParseData.java
+++ b/src/java/org/apache/nutch/parse/ParseData.java
@@ -188,22 +188,25 @@
         && this.parseMeta.equals(other.parseMeta);
   }
 
+  @Override
   public String toString() {
-    StringBuffer buffer = new StringBuffer();
+    StringBuilder buffer = new StringBuilder();
 
-    buffer.append("Version: " + version + "\n");
-    buffer.append("Status: " + status + "\n");
-    buffer.append("Title: " + title + "\n");
+    buffer.append("Version: ").append(version).append("\n");
+    buffer.append("Status: ").append(status).append("\n");
+    buffer.append("Title: ").append(title ).append("\n");
 
     if (outlinks != null) {
-      buffer.append("Outlinks: " + outlinks.length + "\n");
+      buffer.append("Outlinks: ").append(outlinks.length).append("\n");
       for (int i = 0; i < outlinks.length; i++) {
-        buffer.append("  outlink: " + outlinks[i] + "\n");
+        buffer.append("  outlink: ").append(outlinks[i]).append("\n");
       }
     }
 
-    buffer.append("Content Metadata: " + contentMeta + "\n");
-    buffer.append("Parse Metadata: " + parseMeta + "\n");
+    buffer.append("Content Metadata:\n  ")
+        .append(contentMeta.toString("\n  ", " = ")).append("\n");
+    buffer.append("Parse Metadata:\n  ")
+        .append(parseMeta.toString("\n  ", " = ")).append("\n");
 
     return buffer.toString();
   }
diff --git a/src/java/org/apache/nutch/parse/ParserChecker.java b/src/java/org/apache/nutch/parse/ParserChecker.java
index 4dbfcfa..97dcd39 100644
--- a/src/java/org/apache/nutch/parse/ParserChecker.java
+++ b/src/java/org/apache/nutch/parse/ParserChecker.java
@@ -288,8 +288,8 @@
         }
       }
 
-      output.append(turl + "\n");
-      output.append(parse.getData() + "\n");
+      output.append(turl).append("\n");
+      output.append(parse.getData()).append("\n");
       if (dumpText) {
         output.append(parse.getText());
       }
diff --git a/src/plugin/indexer-csv/src/java/org/apache/nutch/indexwriter/csv/CSVIndexWriter.java b/src/plugin/indexer-csv/src/java/org/apache/nutch/indexwriter/csv/CSVIndexWriter.java
index 160d03d..99c0702 100644
--- a/src/plugin/indexer-csv/src/java/org/apache/nutch/indexwriter/csv/CSVIndexWriter.java
+++ b/src/plugin/indexer-csv/src/java/org/apache/nutch/indexwriter/csv/CSVIndexWriter.java
@@ -405,13 +405,12 @@
     if (max > maxFieldLength) {
       max = maxFieldLength;
     }
-    while (nextQuoteChar > 0 && nextQuoteChar < max) {
+    while (nextQuoteChar >= 0 && nextQuoteChar < max) {
       csvout.write(value.substring(start, nextQuoteChar).getBytes(encoding));
       csvout.write(escapeCharacter.bytes);
       csvout.write(quoteCharacter.bytes);
       start = nextQuoteChar + 1;
       nextQuoteChar = quoteCharacter.find(value, start);
-      if (nextQuoteChar > max) break;
     }
     csvout.write(value.substring(start, max).getBytes(encoding));
   }
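
The old loop condition "nextQuoteChar > 0" skipped escaping when the quote
character sits at index 0, because String.indexOf returns 0 in that case;
">= 0" covers it, and the early break is no longer needed. A stand-alone
sketch of the doubled-quote escaping with a fixed quote/escape character
(the plugin's configurable characters, encodings and max-length clipping are
left out):

    public class CsvQuoteEscapeDemo {

      /** Quote a field and escape embedded quotes by doubling them. */
      static String escape(String value) {
        StringBuilder out = new StringBuilder("\"");
        int start = 0;
        int nextQuoteChar = value.indexOf('"');
        // ">= 0" matters: a quote at position 0 is found at index 0.
        while (nextQuoteChar >= 0) {
          out.append(value, start, nextQuoteChar).append("\"\"");
          start = nextQuoteChar + 1;
          nextQuoteChar = value.indexOf('"', start);
        }
        return out.append(value.substring(start)).append('"').toString();
      }

      public static void main(String[] args) {
        System.out.println(escape("\"quote\""));   // """quote"""
        System.out.println(escape("plain text"));  // "plain text"
      }
    }
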
diff --git a/src/plugin/indexer-csv/src/test/org/apache/nutch/indexwriter/csv/TestCSVIndexWriter.java b/src/plugin/indexer-csv/src/test/org/apache/nutch/indexwriter/csv/TestCSVIndexWriter.java
index 761d042..5714cc2 100644
--- a/src/plugin/indexer-csv/src/test/org/apache/nutch/indexwriter/csv/TestCSVIndexWriter.java
+++ b/src/plugin/indexer-csv/src/test/org/apache/nutch/indexwriter/csv/TestCSVIndexWriter.java
@@ -159,6 +159,15 @@
   }
 
   @Test
+  public void testCSVescapeLeadingQuotes() throws IOException {
+    String[] params = { CSVConstants.CSV_FIELDS, "test" };
+    String[] fields = { "test", "\"quote\"" };
+    String csv = getCSV(params, fields);
+    assertEquals("Leading quotes inside a quoted field must be escaped",
+        "\"\"\"quote\"\"\"", csv.trim());
+  }
+
+  @Test
   public void testCSVclipMaxLength() throws IOException {
     String[] params = { CSVConstants.CSV_FIELDS, "test",
         CSVConstants.CSV_MAXFIELDLENGTH, "8" };
diff --git a/src/plugin/parse-html/src/java/org/apache/nutch/parse/html/HTMLMetaProcessor.java b/src/plugin/parse-html/src/java/org/apache/nutch/parse/html/HTMLMetaProcessor.java
index 4e7ef14..d655a96 100644
--- a/src/plugin/parse-html/src/java/org/apache/nutch/parse/html/HTMLMetaProcessor.java
+++ b/src/plugin/parse-html/src/java/org/apache/nutch/parse/html/HTMLMetaProcessor.java
@@ -18,6 +18,7 @@
 
 import java.net.URL;
 
+import org.apache.nutch.metadata.Nutch;
 import org.apache.nutch.parse.HTMLMetaTags;
 import org.w3c.dom.NamedNodeMap;
 import org.w3c.dom.Node;
@@ -79,7 +80,7 @@
           if (contentNode != null) {
             String name = nameNode.getNodeValue().toLowerCase();
             metaTags.getGeneralTags().add(name, contentNode.getNodeValue());
-            if ("robots".equals(name)) {
+            if (Nutch.ROBOTS_METATAG.equals(name)) {
               String directives = contentNode.getNodeValue().toLowerCase();
               int index = directives.indexOf("none");
 
diff --git a/src/plugin/parse-tika/src/java/org/apache/nutch/parse/tika/HTMLMetaProcessor.java b/src/plugin/parse-tika/src/java/org/apache/nutch/parse/tika/HTMLMetaProcessor.java
index 58f93ac..8584df7 100644
--- a/src/plugin/parse-tika/src/java/org/apache/nutch/parse/tika/HTMLMetaProcessor.java
+++ b/src/plugin/parse-tika/src/java/org/apache/nutch/parse/tika/HTMLMetaProcessor.java
@@ -18,7 +18,9 @@
 
 import java.net.MalformedURLException;
 import java.net.URL;
+import java.util.Locale;
 
+import org.apache.nutch.metadata.Nutch;
 import org.apache.nutch.parse.HTMLMetaTags;
 import org.w3c.dom.NamedNodeMap;
 import org.w3c.dom.Node;
@@ -66,7 +68,7 @@
         // Retrieves name, http-equiv and content attributes
         for (int i = 0; i < attrs.getLength(); i++) {
           Node attr = attrs.item(i);
-          String attrName = attr.getNodeName().toLowerCase();
+          String attrName = attr.getNodeName().toLowerCase(Locale.ROOT);
           if (attrName.equals("name")) {
             nameNode = attr;
           } else if (attrName.equals("http-equiv")) {
@@ -78,10 +80,11 @@
 
         if (nameNode != null) {
           if (contentNode != null) {
-            String name = nameNode.getNodeValue().toLowerCase();
+            String name = nameNode.getNodeValue().toLowerCase(Locale.ROOT);
             metaTags.getGeneralTags().add(name, contentNode.getNodeValue());
-            if ("robots".equals(name)) {
-              String directives = contentNode.getNodeValue().toLowerCase();
+            if (Nutch.ROBOTS_METATAG.equals(name)) {
+              String directives = contentNode.getNodeValue()
+                  .toLowerCase(Locale.ROOT);
               int index = directives.indexOf("none");
 
               if (index >= 0) {
@@ -112,12 +115,14 @@
             } // end if (name == robots)
             // meta names added/transformed by Tika
             else if (name.equals("pragma")) {
-              String content = contentNode.getNodeValue().toLowerCase();
+              String content = contentNode.getNodeValue()
+                  .toLowerCase(Locale.ROOT);
               if (content.contains("no-cache")) {
                 metaTags.setNoCache();
               }
             } else if (name.equals("refresh")) {
-              String content = contentNode.getNodeValue().toLowerCase();
+              String content = contentNode.getNodeValue()
+                  .toLowerCase(Locale.ROOT);
               setRefresh(metaTags, content, currURL);
             } else if (name.equals("content-location")) {
               String urlString = contentNode.getNodeValue();
@@ -138,11 +143,11 @@
 
         if (equivNode != null) {
           if (contentNode != null) {
-            String name = equivNode.getNodeValue().toLowerCase();
+            String name = equivNode.getNodeValue().toLowerCase(Locale.ROOT);
             String content = contentNode.getNodeValue();
             metaTags.getHttpEquivTags().setProperty(name, content);
             if ("pragma".equals(name)) {
-              content = content.toLowerCase();
+              content = content.toLowerCase(Locale.ROOT);
               int index = content.indexOf("no-cache");
               if (index >= 0)
                 metaTags.setNoCache();
@@ -203,7 +208,7 @@
     }
     URL refreshUrl = null;
     if (metaTags.getRefresh() && idx != -1) { // set the URL
-      idx = content.toLowerCase().indexOf("url=");
+      idx = content.toLowerCase(Locale.ROOT).indexOf("url=");
       if (idx == -1) { // assume a mis-formatted entry with just the
                        // url
         idx = content.indexOf(';') + 1;
diff --git a/src/plugin/parse-tika/src/java/org/apache/nutch/parse/tika/TikaParser.java b/src/plugin/parse-tika/src/java/org/apache/nutch/parse/tika/TikaParser.java
index f2461fe..d97e8b4 100644
--- a/src/plugin/parse-tika/src/java/org/apache/nutch/parse/tika/TikaParser.java
+++ b/src/plugin/parse-tika/src/java/org/apache/nutch/parse/tika/TikaParser.java
@@ -218,8 +218,14 @@
       if (tikaMDName.equalsIgnoreCase(Metadata.TITLE))
         continue;
       String[] values = tikamd.getValues(tikaMDName);
-      for (String v : values)
+      for (String v : values) {
         nutchMetadata.add(tikaMDName, v);
+        if (tikaMDName.equalsIgnoreCase(Nutch.ROBOTS_METATAG)
+            && nutchMetadata.get(Nutch.ROBOTS_METATAG) == null) {
+          // NUTCH-2720: also store the directives under the lowercase "robots" name
+          nutchMetadata.add(Nutch.ROBOTS_METATAG, v);
+        }
+      }
     }
 
     // no outlinks? try OutlinkExtractor e.g works for mime types where no
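
Background for the extra add above: lookups in Nutch's Metadata are
case-sensitive, so a robots metatag that Tika reports under a capitalized
name ("Robots") would otherwise never be seen by the indexer's
getMeta(Nutch.ROBOTS_METATAG) check. A small sketch of that lookup behavior
(names and values are illustrative):

    import org.apache.nutch.metadata.Metadata;
    import org.apache.nutch.metadata.Nutch;

    public class RobotsMetatagCaseDemo {
      public static void main(String[] args) {
        Metadata meta = new Metadata();
        meta.add("Robots", "noindex,nofollow");  // name as reported by the parser
        // null: the lowercase key is not found.
        System.out.println(meta.get(Nutch.ROBOTS_METATAG));

        // Mirror the value under the lowercase name, as TikaParser now does.
        if (meta.get(Nutch.ROBOTS_METATAG) == null) {
          meta.add(Nutch.ROBOTS_METATAG, "noindex,nofollow");
        }
        // noindex,nofollow
        System.out.println(meta.get(Nutch.ROBOTS_METATAG));
      }
    }
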