HDFS-1907. Fix position read for reading still-being-written file in DFSInputStream.  Contributed by John George


git-svn-id: https://svn.apache.org/repos/asf/hadoop/hdfs/trunk@1131124 13f79535-47bb-0310-9956-ffa450edef68
diff --git a/CHANGES.txt b/CHANGES.txt
index 5253cc0..d877672 100644
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@ -678,6 +678,9 @@
     HDFS-1995. Federation: Minor bug fixes and modification cluster web UI.
     (Tanping Wang via suresh)
 
+    HDFS-1907. Fix position read for reading still-being-written file in
+    DFSInputStream.  (John George via szetszwo)
+
 Release 0.22.0 - Unreleased
 
   INCOMPATIBLE CHANGES
diff --git a/src/java/org/apache/hadoop/hdfs/DFSInputStream.java b/src/java/org/apache/hadoop/hdfs/DFSInputStream.java
index 0052d65..0051cc8 100644
--- a/src/java/org/apache/hadoop/hdfs/DFSInputStream.java
+++ b/src/java/org/apache/hadoop/hdfs/DFSInputStream.java
@@ -306,11 +306,22 @@
       blocks = getFinalizedBlockRange(offset, length);
     }
     else {
-      if (length + offset > locatedBlocks.getFileLength()) {
+      final boolean readPastEnd = offset + length > locatedBlocks.getFileLength();
+      /* If the requested length is greater than the current file
+       * length, then the read could possibly extend into the
+       * block currently being written. First get the finalized
+       * block range and then, if necessary, append the last
+       * block being written to.
+       */
+      if (readPastEnd)
         length = locatedBlocks.getFileLength() - offset;
-      }
+
       blocks = getFinalizedBlockRange(offset, length);
-      blocks.add(locatedBlocks.getLastLocatedBlock());
+      /* The requested length extends past what the finalized
+       * blocks cover, so include the last (in-progress) block.
+       */
+      if (readPastEnd)
+        blocks.add(locatedBlocks.getLastLocatedBlock());
     }
     return blocks;
   }
diff --git a/src/test/hdfs/org/apache/hadoop/hdfs/TestWriteRead.java b/src/test/hdfs/org/apache/hadoop/hdfs/TestWriteRead.java
index 181319c..e293c96 100644
--- a/src/test/hdfs/org/apache/hadoop/hdfs/TestWriteRead.java
+++ b/src/test/hdfs/org/apache/hadoop/hdfs/TestWriteRead.java
@@ -106,12 +106,23 @@
     positionReadOption = false;
     String fname = filenameOption;
     
+    positionReadOption = false;   // sequential read
     // need to run long enough to fail: takes 25 to 35 seec on Mac
     int stat = testWriteAndRead(fname, WR_NTIMES, WR_CHUNK_SIZE);
     LOG.info("Summary status from test1: status= " + stat);
-    Assert.assertTrue(stat == 0);
+    Assert.assertEquals(0, stat);
   }
-      
+
+  /** JUnit test: position read while the file is being written. */
+  @Test
+  public void TestWriteReadPos() throws IOException {
+    String fname = filenameOption;
+    positionReadOption = true;   // position read
+    int stat = testWriteAndRead(fname, WR_NTIMES, WR_CHUNK_SIZE);
+    Assert.assertEquals(0, stat);
+  }
+
+
   // equivalent of TestWriteRead1
   private int clusterTestWriteRead1() throws IOException {
     int stat = testWriteAndRead(filenameOption, loopOption, chunkSizeOption);