[MRQL-94] Improve the configuration file to work with default cluster setups
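
Leaving MAPRED_JOB_TRACKER and FS_DEFAULT_NAME empty in conf/mrql-env.sh now makes
the evaluators keep the values already provided by the cluster configuration
(mapred-site.xml and core-site.xml) instead of overriding them, and HADOOP_HOME is
given a default location only when it does not already point to an installation.
The supported Hama version is bumped to 0.7.1 and Flink to 1.0.3.

Every evaluator applies the same fallback pattern; a minimal, self-contained Java
sketch of it (the EnvOverride class and setIfPresent helper are illustrative only,
not part of the patch):

    import org.apache.hadoop.conf.Configuration;

    class EnvOverride {
        // Apply an environment override only when the variable is set and non-empty;
        // otherwise keep whatever the cluster's *-site.xml files already provide.
        static void setIfPresent ( Configuration conf, String key, String envVar ) {
            String value = System.getenv(envVar);
            if (value != null && !value.equals(""))
                conf.set(key,value);
        }
    }
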
diff --git a/bsp/src/main/java/org/apache/mrql/BSPEvaluator.gen b/bsp/src/main/java/org/apache/mrql/BSPEvaluator.gen
index 37da613..27d9651 100644
--- a/bsp/src/main/java/org/apache/mrql/BSPEvaluator.gen
+++ b/bsp/src/main/java/org/apache/mrql/BSPEvaluator.gen
@@ -41,7 +41,8 @@
                 conf.set("bsp.master.address",System.getenv("BSP_MASTER_ADDRESS"));
                 conf.set("hama.zookeeper.quorum",System.getenv("HAMA_ZOOKEEPER_QUORUM"));
                 conf.setInt("bsp.local.tasks.maximum",Config.nodes);
-                conf.set("fs.defaultFS",System.getenv("FS_DEFAULT_NAME"));
+                if (!System.getenv("FS_DEFAULT_NAME").equals(""))
+                    conf.set("fs.defaultFS",System.getenv("FS_DEFAULT_NAME"));
             }
     }
 
diff --git a/conf/mrql-env.sh b/conf/mrql-env.sh
index 77da529..6462949 100644
--- a/conf/mrql-env.sh
+++ b/conf/mrql-env.sh
@@ -34,7 +34,7 @@
 
 
 # Required: The Java installation directory
-if [[ !(-f ${JAVA_HOME}) ]]; then
+if [ ! -d "${JAVA_HOME}" ]; then
    export JAVA_HOME=/usr/lib/jvm/java-8-oracle
 fi
 
@@ -47,20 +47,22 @@
 JLINE_JAR=${HOME}/.m2/repository/jline/jline/1.0/jline-1.0.jar
 
 
-# Required: Hadoop configuration. Supports versions 1.x and 2.x (YARN)
-HADOOP_VERSION=2.7.1
+# Hadoop configuration. Supports versions 1.x and 2.x (YARN)
 # The Hadoop installation directory
-HADOOP_HOME=${HOME}/hadoop-${HADOOP_VERSION}
-# The Hadoop configuration directory (where core-site.xml is)
+if [ ! -d "${HADOOP_HOME}" ]; then
+    HADOOP_VERSION=2.7.1
+    HADOOP_HOME=${HOME}/hadoop-${HADOOP_VERSION}
+fi
+# The Hadoop configuration directory (where core-site.xml is). Leave it empty to use the cluster default
 HADOOP_CONFIG=${HADOOP_HOME}/etc/hadoop
-# The Hadoop job tracker (as defined in mapred-site.xml)
-MAPRED_JOB_TRACKER=localhost:9001
-# The HDFS namenode URI (as defined in core-site.xml)
-FS_DEFAULT_NAME=hdfs://localhost:9000/
+# The Hadoop job tracker (e.g., localhost:9001). If empty, the value defined in mapred-site.xml is used
+MAPRED_JOB_TRACKER=
+# The HDFS namenode URI (e.g., hdfs://localhost:9000/). If empty, the value defined in core-site.xml is used
+FS_DEFAULT_NAME=
 
 
-# Optional: Hama configuration. Supports versions 0.6.2, 0.6.3, 0.6.4, and 0.7.0
-HAMA_VERSION=0.7.0
+# Optional: Hama configuration. Supports versions 0.6.2, 0.6.3, 0.6.4, 0.7.0, and 0.7.1
+HAMA_VERSION=0.7.1
 # The Hama installation directory
 HAMA_HOME=${HOME}/hama-${HAMA_VERSION}
 # The Hama configuration directory
@@ -93,11 +95,11 @@
 SPARK_EXECUTOR_MEMORY=1G
 
 
-# Optional: Flink configuration. Supports version 1.0.2
+# Optional: Flink configuration. Supports versions 1.0.2 and 1.0.3
 FLINK_VERSION=1.0.2
 # Flink installation directory
 FLINK_HOME=${HOME}/flink-${FLINK_VERSION}
-# number of slots per TaskManager (typically, the number of CPUs per machine)
+# number of slots per TaskManager (typically, the number of cores per node)
 FLINK_SLOTS=4
 # memory per TaskManager
 FLINK_TASK_MANAGER_MEMORY=2048
diff --git a/flink/src/main/java/org/apache/mrql/FlinkEvaluator.gen b/flink/src/main/java/org/apache/mrql/FlinkEvaluator.gen
index ba5cf1b..f66d422 100644
--- a/flink/src/main/java/org/apache/mrql/FlinkEvaluator.gen
+++ b/flink/src/main/java/org/apache/mrql/FlinkEvaluator.gen
@@ -80,9 +80,11 @@
 		master_host = m[0];
 		master_port = Integer.parseInt(m[1]);
 	    };
-            fs_default_name = System.getenv("FS_DEFAULT_NAME");
+            if (!System.getenv("FS_DEFAULT_NAME").equals("")) {
+                fs_default_name = System.getenv("FS_DEFAULT_NAME");
+                Plan.conf.set("fs.defaultFS",fs_default_name);
+            } else fs_default_name = Plan.conf.get("fs.defaultFS");
             data_source_dir_name = absolute_path("tmp/data_source_dir.txt");
-	    Plan.conf.set("fs.defaultFS",fs_default_name);
         }
     }
 
diff --git a/mapreduce/src/main/java/org/apache/mrql/MapReduceEvaluator.gen b/mapreduce/src/main/java/org/apache/mrql/MapReduceEvaluator.gen
index ac6e93a..0bb41e0 100644
--- a/mapreduce/src/main/java/org/apache/mrql/MapReduceEvaluator.gen
+++ b/mapreduce/src/main/java/org/apache/mrql/MapReduceEvaluator.gen
@@ -37,8 +37,10 @@
                 conf.set("mapreduce.framework.name","local");
                 conf.set("fs.defaultFS","file:///"); // doesn't work for hadoop 1.*
             } else {
-                conf.set("mapred.job.tracker",System.getenv("MAPRED_JOB_TRACKER"));
-                conf.set("fs.defaultFS",System.getenv("FS_DEFAULT_NAME")); // doesn't work for hadoop 1.*
+                if (!System.getenv("MAPRED_JOB_TRACKER").equals(""))
+                    conf.set("mapred.job.tracker",System.getenv("MAPRED_JOB_TRACKER"));
+                if (!System.getenv("FS_DEFAULT_NAME").equals(""))
+                    conf.set("fs.defaultFS",System.getenv("FS_DEFAULT_NAME")); // doesn't work for hadoop 1.*
             }
     }
 
diff --git a/pom.xml b/pom.xml
index cb1ac62..403f57f 100644
--- a/pom.xml
+++ b/pom.xml
@@ -46,10 +46,10 @@
   <properties>
     <hadoop.version>1.2.1</hadoop.version>
     <yarn.version>2.7.1</yarn.version>
-    <hama.version>0.7.0</hama.version>
+    <hama.version>0.7.1</hama.version>
     <spark.version>1.6.2</spark.version>
     <scala.version>2.10</scala.version>
-    <flink.version>1.0.2</flink.version>
+    <flink.version>1.0.3</flink.version>
     <skipTests>true</skipTests>
   </properties>
 
diff --git a/spark/src/main/java/org/apache/mrql/SparkEvaluator.gen b/spark/src/main/java/org/apache/mrql/SparkEvaluator.gen
index 616da62..3ded60e 100644
--- a/spark/src/main/java/org/apache/mrql/SparkEvaluator.gen
+++ b/spark/src/main/java/org/apache/mrql/SparkEvaluator.gen
@@ -79,7 +79,8 @@
             spark_conf.set("spark.executor.instances",""+Config.nodes);
             spark_context = new JavaSparkContext(spark_conf);
             Plan.conf = spark_context.hadoopConfiguration();
-            FileSystem.setDefaultUri(Plan.conf,System.getenv("FS_DEFAULT_NAME"));
+            if (!System.getenv("FS_DEFAULT_NAME").equals(""))
+                FileSystem.setDefaultUri(Plan.conf,System.getenv("FS_DEFAULT_NAME"));
         };
         if (Config.stream_window > 0 && (Config.local_mode || Config.hadoop_mode))
             stream_context = new JavaStreamingContext(spark_context,new Duration(Config.stream_window));