Code refactoring: move org.apache.horn.bsp to org.apache.horn.core; rename NeuralNetwork to AbstractNeuralNetwork, SmallLayeredNeuralNetwork to LayeredNeuralNetwork, and SmallLayeredNeuralNetworkMessage to ParameterMessage
diff --git a/src/main/java/org/apache/horn/bsp/AbstractLayeredNeuralNetwork.java b/src/main/java/org/apache/horn/core/AbstractLayeredNeuralNetwork.java
similarity index 98%
rename from src/main/java/org/apache/horn/bsp/AbstractLayeredNeuralNetwork.java
rename to src/main/java/org/apache/horn/core/AbstractLayeredNeuralNetwork.java
index b18eb44..f87e771 100644
--- a/src/main/java/org/apache/horn/bsp/AbstractLayeredNeuralNetwork.java
+++ b/src/main/java/org/apache/horn/core/AbstractLayeredNeuralNetwork.java
@@ -15,7 +15,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.horn.bsp;
+package org.apache.horn.core;
 
 import java.io.DataInput;
 import java.io.DataOutput;
@@ -43,7 +43,7 @@
  * form a bipartite weighted graph.
  * 
  */
-abstract class AbstractLayeredNeuralNetwork extends NeuralNetwork {
+abstract class AbstractLayeredNeuralNetwork extends AbstractNeuralNetwork {
 
   private static final double DEFAULT_MOMENTUM_WEIGHT = 0.1;
 
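This rename settles the naming convention for the class hierarchy: abstract bases carry an Abstract prefix, and the concrete network drops the Small qualifier (see the LayeredNeuralNetwork rename below). A minimal sketch of the hierarchy after this patch, with signatures taken from the hunks in this diff and bodies elided:

    // org.apache.horn.core -- hierarchy after the refactoring (bodies elided)
    public abstract class AbstractNeuralNetwork implements Writable { /* ... */ }

    abstract class AbstractLayeredNeuralNetwork extends AbstractNeuralNetwork { /* ... */ }

    public class LayeredNeuralNetwork extends AbstractLayeredNeuralNetwork { /* ... */ }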
diff --git a/src/main/java/org/apache/horn/bsp/NeuralNetwork.java b/src/main/java/org/apache/horn/core/AbstractNeuralNetwork.java
similarity index 96%
rename from src/main/java/org/apache/horn/bsp/NeuralNetwork.java
rename to src/main/java/org/apache/horn/core/AbstractNeuralNetwork.java
index 051881d..45f56a3 100644
--- a/src/main/java/org/apache/horn/bsp/NeuralNetwork.java
+++ b/src/main/java/org/apache/horn/core/AbstractNeuralNetwork.java
@@ -15,7 +15,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.horn.bsp;
+package org.apache.horn.core;
 
 import java.io.DataInput;
 import java.io.DataOutput;
@@ -46,7 +46,7 @@
  * between neurons.
  * 
  */
-abstract class NeuralNetwork implements Writable {
+public abstract class AbstractNeuralNetwork implements Writable {
   protected HamaConfiguration conf;
   protected FileSystem fs;
 
@@ -62,17 +62,17 @@
 
   protected FeatureTransformer featureTransformer;
 
-  public NeuralNetwork() {
+  public AbstractNeuralNetwork() {
     this.learningRate = DEFAULT_LEARNING_RATE;
     this.modelType = this.getClass().getSimpleName();
     this.featureTransformer = new DefaultFeatureTransformer();
   }
 
-  public NeuralNetwork(String modelPath) {
+  public AbstractNeuralNetwork(String modelPath) {
     this.modelPath = modelPath;
   }
 
-  public NeuralNetwork(HamaConfiguration conf, String modelPath) {
+  public AbstractNeuralNetwork(HamaConfiguration conf, String modelPath) {
     try {
       this.conf = conf;
       this.fs = FileSystem.get(conf);
diff --git a/src/main/java/org/apache/horn/bsp/NeuralNetworkTrainer.java b/src/main/java/org/apache/horn/core/AbstractNeuralNetworkTrainer.java
similarity index 83%
rename from src/main/java/org/apache/horn/bsp/NeuralNetworkTrainer.java
rename to src/main/java/org/apache/horn/core/AbstractNeuralNetworkTrainer.java
index 648e86b..3547a1a 100644
--- a/src/main/java/org/apache/horn/bsp/NeuralNetworkTrainer.java
+++ b/src/main/java/org/apache/horn/core/AbstractNeuralNetworkTrainer.java
@@ -15,13 +15,14 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.horn.bsp;
+package org.apache.horn.core;
 
 import java.io.IOException;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.DoubleWritable;
 import org.apache.hadoop.io.LongWritable;
 import org.apache.hadoop.io.NullWritable;
 import org.apache.hama.bsp.BSP;
@@ -32,27 +33,28 @@
 import org.apache.hama.ml.util.FeatureTransformer;
 
 /**
- * The trainer that is used to train the {@link SmallLayeredNeuralNetwork} with
+ * The trainer that is used to train the {@link LayeredNeuralNetwork} with
  * BSP. The trainer would read the training data and obtain the trained
  * parameters of the model.
  * 
  */
-public abstract class NeuralNetworkTrainer extends
-    BSP<LongWritable, VectorWritable, NullWritable, NullWritable, Synapse> {
+public abstract class AbstractNeuralNetworkTrainer
+    extends
+    BSP<LongWritable, VectorWritable, NullWritable, NullWritable, Synapse<DoubleWritable, DoubleWritable>> {
 
   protected static final Log LOG = LogFactory
-      .getLog(NeuralNetworkTrainer.class);
+      .getLog(AbstractNeuralNetworkTrainer.class);
 
   protected Configuration conf;
   protected int maxIteration;
   protected int batchSize;
   protected String trainingMode;
-  
+
   protected FeatureTransformer featureTransformer;
-  
+
   @Override
   final public void setup(
-      BSPPeer<LongWritable, VectorWritable, NullWritable, NullWritable, Synapse> peer)
+      BSPPeer<LongWritable, VectorWritable, NullWritable, NullWritable, Synapse<DoubleWritable, DoubleWritable>> peer)
       throws IOException, SyncException, InterruptedException {
     conf = peer.getConfiguration();
     featureTransformer = new DefaultFeatureTransformer();
@@ -68,7 +70,7 @@
    * @throws InterruptedException
    */
   protected void extraSetup(
-      BSPPeer<LongWritable, VectorWritable, NullWritable, NullWritable, Synapse> peer)
+      BSPPeer<LongWritable, VectorWritable, NullWritable, NullWritable, Synapse<DoubleWritable, DoubleWritable>> peer)
       throws IOException, SyncException, InterruptedException {
 
   }
@@ -78,12 +80,12 @@
    */
   @Override
   public abstract void bsp(
-      BSPPeer<LongWritable, VectorWritable, NullWritable, NullWritable, Synapse> peer)
+      BSPPeer<LongWritable, VectorWritable, NullWritable, NullWritable, Synapse<DoubleWritable, DoubleWritable>> peer)
       throws IOException, SyncException, InterruptedException;
 
   @Override
   public void cleanup(
-      BSPPeer<LongWritable, VectorWritable, NullWritable, NullWritable, Synapse> peer)
+      BSPPeer<LongWritable, VectorWritable, NullWritable, NullWritable, Synapse<DoubleWritable, DoubleWritable>> peer)
       throws IOException {
     this.extraCleanup(peer);
     // write model to modelPath
@@ -98,7 +100,7 @@
    * @throws InterruptedException
    */
   protected void extraCleanup(
-      BSPPeer<LongWritable, VectorWritable, NullWritable, NullWritable, Synapse> peer)
+      BSPPeer<LongWritable, VectorWritable, NullWritable, NullWritable, Synapse<DoubleWritable, DoubleWritable>> peer)
       throws IOException {
 
   }
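Besides the rename, every BSPPeer in this class is now parameterized with Synapse<DoubleWritable, DoubleWritable> instead of the raw Synapse type, so subclasses get type-checked messages. A minimal sketch of a concrete subclass, assuming the import paths shown in this diff (org.apache.hama.bsp.sync.SyncException is an assumption here; the class name is hypothetical):

    import java.io.IOException;

    import org.apache.hadoop.io.DoubleWritable;
    import org.apache.hadoop.io.LongWritable;
    import org.apache.hadoop.io.NullWritable;
    import org.apache.hama.bsp.BSPPeer;
    import org.apache.hama.bsp.sync.SyncException;
    import org.apache.hama.commons.io.VectorWritable;

    public class ExampleTrainer extends AbstractNeuralNetworkTrainer {
      @Override
      public void bsp(
          BSPPeer<LongWritable, VectorWritable, NullWritable, NullWritable, Synapse<DoubleWritable, DoubleWritable>> peer)
          throws IOException, SyncException, InterruptedException {
        // read VectorWritable training instances, compute local updates, then peer.sync()
      }
    }

Note that setup() is declared final in the base class, so subclasses hook into initialization through extraSetup() rather than overriding setup().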
diff --git a/src/main/java/org/apache/horn/bsp/AutoEncoder.java b/src/main/java/org/apache/horn/core/AutoEncoder.java
similarity index 95%
rename from src/main/java/org/apache/horn/bsp/AutoEncoder.java
rename to src/main/java/org/apache/horn/core/AutoEncoder.java
index 8ea2930..f638245 100644
--- a/src/main/java/org/apache/horn/bsp/AutoEncoder.java
+++ b/src/main/java/org/apache/horn/core/AutoEncoder.java
@@ -15,7 +15,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.horn.bsp;
+package org.apache.horn.core;
 
 import java.io.IOException;
 import java.util.Map;
@@ -34,14 +34,14 @@
 
 /**
  * AutoEncoder is a model used for dimensional reduction and feature learning.
- * It is a special kind of {@link NeuralNetwork} that consists of three layers
+ * It is a special kind of {@link AbstractNeuralNetwork} that consists of three layers
  * of neurons, where the first layer and third layer contains the same number of
  * neurons.
  * 
  */
 public class AutoEncoder {
 
-  private final SmallLayeredNeuralNetwork model;
+  private final LayeredNeuralNetwork model;
 
   /**
    * Initialize the autoencoder.
@@ -51,7 +51,7 @@
    *          information.
    */
   public AutoEncoder(int inputDimensions, int compressedDimensions) {
-    model = new SmallLayeredNeuralNetwork();
+    model = new LayeredNeuralNetwork();
     model.addLayer(inputDimensions, false,
         FunctionFactory.createDoubleFunction("Sigmoid"));
     model.addLayer(compressedDimensions, false,
@@ -65,7 +65,7 @@
   }
 
   public AutoEncoder(HamaConfiguration conf, String modelPath) {
-    model = new SmallLayeredNeuralNetwork(conf, modelPath);
+    model = new LayeredNeuralNetwork(conf, modelPath);
   }
 
   public AutoEncoder setModelPath(String modelPath) {
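The AutoEncoder wrapper keeps its public surface; only the backing field changes type. A short usage sketch based solely on the constructors and setter visible in this diff (the path is hypothetical; training and encoding calls are elided because they do not appear here):

    // 8 input/output neurons compressed through a 3-neuron middle layer
    AutoEncoder encoder = new AutoEncoder(8, 3);
    encoder.setModelPath("/tmp/autoencoder.model"); // hypothetical path

    // reload a previously trained model from storage
    AutoEncoder trained = new AutoEncoder(new HamaConfiguration(), "/tmp/autoencoder.model");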
diff --git a/src/main/java/org/apache/horn/bsp/HornJob.java b/src/main/java/org/apache/horn/core/HornJob.java
similarity index 94%
rename from src/main/java/org/apache/horn/bsp/HornJob.java
rename to src/main/java/org/apache/horn/core/HornJob.java
index 4521b87..82dcad8 100644
--- a/src/main/java/org/apache/horn/bsp/HornJob.java
+++ b/src/main/java/org/apache/horn/core/HornJob.java
@@ -15,7 +15,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.horn.bsp;
+package org.apache.horn.core;
 
 import java.io.IOException;
 
@@ -26,14 +26,14 @@
 
 public class HornJob extends BSPJob {
 
-  SmallLayeredNeuralNetwork neuralNetwork;
+  LayeredNeuralNetwork neuralNetwork;
 
   public HornJob(HamaConfiguration conf, Class<?> exampleClass)
       throws IOException {
     super(conf);
     this.setJarByClass(exampleClass);
 
-    neuralNetwork = new SmallLayeredNeuralNetwork();
+    neuralNetwork = new LayeredNeuralNetwork();
   }
 
   public void inputLayer(int featureDimension, Class<? extends Function> func) {
@@ -79,7 +79,7 @@
     this.conf.setDouble("mlp.momentum.weight", momentumWeight);
   }
 
-  public SmallLayeredNeuralNetwork getNeuralNetwork() {
+  public LayeredNeuralNetwork getNeuralNetwork() {
     return neuralNetwork;
   }
 
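HornJob remains the user-facing entry point; only its field and getter change to the new concrete type. A rough sketch of how a caller wires it up, using only members visible in this diff (further layer and hyper-parameter setters exist on HornJob but are not shown in these hunks, and Sigmoid implementing Function is an assumption):

    HamaConfiguration conf = new HamaConfiguration();
    HornJob job = new HornJob(conf, MultiLayerPerceptron.class); // jar resolved from the example class
    job.inputLayer(784, Sigmoid.class); // 784 is an illustrative feature dimension
    // ... configure remaining layers and hyper-parameters via HornJob setters ...
    LayeredNeuralNetwork network = job.getNeuralNetwork();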
diff --git a/src/main/java/org/apache/horn/bsp/SmallLayeredNeuralNetwork.java b/src/main/java/org/apache/horn/core/LayeredNeuralNetwork.java
similarity index 97%
rename from src/main/java/org/apache/horn/bsp/SmallLayeredNeuralNetwork.java
rename to src/main/java/org/apache/horn/core/LayeredNeuralNetwork.java
index 0ea8e51..afccbff 100644
--- a/src/main/java/org/apache/horn/bsp/SmallLayeredNeuralNetwork.java
+++ b/src/main/java/org/apache/horn/core/LayeredNeuralNetwork.java
@@ -15,7 +15,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.horn.bsp;
+package org.apache.horn.core;
 
 import java.io.DataInput;
 import java.io.DataOutput;
@@ -62,10 +62,10 @@
  * form a bipartite weighted graph.
  * 
  */
-public class SmallLayeredNeuralNetwork extends AbstractLayeredNeuralNetwork {
+public class LayeredNeuralNetwork extends AbstractLayeredNeuralNetwork {
 
   private static final Log LOG = LogFactory
-      .getLog(SmallLayeredNeuralNetwork.class);
+      .getLog(LayeredNeuralNetwork.class);
 
   public static Class<Neuron<Synapse<DoubleWritable, DoubleWritable>>> neuronClass;
 
@@ -82,14 +82,14 @@
 
   protected double regularizationWeight;
 
-  public SmallLayeredNeuralNetwork() {
+  public LayeredNeuralNetwork() {
     this.layerSizeList = Lists.newArrayList();
     this.weightMatrixList = Lists.newArrayList();
     this.prevWeightUpdatesList = Lists.newArrayList();
     this.squashingFunctionList = Lists.newArrayList();
   }
 
-  public SmallLayeredNeuralNetwork(HamaConfiguration conf, String modelPath) {
+  public LayeredNeuralNetwork(HamaConfiguration conf, String modelPath) {
     super(conf, modelPath);
     this.regularizationWeight = conf.getDouble("regularization.weight", 0);
   }
@@ -333,6 +333,7 @@
    * @param intermediateOutput The intermediateOutput of previous layer.
    * @return a new vector with the result of the operation.
    */
+  @SuppressWarnings("unchecked")
   protected DoubleVector forward(int fromLayer, DoubleVector intermediateOutput) {
     DoubleMatrix weightMatrix = this.weightMatrixList.get(fromLayer);
 
@@ -576,10 +577,10 @@
     this.writeModelToFile();
 
     // create job
-    BSPJob job = new BSPJob(conf, SmallLayeredNeuralNetworkTrainer.class);
+    BSPJob job = new BSPJob(conf, LayeredNeuralNetworkTrainer.class);
     job.setJobName("Small scale Neural Network training");
-    job.setJarByClass(SmallLayeredNeuralNetworkTrainer.class);
-    job.setBspClass(SmallLayeredNeuralNetworkTrainer.class);
+    job.setJarByClass(LayeredNeuralNetworkTrainer.class);
+    job.setBspClass(LayeredNeuralNetworkTrainer.class);
 
     job.getConfiguration().setClass("neuron.class", StandardNeuron.class,
         Neuron.class);
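The training job registers the neuron implementation in the job configuration under neuron.class, to be instantiated reflectively on each peer. The consuming side would look roughly like this (a sketch assuming the standard Hadoop Configuration.getClass and ReflectionUtils contracts; raw types are used for brevity):

    Class<? extends Neuron> neuronClass = conf.getClass("neuron.class",
        StandardNeuron.class, Neuron.class);
    Neuron neuron = org.apache.hadoop.util.ReflectionUtils.newInstance(neuronClass, conf);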
diff --git a/src/main/java/org/apache/horn/bsp/SmallLayeredNeuralNetworkTrainer.java b/src/main/java/org/apache/horn/core/LayeredNeuralNetworkTrainer.java
similarity index 88%
rename from src/main/java/org/apache/horn/bsp/SmallLayeredNeuralNetworkTrainer.java
rename to src/main/java/org/apache/horn/core/LayeredNeuralNetworkTrainer.java
index c3e258c..effd5b0 100644
--- a/src/main/java/org/apache/horn/bsp/SmallLayeredNeuralNetworkTrainer.java
+++ b/src/main/java/org/apache/horn/core/LayeredNeuralNetworkTrainer.java
@@ -15,7 +15,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.horn.bsp;
+package org.apache.horn.core;
 
 import java.io.IOException;
 import java.net.InetSocketAddress;
@@ -38,20 +38,20 @@
 import com.google.common.base.Preconditions;
 
 /**
- * The trainer that train the {@link SmallLayeredNeuralNetwork} based on BSP
+ * The trainer that trains the {@link LayeredNeuralNetwork} based on the BSP
  * framework.
  * 
  */
-public final class SmallLayeredNeuralNetworkTrainer
+public final class LayeredNeuralNetworkTrainer
     extends
-    BSP<LongWritable, VectorWritable, NullWritable, NullWritable, SmallLayeredNeuralNetworkMessage> {
+    BSP<LongWritable, VectorWritable, NullWritable, NullWritable, ParameterMessage> {
 
   private static final Log LOG = LogFactory
-      .getLog(SmallLayeredNeuralNetworkTrainer.class);
+      .getLog(LayeredNeuralNetworkTrainer.class);
 
   /* When given peer is master worker: base of parameter merge */
   /* When given peer is slave worker: neural network for training */
-  private SmallLayeredNeuralNetwork inMemoryModel;
+  private LayeredNeuralNetwork inMemoryModel;
 
   /* Job configuration */
   private HamaConfiguration conf;
@@ -76,7 +76,7 @@
    * @param peer
    * */
   private boolean isMaster(
-      BSPPeer<LongWritable, VectorWritable, NullWritable, NullWritable, SmallLayeredNeuralNetworkMessage> peer) {
+      BSPPeer<LongWritable, VectorWritable, NullWritable, NullWritable, ParameterMessage> peer) {
     return peer.getPeerIndex() == peer.getNumPeers() - 1;
   }
 
@@ -85,13 +85,13 @@
    * If the model path is specified, load the existing from storage location.
    */
   public void setup(
-      BSPPeer<LongWritable, VectorWritable, NullWritable, NullWritable, SmallLayeredNeuralNetworkMessage> peer) {
+      BSPPeer<LongWritable, VectorWritable, NullWritable, NullWritable, ParameterMessage> peer) {
     // At least one master & slave worker exist.
     Preconditions.checkArgument(peer.getNumPeers() >= 2);
     this.conf = peer.getConfiguration();
 
     String modelPath = conf.get("model.path");
-    this.inMemoryModel = new SmallLayeredNeuralNetwork(conf, modelPath);
+    this.inMemoryModel = new LayeredNeuralNetwork(conf, modelPath);
 
     this.batchSize = conf.getInt("training.batch.size", 50);
     this.isConverge = new AtomicBoolean(false);
@@ -130,7 +130,7 @@
    * Write the trained model back to stored location.
    */
   public void cleanup(
-      BSPPeer<LongWritable, VectorWritable, NullWritable, NullWritable, SmallLayeredNeuralNetworkMessage> peer) {
+      BSPPeer<LongWritable, VectorWritable, NullWritable, NullWritable, ParameterMessage> peer) {
     // write model to modelPath
     if (isMaster(peer)) {
       try {
@@ -144,7 +144,7 @@
 
   @Override
   public void bsp(
-      BSPPeer<LongWritable, VectorWritable, NullWritable, NullWritable, SmallLayeredNeuralNetworkMessage> peer)
+      BSPPeer<LongWritable, VectorWritable, NullWritable, NullWritable, ParameterMessage> peer)
       throws IOException, SyncException, InterruptedException {
     while (!this.isConverge.get()) {
       // each slave-worker calculate the matrices updates according to local
@@ -168,7 +168,7 @@
    * @throws IOException
    */
   private void calculateUpdates(
-      BSPPeer<LongWritable, VectorWritable, NullWritable, NullWritable, SmallLayeredNeuralNetworkMessage> peer)
+      BSPPeer<LongWritable, VectorWritable, NullWritable, NullWritable, ParameterMessage> peer)
       throws IOException {
 
     DoubleMatrix[] weightUpdates = new DoubleMatrix[this.inMemoryModel.weightMatrixList
@@ -189,7 +189,7 @@
         peer.readNext(key, value);
       }
       DoubleVector trainingInstance = value.getVector();
-      SmallLayeredNeuralNetwork.matricesAdd(weightUpdates,
+      LayeredNeuralNetwork.matricesAdd(weightUpdates,
           this.inMemoryModel.trainByInstance(trainingInstance));
       avgTrainingError += this.inMemoryModel.trainingError;
     }
@@ -201,11 +201,11 @@
     }
 
     // exchange parameter update with master
-    SmallLayeredNeuralNetworkMessage msg = new SmallLayeredNeuralNetworkMessage(
+    ParameterMessage msg = new ParameterMessage(
         avgTrainingError, false, weightUpdates,
         this.inMemoryModel.getPrevMatricesUpdates());
 
-    SmallLayeredNeuralNetworkMessage inMessage = proxy.merge(msg);
+    ParameterMessage inMessage = proxy.merge(msg);
     DoubleMatrix[] newWeights = inMessage.getCurMatrices();
     DoubleMatrix[] preWeightUpdates = inMessage.getPrevMatrices();
     this.inMemoryModel.setWeightMatrices(newWeights);
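With the message type renamed, the slave-side parameter exchange reads as a plain request/reply against the merger proxy. In outline, using the calls shown in this hunk:

    // slave side, once per mini-batch
    ParameterMessage msg = new ParameterMessage(
        avgTrainingError, false, weightUpdates, prevMatricesUpdates);
    ParameterMessage reply = proxy.merge(msg); // blocking RPC to the master-side merger
    DoubleMatrix[] newWeights = reply.getCurMatrices();
    DoubleMatrix[] prevUpdates = reply.getPrevMatrices(); // momentum state for the next batch
    inMemoryModel.setWeightMatrices(newWeights);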
diff --git a/src/main/java/org/apache/horn/bsp/Neuron.java b/src/main/java/org/apache/horn/core/Neuron.java
similarity index 98%
rename from src/main/java/org/apache/horn/bsp/Neuron.java
rename to src/main/java/org/apache/horn/core/Neuron.java
index f122b6d..357b42f 100644
--- a/src/main/java/org/apache/horn/bsp/Neuron.java
+++ b/src/main/java/org/apache/horn/core/Neuron.java
@@ -15,7 +15,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.horn.bsp;
+package org.apache.horn.core;
 
 import org.apache.hadoop.io.Writable;
 import org.apache.hama.commons.math.DoubleFunction;
diff --git a/src/main/java/org/apache/horn/bsp/NeuronInterface.java b/src/main/java/org/apache/horn/core/NeuronInterface.java
similarity index 97%
rename from src/main/java/org/apache/horn/bsp/NeuronInterface.java
rename to src/main/java/org/apache/horn/core/NeuronInterface.java
index bcc1a5a..5e4c113 100644
--- a/src/main/java/org/apache/horn/bsp/NeuronInterface.java
+++ b/src/main/java/org/apache/horn/core/NeuronInterface.java
@@ -15,7 +15,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.horn.bsp;
+package org.apache.horn.core;
 
 import java.io.IOException;
 
diff --git a/src/main/java/org/apache/horn/bsp/ParameterMerger.java b/src/main/java/org/apache/horn/core/ParameterMerger.java
similarity index 89%
rename from src/main/java/org/apache/horn/bsp/ParameterMerger.java
rename to src/main/java/org/apache/horn/core/ParameterMerger.java
index 6df719a..512b402 100644
--- a/src/main/java/org/apache/horn/bsp/ParameterMerger.java
+++ b/src/main/java/org/apache/horn/core/ParameterMerger.java
@@ -15,13 +15,13 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.horn.bsp;
+package org.apache.horn.core;
 
 import org.apache.hama.ipc.VersionedProtocol;
 
 public interface ParameterMerger extends VersionedProtocol {
   long versionID = 1L;
 
-  SmallLayeredNeuralNetworkMessage merge(SmallLayeredNeuralNetworkMessage msg);
+  ParameterMessage merge(ParameterMessage msg);
 
 }
diff --git a/src/main/java/org/apache/horn/bsp/ParameterMergerServer.java b/src/main/java/org/apache/horn/core/ParameterMergerServer.java
similarity index 92%
rename from src/main/java/org/apache/horn/bsp/ParameterMergerServer.java
rename to src/main/java/org/apache/horn/core/ParameterMergerServer.java
index 47aab84..c76a4d0 100644
--- a/src/main/java/org/apache/horn/bsp/ParameterMergerServer.java
+++ b/src/main/java/org/apache/horn/core/ParameterMergerServer.java
@@ -15,7 +15,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.horn.bsp;
+package org.apache.horn.core;
 
 import java.io.IOException;
 import java.util.concurrent.atomic.AtomicBoolean;
@@ -31,7 +31,7 @@
   private static final Log LOG = LogFactory.getLog(ParameterMergerServer.class);
 
   /* The parameter merge base. */
-  protected SmallLayeredNeuralNetwork inMemoryModel;
+  protected LayeredNeuralNetwork inMemoryModel;
 
   /* To terminate or not to terminate. */
   protected AtomicBoolean isConverge;
@@ -60,7 +60,7 @@
   /* how many merges have been conducted? */
   protected int mergeCount = 0;
 
-  public ParameterMergerServer(SmallLayeredNeuralNetwork inMemoryModel,
+  public ParameterMergerServer(LayeredNeuralNetwork inMemoryModel,
       AtomicBoolean isConverge, int slaveCount, int mergeLimit,
       int convergenceCheckInterval) {
     this.inMemoryModel = inMemoryModel;
@@ -76,8 +76,8 @@
   }
 
   @Override
-  public SmallLayeredNeuralNetworkMessage merge(
-      SmallLayeredNeuralNetworkMessage msg) {
+  public ParameterMessage merge(
+      ParameterMessage msg) {
 
     double trainingError = msg.getTrainingError();
     DoubleMatrix[] weightUpdates = msg.getCurMatrices();
@@ -124,7 +124,7 @@
       }
     }
 
-    return new SmallLayeredNeuralNetworkMessage(0, this.isConverge.get(),
+    return new ParameterMessage(0, this.isConverge.get(),
         this.inMemoryModel.getWeightMatrices(),
         this.inMemoryModel.getPrevMatricesUpdates());
   }
diff --git a/src/main/java/org/apache/horn/bsp/SmallLayeredNeuralNetworkMessage.java b/src/main/java/org/apache/horn/core/ParameterMessage.java
similarity index 88%
rename from src/main/java/org/apache/horn/bsp/SmallLayeredNeuralNetworkMessage.java
rename to src/main/java/org/apache/horn/core/ParameterMessage.java
index 2f8c287..3905e25 100644
--- a/src/main/java/org/apache/horn/bsp/SmallLayeredNeuralNetworkMessage.java
+++ b/src/main/java/org/apache/horn/core/ParameterMessage.java
@@ -15,7 +15,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.horn.bsp;
+package org.apache.horn.core;
 
 import java.io.DataInput;
 import java.io.DataOutput;
@@ -27,23 +27,22 @@
 import org.apache.hama.commons.math.DoubleMatrix;
 
 /**
- * NeuralNetworkMessage transmits the messages between peers during the training
- * of neural networks.
+ * ParameterMessage carries the parameter updates exchanged between workers
+ * and the parameter server during the training of neural networks.
  * 
  */
-public class SmallLayeredNeuralNetworkMessage implements Writable {
+public class ParameterMessage implements Writable {
 
   protected double trainingError;
   protected DoubleMatrix[] curMatrices;
   protected DoubleMatrix[] prevMatrices;
   protected boolean converge;
 
-  public SmallLayeredNeuralNetworkMessage() {
+  public ParameterMessage() {
   }
-  
-  public SmallLayeredNeuralNetworkMessage(double trainingError,
-      boolean converge, DoubleMatrix[] weightMatrices,
-      DoubleMatrix[] prevMatrices) {
+
+  public ParameterMessage(double trainingError, boolean converge,
+      DoubleMatrix[] weightMatrices, DoubleMatrix[] prevMatrices) {
     this.trainingError = trainingError;
     this.converge = converge;
     this.curMatrices = weightMatrices;
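ParameterMessage stays a Hadoop Writable, so its four fields must be written and read symmetrically. A minimal sketch of the scalar half of that contract (the field order is illustrative; the real class also serializes curMatrices and prevMatrices, elided here):

    @Override
    public void write(DataOutput out) throws IOException {
      out.writeDouble(trainingError);
      out.writeBoolean(converge);
      // curMatrices and prevMatrices are written here in the real class
    }

    @Override
    public void readFields(DataInput in) throws IOException {
      trainingError = in.readDouble();
      converge = in.readBoolean();
      // curMatrices and prevMatrices are read here in the real class
    }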
diff --git a/src/main/java/org/apache/horn/bsp/Synapse.java b/src/main/java/org/apache/horn/core/Synapse.java
similarity index 98%
rename from src/main/java/org/apache/horn/bsp/Synapse.java
rename to src/main/java/org/apache/horn/core/Synapse.java
index 61725f9..714767b 100644
--- a/src/main/java/org/apache/horn/bsp/Synapse.java
+++ b/src/main/java/org/apache/horn/core/Synapse.java
@@ -15,7 +15,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.horn.bsp;
+package org.apache.horn.core;
 
 import java.io.DataInput;
 import java.io.DataOutput;
diff --git a/src/main/java/org/apache/horn/examples/MultiLayerPerceptron.java b/src/main/java/org/apache/horn/examples/MultiLayerPerceptron.java
index f66344c..c3bf180 100644
--- a/src/main/java/org/apache/horn/examples/MultiLayerPerceptron.java
+++ b/src/main/java/org/apache/horn/examples/MultiLayerPerceptron.java
@@ -21,9 +21,9 @@
 
 import org.apache.hadoop.io.DoubleWritable;
 import org.apache.hama.HamaConfiguration;
-import org.apache.horn.bsp.HornJob;
-import org.apache.horn.bsp.Neuron;
-import org.apache.horn.bsp.Synapse;
+import org.apache.horn.core.HornJob;
+import org.apache.horn.core.Neuron;
+import org.apache.horn.core.Synapse;
 import org.apache.horn.funcs.CrossEntropy;
 import org.apache.horn.funcs.Sigmoid;
 
@@ -101,7 +101,7 @@
       InterruptedException, ClassNotFoundException {
     if (args.length < 9) {
       System.out
-          .println("Usage: model_path training_set learning_rate momentum regularization_weight feature_dimension label_dimension max_iteration num_tasks");
+          .println("Usage: <MODEL_PATH> <INPUT_PATH> <LEARNING_RATE> <MOMEMTUM_WEIGHT> <REGULARIZATION_WEIGHT> <FEATURE_DIMENSION> <LABEL_DIMENSION> <MAX_ITERATION> <NUM_TASKS>");
       System.exit(1);
     }
     HornJob ann = createJob(new HamaConfiguration(), args[0], args[1],
diff --git a/src/main/java/org/apache/horn/examples/NeuralNetwork.java b/src/main/java/org/apache/horn/examples/NeuralNetwork.java
deleted file mode 100644
index 5c0afdf..0000000
--- a/src/main/java/org/apache/horn/examples/NeuralNetwork.java
+++ /dev/null
@@ -1,217 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.horn.examples;
-
-import java.io.BufferedReader;
-import java.io.BufferedWriter;
-import java.io.InputStreamReader;
-import java.io.OutputStreamWriter;
-import java.net.URI;
-import java.util.HashMap;
-import java.util.Map;
-
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hama.HamaConfiguration;
-import org.apache.hama.commons.math.DenseDoubleVector;
-import org.apache.hama.commons.math.DoubleVector;
-import org.apache.horn.bsp.SmallLayeredNeuralNetwork;
-import org.apache.horn.funcs.FunctionFactory;
-
-/**
- * The example of using {@link SmallLayeredNeuralNetwork}, including the
- * training phase and labeling phase.
- */
-public class NeuralNetwork {
-
-  public static void main(String[] args) throws Exception {
-    if (args.length < 3) {
-      printUsage();
-      return;
-    }
-    HamaConfiguration conf = new HamaConfiguration();
-    String mode = args[0];
-    
-    if (mode.equalsIgnoreCase("label")) {
-      if (args.length < 4) {
-        printUsage();
-        return;
-      }
-
-      String featureDataPath = args[1];
-      String resultDataPath = args[2];
-      String modelPath = args[3];
-
-      SmallLayeredNeuralNetwork ann = new SmallLayeredNeuralNetwork(conf, modelPath);
-
-      // process data in streaming approach
-      FileSystem fs = FileSystem.get(new URI(featureDataPath), conf);
-      BufferedReader br = new BufferedReader(new InputStreamReader(
-          fs.open(new Path(featureDataPath))));
-      Path outputPath = new Path(resultDataPath);
-      if (fs.exists(outputPath)) {
-        fs.delete(outputPath, true);
-      }
-      BufferedWriter bw = new BufferedWriter(new OutputStreamWriter(
-          fs.create(outputPath)));
-
-      String line = null;
-
-      while ((line = br.readLine()) != null) {
-        if (line.trim().length() == 0) {
-          continue;
-        }
-        String[] tokens = line.trim().split(",");
-        double[] vals = new double[tokens.length];
-        for (int i = 0; i < tokens.length; ++i) {
-          vals[i] = Double.parseDouble(tokens[i]);
-        }
-        DoubleVector instance = new DenseDoubleVector(vals);
-        DoubleVector result = ann.getOutput(instance);
-        double[] arrResult = result.toArray();
-        StringBuilder sb = new StringBuilder();
-        for (int i = 0; i < arrResult.length; ++i) {
-          sb.append(arrResult[i]);
-          if (i != arrResult.length - 1) {
-            sb.append(",");
-          } else {
-            sb.append("\n");
-          }
-        }
-        bw.write(sb.toString());
-      }
-
-      br.close();
-      bw.close();
-    } else if (mode.equals("train")) {
-      if (args.length < 5) {
-        printUsage();
-        return;
-      }
-
-      String trainingDataPath = args[1];
-      String trainedModelPath = args[2];
-
-      int featureDimension = Integer.parseInt(args[3]);
-      int labelDimension = Integer.parseInt(args[4]);
-
-      int iteration = 1000;
-      double learningRate = 0.4;
-      double momemtumWeight = 0.2;
-      double regularizationWeight = 0.01;
-
-      // parse parameters
-      if (args.length >= 6) {
-        try {
-          iteration = Integer.parseInt(args[5]);
-          System.out.printf("Iteration: %d\n", iteration);
-        } catch (NumberFormatException e) {
-          System.err
-              .println("MAX_ITERATION format invalid. It should be a positive number.");
-          return;
-        }
-      }
-      if (args.length >= 7) {
-        try {
-          learningRate = Double.parseDouble(args[6]);
-          System.out.printf("Learning rate: %f\n", learningRate);
-        } catch (NumberFormatException e) {
-          System.err
-              .println("LEARNING_RATE format invalid. It should be a positive double in range (0, 1.0)");
-          return;
-        }
-      }
-      if (args.length >= 8) {
-        try {
-          momemtumWeight = Double.parseDouble(args[7]);
-          System.out.printf("Momemtum weight: %f\n", momemtumWeight);
-        } catch (NumberFormatException e) {
-          System.err
-              .println("MOMEMTUM_WEIGHT format invalid. It should be a positive double in range (0, 1.0)");
-          return;
-        }
-      }
-      if (args.length >= 9) {
-        try {
-          regularizationWeight = Double.parseDouble(args[8]);
-          System.out
-              .printf("Regularization weight: %f\n", regularizationWeight);
-        } catch (NumberFormatException e) {
-          System.err
-              .println("REGULARIZATION_WEIGHT format invalid. It should be a positive double in range (0, 1.0)");
-          return;
-        }
-      }
-
-      // train the model
-      SmallLayeredNeuralNetwork ann = new SmallLayeredNeuralNetwork();
-      // ann.setLearningRate(learningRate);
-      // ann.setMomemtumWeight(momemtumWeight);
-      // ann.setRegularizationWeight(regularizationWeight);
-      ann.addLayer(featureDimension, false,
-          FunctionFactory.createDoubleFunction("Sigmoid"));
-      ann.addLayer(featureDimension, false,
-          FunctionFactory.createDoubleFunction("Sigmoid"));
-      ann.addLayer(labelDimension, true,
-          FunctionFactory.createDoubleFunction("Sigmoid"));
-      ann.setCostFunction(FunctionFactory
-          .createDoubleDoubleFunction("CrossEntropy"));
-      ann.setModelPath(trainedModelPath);
-
-      Map<String, String> trainingParameters = new HashMap<String, String>();
-      trainingParameters.put("tasks", "2");
-      trainingParameters.put("training.max.iterations", "" + iteration);
-      trainingParameters.put("training.batch.size", "300");
-      trainingParameters.put("convergence.check.interval", "1000");
-      // ann.train(conf, new Path(trainingDataPath), trainingParameters);
-    }
-
-  }
-
-  private static void printUsage() {
-    System.out
-        .println("USAGE: <MODE> <INPUT_PATH> <OUTPUT_PATH> <MODEL_PATH>|<FEATURE_DIMENSION> <LABEL_DIMENSION> [<MAX_ITERATION> <LEARNING_RATE> <MOMEMTUM_WEIGHT> <REGULARIZATION_WEIGHT>]");
-    System.out
-        .println("\tMODE\t- train: train the model with given training data.");
-    System.out
-        .println("\t\t- label: obtain the result by feeding the features to the neural network.");
-    System.out
-        .println("\tINPUT_PATH\tin 'train' mode, it is the path of the training data; in 'label' mode, it is the path of the to be evaluated data that lacks the label.");
-    System.out
-        .println("\tOUTPUT_PATH\tin 'train' mode, it is where the trained model is stored; in 'label' mode, it is where the labeled data is stored.");
-    System.out.println("\n\tConditional Parameters:");
-    System.out
-        .println("\tMODEL_PATH\tonly required in 'label' mode. It specifies where to load the trained neural network model.");
-    System.out
-        .println("\tMAX_ITERATION\tonly used in 'train' mode. It specifies how many iterations for the neural network to run. Default is 0.01.");
-    System.out
-        .println("\tLEARNING_RATE\tonly used to 'train' mode. It specifies the degree of aggregation for learning, usually in range (0, 1.0). Default is 0.1.");
-    System.out
-        .println("\tMOMEMTUM_WEIGHT\tonly used to 'train' mode. It specifies the weight of momemtum. Default is 0.");
-    System.out
-        .println("\tREGULARIZATION_WEIGHT\tonly required in 'train' model. It specifies the weight of reqularization.");
-    System.out.println("\nExample:");
-    System.out
-        .println("Train a neural network with with feature dimension 8, label dimension 1 and default setting:\n\tneuralnets train hdfs://localhost:30002/training_data hdfs://localhost:30002/model 8 1");
-    System.out
-        .println("Train a neural network with with feature dimension 8, label dimension 1 and specify learning rate as 0.1, momemtum rate as 0.2, and regularization weight as 0.01:\n\tneuralnets.train hdfs://localhost:30002/training_data hdfs://localhost:30002/model 8 1 0.1 0.2 0.01");
-    System.out
-        .println("Label the data with trained model:\n\tneuralnets evaluate hdfs://localhost:30002/unlabeled_data hdfs://localhost:30002/result hdfs://localhost:30002/model");
-  }
-
-}
diff --git a/src/test/java/org/apache/horn/bsp/MLTestBase.java b/src/test/java/org/apache/horn/core/MLTestBase.java
similarity index 98%
rename from src/test/java/org/apache/horn/bsp/MLTestBase.java
rename to src/test/java/org/apache/horn/core/MLTestBase.java
index 8001bcf..3f02600 100644
--- a/src/test/java/org/apache/horn/bsp/MLTestBase.java
+++ b/src/test/java/org/apache/horn/core/MLTestBase.java
@@ -15,7 +15,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.horn.bsp;
+package org.apache.horn.core;
 
 import java.util.Arrays;
 import java.util.List;
diff --git a/src/test/java/org/apache/horn/bsp/TestAutoEncoder.java b/src/test/java/org/apache/horn/core/TestAutoEncoder.java
similarity index 98%
rename from src/test/java/org/apache/horn/bsp/TestAutoEncoder.java
rename to src/test/java/org/apache/horn/core/TestAutoEncoder.java
index a42fd72..10ae738 100644
--- a/src/test/java/org/apache/horn/bsp/TestAutoEncoder.java
+++ b/src/test/java/org/apache/horn/core/TestAutoEncoder.java
@@ -15,7 +15,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.horn.bsp;
+package org.apache.horn.core;
 
 import static org.junit.Assert.assertEquals;
 
diff --git a/src/test/java/org/apache/horn/trainer/TestNeuron.java b/src/test/java/org/apache/horn/core/TestNeuron.java
similarity index 96%
rename from src/test/java/org/apache/horn/trainer/TestNeuron.java
rename to src/test/java/org/apache/horn/core/TestNeuron.java
index b5f6bfc..f2fe4e1 100644
--- a/src/test/java/org/apache/horn/trainer/TestNeuron.java
+++ b/src/test/java/org/apache/horn/core/TestNeuron.java
@@ -15,7 +15,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.horn.trainer;
+package org.apache.horn.core;
 
 import java.io.IOException;
 import java.util.ArrayList;
@@ -25,8 +25,8 @@
 
 import org.apache.hadoop.io.DoubleWritable;
 import org.apache.hama.HamaConfiguration;
-import org.apache.horn.bsp.Neuron;
-import org.apache.horn.bsp.Synapse;
+import org.apache.horn.core.Neuron;
+import org.apache.horn.core.Synapse;
 import org.apache.horn.funcs.Sigmoid;
 
 public class TestNeuron extends TestCase {
diff --git a/src/test/java/org/apache/horn/bsp/TestSmallLayeredNeuralNetwork.java b/src/test/java/org/apache/horn/core/TestSmallLayeredNeuralNetwork.java
similarity index 94%
rename from src/test/java/org/apache/horn/bsp/TestSmallLayeredNeuralNetwork.java
rename to src/test/java/org/apache/horn/core/TestSmallLayeredNeuralNetwork.java
index ee48136..7e4328f 100644
--- a/src/test/java/org/apache/horn/bsp/TestSmallLayeredNeuralNetwork.java
+++ b/src/test/java/org/apache/horn/core/TestSmallLayeredNeuralNetwork.java
@@ -15,7 +15,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.horn.bsp;
+package org.apache.horn.core;
 
 import static org.junit.Assert.assertArrayEquals;
 import static org.junit.Assert.assertEquals;
@@ -46,8 +46,8 @@
 import org.apache.hama.commons.math.DoubleVector;
 import org.apache.hama.ml.util.DefaultFeatureTransformer;
 import org.apache.hama.ml.util.FeatureTransformer;
-import org.apache.horn.bsp.AbstractLayeredNeuralNetwork.LearningStyle;
-import org.apache.horn.bsp.AbstractLayeredNeuralNetwork.TrainingMethod;
+import org.apache.horn.core.AbstractLayeredNeuralNetwork.LearningStyle;
+import org.apache.horn.core.AbstractLayeredNeuralNetwork.TrainingMethod;
 import org.apache.horn.funcs.FunctionFactory;
 import org.junit.Test;
 import org.mortbay.log.Log;
@@ -60,7 +60,7 @@
 
   @Test
   public void testReadWrite() {
-    SmallLayeredNeuralNetwork ann = new SmallLayeredNeuralNetwork();
+    LayeredNeuralNetwork ann = new LayeredNeuralNetwork();
     ann.addLayer(2, false,
         FunctionFactory.createDoubleFunction("IdentityFunction"));
     ann.addLayer(5, false,
@@ -96,7 +96,7 @@
     }
 
     // read from file
-    SmallLayeredNeuralNetwork annCopy = new SmallLayeredNeuralNetwork(new HamaConfiguration(), modelPath);
+    LayeredNeuralNetwork annCopy = new LayeredNeuralNetwork(new HamaConfiguration(), modelPath);
     assertEquals(annCopy.getClass().getSimpleName(), annCopy.getModelType());
     assertEquals(modelPath, annCopy.getModelPath());
     // assertEquals(learningRate, annCopy.getLearningRate(), 0.000001);
@@ -128,7 +128,7 @@
    */
   public void testOutput() {
     // first network
-    SmallLayeredNeuralNetwork ann = new SmallLayeredNeuralNetwork();
+    LayeredNeuralNetwork ann = new LayeredNeuralNetwork();
     ann.addLayer(2, false,
         FunctionFactory.createDoubleFunction("IdentityFunction"));
     ann.addLayer(5, false,
@@ -151,7 +151,7 @@
     // assertEquals(3, result.get(0), 0.000001);
 
     // second network
-    SmallLayeredNeuralNetwork ann2 = new SmallLayeredNeuralNetwork();
+    LayeredNeuralNetwork ann2 = new LayeredNeuralNetwork();
     ann2.addLayer(2, false, FunctionFactory.createDoubleFunction("Sigmoid"));
     ann2.addLayer(3, false, FunctionFactory.createDoubleFunction("Sigmoid"));
     ann2.addLayer(1, true, FunctionFactory.createDoubleFunction("Sigmoid"));
@@ -170,7 +170,7 @@
     DoubleVector vec = ann2.getOutput(new DenseDoubleVector(test));
     assertArrayEquals(result2, vec.toArray(), 0.000001);
 
-    SmallLayeredNeuralNetwork ann3 = new SmallLayeredNeuralNetwork();
+    LayeredNeuralNetwork ann3 = new LayeredNeuralNetwork();
     ann3.addLayer(2, false, FunctionFactory.createDoubleFunction("Sigmoid"));
     ann3.addLayer(3, false, FunctionFactory.createDoubleFunction("Sigmoid"));
     ann3.addLayer(1, true, FunctionFactory.createDoubleFunction("Sigmoid"));
@@ -190,7 +190,7 @@
 
   @Test
   public void testXORlocal() {
-    SmallLayeredNeuralNetwork ann = new SmallLayeredNeuralNetwork();
+    LayeredNeuralNetwork ann = new LayeredNeuralNetwork();
     ann.addLayer(2, false, FunctionFactory.createDoubleFunction("Sigmoid"));
     ann.addLayer(3, false, FunctionFactory.createDoubleFunction("Sigmoid"));
     ann.addLayer(1, true, FunctionFactory.createDoubleFunction("Sigmoid"));
@@ -228,7 +228,7 @@
     } catch (IOException e) {
       e.printStackTrace();
     }
-    SmallLayeredNeuralNetwork annCopy = new SmallLayeredNeuralNetwork(new HamaConfiguration(), modelPath);
+    LayeredNeuralNetwork annCopy = new LayeredNeuralNetwork(new HamaConfiguration(), modelPath);
     // test on instances
     for (int i = 0; i < instances.length; ++i) {
       DoubleVector input = new DenseDoubleVector(instances[i]).slice(2);
@@ -243,7 +243,7 @@
 
   @Test
   public void testXORWithMomentum() {
-    SmallLayeredNeuralNetwork ann = new SmallLayeredNeuralNetwork();
+    LayeredNeuralNetwork ann = new LayeredNeuralNetwork();
     ann.addLayer(2, false, FunctionFactory.createDoubleFunction("Sigmoid"));
     ann.addLayer(3, false, FunctionFactory.createDoubleFunction("Sigmoid"));
     ann.addLayer(1, true, FunctionFactory.createDoubleFunction("Sigmoid"));
@@ -278,7 +278,7 @@
     } catch (IOException e) {
       e.printStackTrace();
     }
-    SmallLayeredNeuralNetwork annCopy = new SmallLayeredNeuralNetwork(new HamaConfiguration(), modelPath);
+    LayeredNeuralNetwork annCopy = new LayeredNeuralNetwork(new HamaConfiguration(), modelPath);
     // test on instances
     for (int i = 0; i < instances.length; ++i) {
       DoubleVector input = new DenseDoubleVector(instances[i]).slice(2);
@@ -293,7 +293,7 @@
 
   @Test
   public void testXORLocalWithRegularization() {
-    SmallLayeredNeuralNetwork ann = new SmallLayeredNeuralNetwork();
+    LayeredNeuralNetwork ann = new LayeredNeuralNetwork();
     ann.addLayer(2, false, FunctionFactory.createDoubleFunction("Sigmoid"));
     ann.addLayer(3, false, FunctionFactory.createDoubleFunction("Sigmoid"));
     ann.addLayer(1, true, FunctionFactory.createDoubleFunction("Sigmoid"));
@@ -329,7 +329,7 @@
     } catch (IOException e) {
       e.printStackTrace();
     }
-    SmallLayeredNeuralNetwork annCopy = new SmallLayeredNeuralNetwork(new HamaConfiguration(), modelPath);
+    LayeredNeuralNetwork annCopy = new LayeredNeuralNetwork(new HamaConfiguration(), modelPath);
     // test on instances
     for (int i = 0; i < instances.length; ++i) {
       DoubleVector input = new DenseDoubleVector(instances[i]).slice(2);
@@ -377,7 +377,7 @@
     List<double[]> trainingInstances = instanceList.subList(0,
         instanceList.size() - 100);
 
-    SmallLayeredNeuralNetwork ann = new SmallLayeredNeuralNetwork();
+    LayeredNeuralNetwork ann = new LayeredNeuralNetwork();
     // ann.setLearningRate(0.001);
     // ann.setMomemtumWeight(0.1);
     //ann.setRegularizationWeight(0.01);
@@ -485,7 +485,7 @@
 
     // create model
     int dimension = 8;
-    SmallLayeredNeuralNetwork ann = new SmallLayeredNeuralNetwork();
+    LayeredNeuralNetwork ann = new LayeredNeuralNetwork();
     // ann.setLearningRate(0.7);
     // ann.setMomemtumWeight(0.5);
     //ann.setRegularizationWeight(0.1);
@@ -590,7 +590,7 @@
 
     // create model
     int dimension = 8;
-    SmallLayeredNeuralNetwork ann = new SmallLayeredNeuralNetwork();
+    LayeredNeuralNetwork ann = new LayeredNeuralNetwork();
     // ann.setLearningRate(0.7);
     // ann.setMomemtumWeight(0.5);
     //ann.setRegularizationWeight(0.1);
diff --git a/src/test/java/org/apache/horn/bsp/TestSmallLayeredNeuralNetworkMessage.java b/src/test/java/org/apache/horn/core/TestSmallLayeredNeuralNetworkMessage.java
similarity index 93%
rename from src/test/java/org/apache/horn/bsp/TestSmallLayeredNeuralNetworkMessage.java
rename to src/test/java/org/apache/horn/core/TestSmallLayeredNeuralNetworkMessage.java
index e422d95..a0c66d2 100644
--- a/src/test/java/org/apache/horn/bsp/TestSmallLayeredNeuralNetworkMessage.java
+++ b/src/test/java/org/apache/horn/core/TestSmallLayeredNeuralNetworkMessage.java
@@ -15,7 +15,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.horn.bsp;
+package org.apache.horn.core;
 
 import static org.junit.Assert.assertArrayEquals;
 import static org.junit.Assert.assertEquals;
@@ -54,7 +55,7 @@
 
     boolean isConverge = false;
 
-    SmallLayeredNeuralNetworkMessage message = new SmallLayeredNeuralNetworkMessage(
+    ParameterMessage message = new ParameterMessage(
         error, isConverge, matrices, null);
     Configuration conf = new Configuration();
     String strPath = "/tmp/testReadWriteSmallLayeredNeuralNetworkMessage";
@@ -66,7 +67,7 @@
       out.close();
 
       FSDataInputStream in = fs.open(path);
-      SmallLayeredNeuralNetworkMessage readMessage = new SmallLayeredNeuralNetworkMessage(
+      ParameterMessage readMessage = new ParameterMessage(
           0, isConverge, null, null);
       readMessage.readFields(in);
       in.close();
@@ -117,7 +118,7 @@
     prevMatrices[0] = new DenseDoubleMatrix(prevMatrix1);
     prevMatrices[1] = new DenseDoubleMatrix(prevMatrix2);
 
-    SmallLayeredNeuralNetworkMessage message = new SmallLayeredNeuralNetworkMessage(
+    ParameterMessage message = new ParameterMessage(
         error, isConverge, matrices, prevMatrices);
     Configuration conf = new Configuration();
     String strPath = "/tmp/testReadWriteSmallLayeredNeuralNetworkMessageWithPrev";
@@ -129,7 +130,7 @@
       out.close();
 
       FSDataInputStream in = fs.open(path);
-      SmallLayeredNeuralNetworkMessage readMessage = new SmallLayeredNeuralNetworkMessage(
+      ParameterMessage readMessage = new ParameterMessage(
           0, isConverge, null, null);
       readMessage.readFields(in);
       in.close();
diff --git a/src/test/java/org/apache/horn/examples/NeuralNetworkTest.java b/src/test/java/org/apache/horn/examples/MultiLayerPerceptronTest.java
similarity index 74%
rename from src/test/java/org/apache/horn/examples/NeuralNetworkTest.java
rename to src/test/java/org/apache/horn/examples/MultiLayerPerceptronTest.java
index 932b17a..fd24c4f 100644
--- a/src/test/java/org/apache/horn/examples/NeuralNetworkTest.java
+++ b/src/test/java/org/apache/horn/examples/MultiLayerPerceptronTest.java
@@ -18,14 +18,10 @@
 package org.apache.horn.examples;
 
 import java.io.BufferedReader;
-import java.io.BufferedWriter;
 import java.io.FileReader;
 import java.io.IOException;
 import java.io.InputStreamReader;
-import java.io.OutputStreamWriter;
 import java.net.URI;
-import java.util.ArrayList;
-import java.util.List;
 
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -37,21 +33,20 @@
 import org.apache.hama.commons.io.VectorWritable;
 import org.apache.hama.commons.math.DenseDoubleVector;
 import org.apache.hama.commons.math.DoubleVector;
-import org.apache.horn.bsp.HornJob;
-import org.apache.horn.bsp.SmallLayeredNeuralNetwork;
+import org.apache.horn.core.HornJob;
+import org.apache.horn.core.LayeredNeuralNetwork;
 
 /**
  * Test the functionality of NeuralNetwork Example.
- * 
  */
-public class NeuralNetworkTest extends HamaCluster {
+public class MultiLayerPerceptronTest extends HamaCluster {
   private HamaConfiguration conf;
   private FileSystem fs;
   private String MODEL_PATH = "/tmp/neuralnets.model";
   private String RESULT_PATH = "/tmp/neuralnets.txt";
   private String SEQTRAIN_DATA = "/tmp/test-neuralnets.data";
 
-  public NeuralNetworkTest() {
+  public MultiLayerPerceptronTest() {
     conf = new HamaConfiguration();
     conf.set("bsp.master.address", "localhost");
     conf.setBoolean("hama.child.redirect.log.console", true);
@@ -82,22 +77,22 @@
 
     String featureDataPath = "src/test/resources/neuralnets_classification_test.txt";
     try {
-      SmallLayeredNeuralNetwork ann = new SmallLayeredNeuralNetwork(conf,
+      LayeredNeuralNetwork ann = new LayeredNeuralNetwork(conf,
           MODEL_PATH);
 
       // process data in streaming approach
       FileSystem fs = FileSystem.get(new URI(featureDataPath), conf);
       BufferedReader br = new BufferedReader(new InputStreamReader(
           fs.open(new Path(featureDataPath))));
-      Path outputPath = new Path(RESULT_PATH);
-      if (fs.exists(outputPath)) {
-        fs.delete(outputPath, true);
-      }
-      BufferedWriter bw = new BufferedWriter(new OutputStreamWriter(
-          fs.create(outputPath)));
 
       String line = null;
 
+      // compare results with ground-truth
+      BufferedReader groundTruthReader = new BufferedReader(new FileReader(
+          "src/test/resources/neuralnets_classification_label.txt"));
+
+      double correct = 0;
+      int samples = 0;
       while ((line = br.readLine()) != null) {
         if (line.trim().length() == 0) {
           continue;
@@ -109,52 +105,21 @@
         }
         DoubleVector instance = new DenseDoubleVector(vals);
         DoubleVector result = ann.getOutput(instance);
-        double[] arrResult = result.toArray();
-        StringBuilder sb = new StringBuilder();
-        for (int i = 0; i < arrResult.length; ++i) {
-          sb.append(arrResult[i]);
-          if (i != arrResult.length - 1) {
-            sb.append(",");
-          } else {
-            sb.append("\n");
-          }
-        }
-        bw.write(sb.toString());
-      }
+        double actual = result.toArray()[0];
+        double expected = Double.parseDouble(groundTruthReader.readLine());
 
-      br.close();
-      bw.close();
-
-      // compare results with ground-truth
-      BufferedReader groundTruthReader = new BufferedReader(new FileReader(
-          "src/test/resources/neuralnets_classification_label.txt"));
-      List<Double> groundTruthList = new ArrayList<Double>();
-      line = null;
-      while ((line = groundTruthReader.readLine()) != null) {
-        groundTruthList.add(Double.parseDouble(line));
-      }
-      groundTruthReader.close();
-
-      BufferedReader resultReader = new BufferedReader(new FileReader(
-          RESULT_PATH));
-      List<Double> resultList = new ArrayList<Double>();
-      while ((line = resultReader.readLine()) != null) {
-        resultList.add(Double.parseDouble(line));
-      }
-      resultReader.close();
-      int total = resultList.size();
-      double correct = 0;
-      for (int i = 0; i < groundTruthList.size(); ++i) {
-        double actual = resultList.get(i);
-        double expected = groundTruthList.get(i);
         LOG.info("evaluated: " + actual + ", expected: " + expected);
         if (actual < 0.5 && expected < 0.5 || actual >= 0.5 && expected >= 0.5) {
           ++correct;
         }
+        samples++;
       }
 
-      LOG.info("## Precision: " + (correct / total));
-      assertTrue((correct / total) > 0.5);
+      groundTruthReader.close();
+      br.close();
+
+      LOG.info("## Precision: " + (correct / samples));
+      assertTrue((correct / samples) > 0.5);
 
     } catch (Exception e) {
       e.printStackTrace();
@@ -201,10 +166,10 @@
 
       long startTime = System.currentTimeMillis();
       if (ann.waitForCompletion(true)) {
-        LOG.info("Job Finished in "
-            + (System.currentTimeMillis() - startTime) / 1000.0 + " seconds");
+        LOG.info("Job Finished in " + (System.currentTimeMillis() - startTime)
+            / 1000.0 + " seconds");
       }
-      
+
     } catch (Exception e) {
       e.printStackTrace();
     }