Merge branch 'develop'
diff --git a/.gitignore b/.gitignore
index 7c6771b..66a5d40 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,4 +1,3 @@
-data/*.txt
manifest.json
pio.log
/pio.sbt
diff --git a/README.md b/README.md
index ebdf31a..b4f7cd7 100644
--- a/README.md
+++ b/README.md
@@ -6,6 +6,12 @@
## Versions
+### v0.1.4
+
+- Update Evaluation for PredictionIO 0.9.1
+
+NOTE: Requires PredictionIO >= 0.9.1
+
### v0.1.3
- Add missing sample data set (data/data.txt)
diff --git a/src/main/scala/CompleteEvaluation.scala b/src/main/scala/CompleteEvaluation.scala
new file mode 100644
index 0000000..e168872
--- /dev/null
+++ b/src/main/scala/CompleteEvaluation.scala
@@ -0,0 +1,13 @@
+package org.template.classification
+
+import io.prediction.controller.Evaluation
+import io.prediction.controller.MetricEvaluator
+
+object CompleteEvaluation extends Evaluation {
+ engineEvaluator = (
+ ClassificationEngine(),
+ MetricEvaluator(
+ metric = Accuracy(),
+ otherMetrics = Seq(Precision(0.0), Precision(1.0), Precision(2.0)),
+ outputPath = "best.json"))
+}
diff --git a/src/main/scala/Evaluation.scala b/src/main/scala/Evaluation.scala
index 8a903ee..ec8e16c 100644
--- a/src/main/scala/Evaluation.scala
+++ b/src/main/scala/Evaluation.scala
@@ -5,21 +5,16 @@
import io.prediction.controller.EngineParams
import io.prediction.controller.EngineParamsGenerator
import io.prediction.controller.Evaluation
-import io.prediction.controller.Workflow
-import org.apache.spark.SparkContext
-import org.apache.spark.SparkContext._
-import org.apache.spark.rdd.RDD
-case class Precision
- extends AverageMetric[EmptyEvaluationInfo,
- Query, PredictedResult, ActualResult] {
+case class Accuracy
+ extends AverageMetric[EmptyEvaluationInfo, Query, PredictedResult, ActualResult] {
def calculate(query: Query, predicted: PredictedResult, actual: ActualResult)
: Double = (if (predicted.label == actual.label) 1.0 else 0.0)
}
-object PrecisionEvaluation extends Evaluation {
+object AccuracyEvaluation extends Evaluation {
// Define Engine and Metric used in Evaluation
- engineMetric = (ClassificationEngine(), new Precision())
+ engineMetric = (ClassificationEngine(), new Accuracy())
}
object EngineParamsList extends EngineParamsGenerator {
@@ -29,7 +24,7 @@
// the data is read, and a evalK parameter is used to define the
// cross-validation.
private[this] val baseEP = EngineParams(
- dataSourceParams = DataSourceParams(appId = 18, evalK = Some(5)))
+ dataSourceParams = DataSourceParams(appId = 19, evalK = Some(5)))
// Second, we specify the engine params list by explicitly listing all
// algorithm parameters. In this case, we evaluate 3 engine params, each with
diff --git a/src/main/scala/PrecisionEvaluation.scala b/src/main/scala/PrecisionEvaluation.scala
new file mode 100644
index 0000000..de80b76
--- /dev/null
+++ b/src/main/scala/PrecisionEvaluation.scala
@@ -0,0 +1,27 @@
+package org.template.classification
+
+import io.prediction.controller.OptionAverageMetric
+import io.prediction.controller.EmptyEvaluationInfo
+import io.prediction.controller.Evaluation
+
+case class Precision(label: Double)
+ extends OptionAverageMetric[EmptyEvaluationInfo, Query, PredictedResult, ActualResult] {
+ override def header: String = s"Precision(label = $label)"
+
+ def calculate(query: Query, predicted: PredictedResult, actual: ActualResult)
+ : Option[Double] = {
+ if (predicted.label == label) {
+ if (predicted.label == actual.label) {
+ Some(1.0) // True positive
+ } else {
+ Some(0.0) // False positive
+ }
+ } else {
+      None // Unrelated case for calculating precision
+ }
+ }
+}
+
+object PrecisionEvaluation extends Evaluation {
+ engineMetric = (ClassificationEngine(), new Precision(label = 1.0))
+}
diff --git a/template.json b/template.json
index 932e603..e617464 100644
--- a/template.json
+++ b/template.json
@@ -1 +1 @@
-{"pio": {"version": { "min": "0.9.0" }}}
+{"pio": {"version": { "min": "0.9.1" }}}