Introduce a small integration point with Spark perf.

This allows us to report Spark perf results in the same format as SQL benchmark results. cc marmbrus

Author: Andrew Or <andrew@databricks.com>

Closes #30 from andrewor14/spark-perf.
This commit is contained in:
parent
ebcd5db414
commit
172ae79f8d
@@ -392,9 +392,14 @@ abstract class Benchmark(

   // Benchmark run by calculating the sum of the hash value of all rows. This is used to check
   // query results.
   case object HashResults extends ExecutionMode {
     override def toString: String = "hash"
   }
+
+  // Results from Spark perf
+  case object SparkPerfResults extends ExecutionMode {
+    override def toString: String = "sparkPerf"
+  }
 }

 /** Factory object for benchmark queries. */
@@ -555,4 +560,4 @@ abstract class Benchmark(
       new Query(name, buildDataFrame, description, sqlText, ExecutionMode.HashResults)
     }
   }
-}
+}
@@ -42,7 +42,7 @@ case class ExperimentRun(
 case class BenchmarkConfiguration(
     sparkVersion: String = org.apache.spark.SPARK_VERSION,
     sqlConf: Map[String, String],
-    sparkConf: Map[String,String],
+    sparkConf: Map[String, String],
     defaultParallelism: Int)

 /**
@@ -93,4 +93,4 @@ case class BreakdownResult(
     executionTime: Double,
     delta: Double)

-case class Failure(className: String, message: String)
+case class Failure(className: String, message: String)
Loading…
Reference in New Issue
Block a user