Merge pull request #98 from databricks/parallel-runs
Add option to avoid cleaning after each run, to enable parallel runs
This commit is contained in:
commit
c12b14b013
@@ -5,7 +5,7 @@ name := "spark-sql-perf"
|
||||
|
||||
organization := "com.databricks"
|
||||
|
||||
scalaVersion := "2.10.6"
|
||||
scalaVersion := "2.11.8"
|
||||
|
||||
crossScalaVersions := Seq("2.10.6", "2.11.8")
|
||||
|
||||
|
||||
@@ -58,10 +58,12 @@ trait Benchmarkable extends Logging {
|
||||
private def afterBenchmark(sc: SparkContext): Unit = {
|
||||
// Best-effort clean up of weakly referenced RDDs, shuffles, and broadcasts
|
||||
System.gc()
|
||||
// Remove any leftover blocks that still exist
|
||||
sc.getExecutorStorageStatus
|
||||
.flatMap { status => status.blocks.map { case (bid, _) => bid } }
|
||||
.foreach { bid => SparkEnv.get.blockManager.master.removeBlock(bid) }
|
||||
if (sparkContext.getConf.getBoolean("spark.databricks.benchmark.cleanBlocksAfter", true)) {
|
||||
// Remove any leftover blocks that still exist
|
||||
sc.getExecutorStorageStatus
|
||||
.flatMap { status => status.blocks.map { case (bid, _) => bid } }
|
||||
.foreach { bid => SparkEnv.get.blockManager.master.removeBlock(bid) }
|
||||
}
|
||||
}
|
||||
|
||||
private def runBenchmarkForked(
|
||||
|
||||
@@ -1 +1 @@
|
||||
version in ThisBuild := "0.4.11-SNAPSHOT"
|
||||
version in ThisBuild := "0.4.12-SNAPSHOT"
|
||||
|
||||
Loading…
Reference in New Issue
Block a user