From 4f4b08a122cb432217c928e9818808007dad5770 Mon Sep 17 00:00:00 2001 From: Pace Francesco Date: Fri, 19 Jun 2015 15:01:57 +0200 Subject: [PATCH] Reading hadoopConfiguration from Spark. Read hadoopConfiguration from SparkContext instead of creating a new Configuration directly from Hadoop config files. This allows us to use Hadoop parameters inserted or modified in one of Spark's config files. (e.g.: Swift credentials). --- .../scala/com/databricks/spark/sql/perf/runBenchmarks.scala | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/main/scala/com/databricks/spark/sql/perf/runBenchmarks.scala b/src/main/scala/com/databricks/spark/sql/perf/runBenchmarks.scala index fb4d69a..7a3c7be 100644 --- a/src/main/scala/com/databricks/spark/sql/perf/runBenchmarks.scala +++ b/src/main/scala/com/databricks/spark/sql/perf/runBenchmarks.scala @@ -143,7 +143,7 @@ abstract class Dataset( def checkData(): Unit = { tablesForTest.foreach { table => - val fs = FileSystem.get(new java.net.URI(table.outputDir), new Configuration()) + val fs = FileSystem.get(new java.net.URI(table.outputDir), sparkContext.hadoopConfiguration) val exists = fs.exists(new Path(table.outputDir)) val wasSuccessful = fs.exists(new Path(s"${table.outputDir}/_SUCCESS")) @@ -302,4 +302,4 @@ abstract class Dataset( } new ExperimentStatus } -} \ No newline at end of file +}