[KYUUBI #5243] Distinguish metadata between batch impl v2 and recovery

### _Why are the changes needed?_

The name `recoveryMetadata` is no longer accurate after the batch impl v2 was introduced. This PR proposes to rename `recoveryMetadata` to `metadata` and introduce a dedicated flag `fromRecovery` to distinguish metadata between them.

This PR also partially reverts #4798, by removing unnecessary constructor parameters `shouldRunAsync` and `batchConf`

### _How was this patch tested?_
- [ ] Add some test cases that check the changes thoroughly including negative and positive cases if possible

- [ ] Add screenshots for manual tests if appropriate

- [x] [Run test](https://kyuubi.readthedocs.io/en/master/contributing/code/testing.html#running-tests) locally before make a pull request

### _Was this patch authored or co-authored using generative AI tooling?_

No.

Closes #5243 from pan3793/meta-recov.

Closes #5243

0718fbefe [Cheng Pan] nit
b8358464c [Cheng Pan] simplify
a2d6519c6 [Cheng Pan] fix test
2dad868bd [Cheng Pan] refactor
f83d2a602 [Cheng Pan] Distinguish batch impl v2 metadata from recovery

Authored-by: Cheng Pan <chengpan@apache.org>
Signed-off-by: Cheng Pan <chengpan@apache.org>
This commit is contained in:
Cheng Pan 2023-09-06 02:51:43 +08:00
parent c3b7af0b54
commit 6a23f88b00
No known key found for this signature in database
GPG Key ID: 8001952629BCC75D
11 changed files with 101 additions and 104 deletions

View File

@ -19,7 +19,6 @@ package org.apache.kyuubi.kubernetes.test.spark
import java.util.UUID import java.util.UUID
import scala.collection.JavaConverters._
import scala.concurrent.duration._ import scala.concurrent.duration._
import org.apache.hadoop.conf.Configuration import org.apache.hadoop.conf.Configuration
@ -149,7 +148,6 @@ class KyuubiOperationKubernetesClusterClientModeSuite
"kyuubi", "kyuubi",
"passwd", "passwd",
"localhost", "localhost",
batchRequest.getConf.asScala.toMap,
batchRequest) batchRequest)
eventually(timeout(3.minutes), interval(50.milliseconds)) { eventually(timeout(3.minutes), interval(50.milliseconds)) {
@ -217,7 +215,6 @@ class KyuubiOperationKubernetesClusterClusterModeSuite
"runner", "runner",
"passwd", "passwd",
"localhost", "localhost",
batchRequest.getConf.asScala.toMap,
batchRequest) batchRequest)
// wait for driver pod start // wait for driver pod start

View File

@ -58,11 +58,12 @@ class BatchJobSubmission(
className: String, className: String,
batchConf: Map[String, String], batchConf: Map[String, String],
batchArgs: Seq[String], batchArgs: Seq[String],
recoveryMetadata: Option[Metadata], metadata: Option[Metadata])
override val shouldRunAsync: Boolean)
extends KyuubiApplicationOperation(session) { extends KyuubiApplicationOperation(session) {
import BatchJobSubmission._ import BatchJobSubmission._
override def shouldRunAsync: Boolean = true
private val _operationLog = OperationLog.createOperationLog(session, getHandle) private val _operationLog = OperationLog.createOperationLog(session, getHandle)
private val applicationManager = session.sessionManager.applicationManager private val applicationManager = session.sessionManager.applicationManager
@ -75,7 +76,7 @@ class BatchJobSubmission(
private var killMessage: KillResponse = (false, "UNKNOWN") private var killMessage: KillResponse = (false, "UNKNOWN")
def getKillMessage: KillResponse = killMessage def getKillMessage: KillResponse = killMessage
@volatile private var _appStartTime = recoveryMetadata.map(_.engineOpenTime).getOrElse(0L) @volatile private var _appStartTime = metadata.map(_.engineOpenTime).getOrElse(0L)
def appStartTime: Long = _appStartTime def appStartTime: Long = _appStartTime
def appStarted: Boolean = _appStartTime > 0 def appStarted: Boolean = _appStartTime > 0
@ -184,21 +185,24 @@ class BatchJobSubmission(
override protected def runInternal(): Unit = session.handleSessionException { override protected def runInternal(): Unit = session.handleSessionException {
val asyncOperation: Runnable = () => { val asyncOperation: Runnable = () => {
try { try {
recoveryMetadata match { metadata match {
case Some(metadata) if metadata.peerInstanceClosed => case Some(metadata) if metadata.peerInstanceClosed =>
setState(OperationState.CANCELED) setState(OperationState.CANCELED)
case Some(metadata) if metadata.state == OperationState.PENDING.toString => case Some(metadata) if metadata.state == OperationState.PENDING.toString =>
// In recovery mode, only submit batch job when previous state is PENDING // case 1: new batch job created using batch impl v2
// and fail to fetch the status including appId from resource manager. // case 2: batch job from recovery, do submission only when previous state is
// Otherwise, monitor the submitted batch application. // PENDING and fail to fetch the status by appId from resource manager, which
// is similar with case 1; otherwise, monitor the submitted batch application.
_applicationInfo = currentApplicationInfo() _applicationInfo = currentApplicationInfo()
applicationId(_applicationInfo) match { applicationId(_applicationInfo) match {
case Some(appId) => monitorBatchJob(appId)
case None => submitAndMonitorBatchJob() case None => submitAndMonitorBatchJob()
case Some(appId) => monitorBatchJob(appId)
} }
case Some(metadata) => case Some(metadata) =>
// batch job from recovery which was submitted
monitorBatchJob(metadata.engineId) monitorBatchJob(metadata.engineId)
case None => case None =>
// brand-new job created using batch impl v1
submitAndMonitorBatchJob() submitAndMonitorBatchJob()
} }
setStateIfNotCanceled(OperationState.FINISHED) setStateIfNotCanceled(OperationState.FINISHED)
@ -219,7 +223,6 @@ class BatchJobSubmission(
updateBatchMetadata() updateBatchMetadata()
} }
} }
if (!shouldRunAsync) getBackgroundHandle.get()
} }
private def submitAndMonitorBatchJob(): Unit = { private def submitAndMonitorBatchJob(): Unit = {
@ -295,19 +298,19 @@ class BatchJobSubmission(
} }
if (_applicationInfo.isEmpty) { if (_applicationInfo.isEmpty) {
info(s"The $batchType batch[$batchId] job: $appId not found, assume that it has finished.") info(s"The $batchType batch[$batchId] job: $appId not found, assume that it has finished.")
} else if (applicationFailed(_applicationInfo)) { return
}
if (applicationFailed(_applicationInfo)) {
throw new KyuubiException(s"$batchType batch[$batchId] job failed: ${_applicationInfo}")
}
updateBatchMetadata()
// TODO: add limit for max batch job submission lifetime
while (_applicationInfo.isDefined && !applicationTerminated(_applicationInfo)) {
Thread.sleep(applicationCheckInterval)
updateApplicationInfoMetadataIfNeeded()
}
if (applicationFailed(_applicationInfo)) {
throw new KyuubiException(s"$batchType batch[$batchId] job failed: ${_applicationInfo}") throw new KyuubiException(s"$batchType batch[$batchId] job failed: ${_applicationInfo}")
} else {
updateBatchMetadata()
// TODO: add limit for max batch job submission lifetime
while (_applicationInfo.isDefined && !applicationTerminated(_applicationInfo)) {
Thread.sleep(applicationCheckInterval)
updateApplicationInfoMetadataIfNeeded()
}
if (applicationFailed(_applicationInfo)) {
throw new KyuubiException(s"$batchType batch[$batchId] job failed: ${_applicationInfo}")
}
} }
} }

View File

@ -81,8 +81,7 @@ class KyuubiOperationManager private (name: String) extends OperationManager(nam
className: String, className: String,
batchConf: Map[String, String], batchConf: Map[String, String],
batchArgs: Seq[String], batchArgs: Seq[String],
recoveryMetadata: Option[Metadata], metadata: Option[Metadata]): BatchJobSubmission = {
shouldRunAsync: Boolean): BatchJobSubmission = {
val operation = new BatchJobSubmission( val operation = new BatchJobSubmission(
session, session,
batchType, batchType,
@ -91,8 +90,7 @@ class KyuubiOperationManager private (name: String) extends OperationManager(nam
className, className,
batchConf, batchConf,
batchArgs, batchArgs,
recoveryMetadata, metadata)
shouldRunAsync)
addOperation(operation) addOperation(operation)
operation operation
} }

View File

@ -22,7 +22,6 @@ import java.util.concurrent.atomic.AtomicBoolean
import org.apache.kyuubi.config.KyuubiConf.BATCH_SUBMITTER_THREADS import org.apache.kyuubi.config.KyuubiConf.BATCH_SUBMITTER_THREADS
import org.apache.kyuubi.operation.OperationState import org.apache.kyuubi.operation.OperationState
import org.apache.kyuubi.server.metadata.MetadataManager import org.apache.kyuubi.server.metadata.MetadataManager
import org.apache.kyuubi.server.metadata.api.Metadata
import org.apache.kyuubi.service.{AbstractService, Serverable} import org.apache.kyuubi.service.{AbstractService, Serverable}
import org.apache.kyuubi.session.KyuubiSessionManager import org.apache.kyuubi.session.KyuubiSessionManager
import org.apache.kyuubi.util.ThreadUtils import org.apache.kyuubi.util.ThreadUtils
@ -81,16 +80,9 @@ class KyuubiBatchService(
Option(metadata.requestName), Option(metadata.requestName),
metadata.resource, metadata.resource,
metadata.className, metadata.className,
metadata.requestConf,
metadata.requestArgs, metadata.requestArgs,
Some(metadata), // TODO some logic need to fix since it's not from recovery Some(metadata),
shouldRunAsync = true) fromRecovery = false)
val metadataForUpdate = Metadata(
identifier = batchId,
kyuubiInstance = kyuubiInstance,
requestConf = batchSession.optimizedConf,
clusterManager = batchSession.batchJobSubmissionOp.builder.clusterManager())
metadataManager.updateMetadata(metadataForUpdate, asyncRetryOnError = false)
val sessionHandle = sessionManager.openBatchSession(batchSession) val sessionHandle = sessionManager.openBatchSession(batchSession)
var submitted = false var submitted = false
while (!submitted) { // block until batch job submitted while (!submitted) { // block until batch job submitted
@ -113,7 +105,7 @@ class KyuubiBatchService(
// } // }
if (!submitted) Thread.sleep(1000) if (!submitted) Thread.sleep(1000)
} }
info(s"$batchId is submitted.") info(s"$batchId is submitted or finished.")
} }
} }
} }

View File

@ -269,7 +269,6 @@ private[v1] class BatchesResource extends ApiRequestContext with Logging {
userName, userName,
"anonymous", "anonymous",
ipAddress, ipAddress,
request.getConf.asScala.toMap,
request) request)
} match { } match {
case Success(sessionHandle) => case Success(sessionHandle) =>

View File

@ -41,10 +41,9 @@ class KyuubiBatchSession(
batchName: Option[String], batchName: Option[String],
resource: String, resource: String,
className: String, className: String,
batchConf: Map[String, String],
batchArgs: Seq[String], batchArgs: Seq[String],
recoveryMetadata: Option[Metadata] = None, metadata: Option[Metadata] = None,
shouldRunAsync: Boolean) fromRecovery: Boolean)
extends KyuubiSession( extends KyuubiSession(
TProtocolVersion.HIVE_CLI_SERVICE_PROTOCOL_V1, TProtocolVersion.HIVE_CLI_SERVICE_PROTOCOL_V1,
user, user,
@ -55,11 +54,11 @@ class KyuubiBatchSession(
override val sessionType: SessionType = SessionType.BATCH override val sessionType: SessionType = SessionType.BATCH
override val handle: SessionHandle = { override val handle: SessionHandle = {
val batchId = recoveryMetadata.map(_.identifier).getOrElse(conf(KYUUBI_BATCH_ID_KEY)) val batchId = metadata.map(_.identifier).getOrElse(conf(KYUUBI_BATCH_ID_KEY))
SessionHandle.fromUUID(batchId) SessionHandle.fromUUID(batchId)
} }
override def createTime: Long = recoveryMetadata.map(_.createTime).getOrElse(super.createTime) override def createTime: Long = metadata.map(_.createTime).getOrElse(super.createTime)
override def getNoOperationTime: Long = { override def getNoOperationTime: Long = {
if (batchJobSubmissionOp != null && !OperationState.isTerminal( if (batchJobSubmissionOp != null && !OperationState.isTerminal(
@ -74,7 +73,7 @@ class KyuubiBatchSession(
sessionManager.getConf.get(KyuubiConf.BATCH_SESSION_IDLE_TIMEOUT) sessionManager.getConf.get(KyuubiConf.BATCH_SESSION_IDLE_TIMEOUT)
override val normalizedConf: Map[String, String] = override val normalizedConf: Map[String, String] =
sessionConf.getBatchConf(batchType) ++ sessionManager.validateBatchConf(batchConf) sessionConf.getBatchConf(batchType) ++ sessionManager.validateBatchConf(conf)
val optimizedConf: Map[String, String] = { val optimizedConf: Map[String, String] = {
val confOverlay = sessionManager.sessionConfAdvisor.getConfOverlay( val confOverlay = sessionManager.sessionConfAdvisor.getConfOverlay(
@ -95,7 +94,7 @@ class KyuubiBatchSession(
// whether the resource file is from uploading // whether the resource file is from uploading
private[kyuubi] val isResourceUploaded: Boolean = private[kyuubi] val isResourceUploaded: Boolean =
batchConf.getOrElse(KyuubiReservedKeys.KYUUBI_BATCH_RESOURCE_UPLOADED_KEY, "false").toBoolean conf.getOrElse(KyuubiReservedKeys.KYUUBI_BATCH_RESOURCE_UPLOADED_KEY, "false").toBoolean
private[kyuubi] lazy val batchJobSubmissionOp = sessionManager.operationManager private[kyuubi] lazy val batchJobSubmissionOp = sessionManager.operationManager
.newBatchJobSubmissionOperation( .newBatchJobSubmissionOperation(
@ -106,8 +105,7 @@ class KyuubiBatchSession(
className, className,
optimizedConf, optimizedConf,
batchArgs, batchArgs,
recoveryMetadata, metadata)
shouldRunAsync)
private def waitMetadataRequestsRetryCompletion(): Unit = { private def waitMetadataRequestsRetryCompletion(): Unit = {
val batchId = batchJobSubmissionOp.batchId val batchId = batchJobSubmissionOp.batchId
@ -122,7 +120,9 @@ class KyuubiBatchSession(
} }
private val sessionEvent = KyuubiSessionEvent(this) private val sessionEvent = KyuubiSessionEvent(this)
recoveryMetadata.foreach(metadata => sessionEvent.engineId = metadata.engineId) if (fromRecovery) {
metadata.foreach { m => sessionEvent.engineId = m.engineId }
}
EventBus.post(sessionEvent) EventBus.post(sessionEvent)
override def getSessionEvent: Option[KyuubiSessionEvent] = { override def getSessionEvent: Option[KyuubiSessionEvent] = {
@ -142,32 +142,47 @@ class KyuubiBatchSession(
override def open(): Unit = handleSessionException { override def open(): Unit = handleSessionException {
traceMetricsOnOpen() traceMetricsOnOpen()
if (recoveryMetadata.isEmpty) { lazy val kubernetesInfo: Map[String, String] = {
val appMgrInfo = batchJobSubmissionOp.builder.appMgrInfo() val appMgrInfo = batchJobSubmissionOp.builder.appMgrInfo()
val kubernetesInfo = appMgrInfo.kubernetesInfo.context.map { context => appMgrInfo.kubernetesInfo.context.map { context =>
Map(KyuubiConf.KUBERNETES_CONTEXT.key -> context) Map(KyuubiConf.KUBERNETES_CONTEXT.key -> context)
}.getOrElse(Map.empty) ++ appMgrInfo.kubernetesInfo.namespace.map { namespace => }.getOrElse(Map.empty) ++ appMgrInfo.kubernetesInfo.namespace.map { namespace =>
Map(KyuubiConf.KUBERNETES_NAMESPACE.key -> namespace) Map(KyuubiConf.KUBERNETES_NAMESPACE.key -> namespace)
}.getOrElse(Map.empty) }.getOrElse(Map.empty)
val metaData = Metadata( }
identifier = handle.identifier.toString,
sessionType = sessionType,
realUser = realUser,
username = user,
ipAddress = ipAddress,
kyuubiInstance = connectionUrl,
state = OperationState.PENDING.toString,
resource = resource,
className = className,
requestName = name.orNull,
requestConf = optimizedConf ++ kubernetesInfo, // save the kubernetes info into request conf
requestArgs = batchArgs,
createTime = createTime,
engineType = batchType,
clusterManager = batchJobSubmissionOp.builder.clusterManager())
// there is a chance that operation failed w/ duplicated key error (metadata, fromRecovery) match {
sessionManager.insertMetadata(metaData) case (Some(initialMetadata), false) =>
// new batch job created using batch impl v2
val metadataToUpdate = Metadata(
identifier = initialMetadata.identifier,
kyuubiInstance = connectionUrl,
requestName = name.orNull,
requestConf = optimizedConf ++ kubernetesInfo, // save the kubernetes info
clusterManager = batchJobSubmissionOp.builder.clusterManager())
sessionManager.updateMetadata(metadataToUpdate)
case (None, _) =>
// new batch job created using batch impl v1
val newMetadata = Metadata(
identifier = handle.identifier.toString,
sessionType = sessionType,
realUser = realUser,
username = user,
ipAddress = ipAddress,
kyuubiInstance = connectionUrl,
state = OperationState.PENDING.toString,
resource = resource,
className = className,
requestName = name.orNull,
requestConf = optimizedConf ++ kubernetesInfo, // save the kubernetes info
requestArgs = batchArgs,
createTime = createTime,
engineType = batchType,
clusterManager = batchJobSubmissionOp.builder.clusterManager())
// there is a chance that operation failed w/ duplicated key error
sessionManager.insertMetadata(newMetadata)
case _ =>
} }
checkSessionAccessPathURIs() checkSessionAccessPathURIs()

View File

@ -144,10 +144,9 @@ class KyuubiSessionManager private (name: String) extends SessionManager(name) {
batchName: Option[String], batchName: Option[String],
resource: String, resource: String,
className: String, className: String,
batchConf: Map[String, String],
batchArgs: Seq[String], batchArgs: Seq[String],
recoveryMetadata: Option[Metadata] = None, metadata: Option[Metadata] = None,
shouldRunAsync: Boolean): KyuubiBatchSession = { fromRecovery: Boolean): KyuubiBatchSession = {
// scalastyle:on // scalastyle:on
val username = Option(user).filter(_.nonEmpty).getOrElse("anonymous") val username = Option(user).filter(_.nonEmpty).getOrElse("anonymous")
val sessionConf = this.getConf.getUserDefaults(user) val sessionConf = this.getConf.getUserDefaults(user)
@ -162,10 +161,9 @@ class KyuubiSessionManager private (name: String) extends SessionManager(name) {
batchName, batchName,
resource, resource,
className, className,
batchConf,
batchArgs, batchArgs,
recoveryMetadata, metadata,
shouldRunAsync) fromRecovery)
} }
private[kyuubi] def openBatchSession(batchSession: KyuubiBatchSession): SessionHandle = { private[kyuubi] def openBatchSession(batchSession: KyuubiBatchSession): SessionHandle = {
@ -202,22 +200,19 @@ class KyuubiSessionManager private (name: String) extends SessionManager(name) {
user: String, user: String,
password: String, password: String,
ipAddress: String, ipAddress: String,
conf: Map[String, String], batchRequest: BatchRequest): SessionHandle = {
batchRequest: BatchRequest,
shouldRunAsync: Boolean = true): SessionHandle = {
val batchSession = createBatchSession( val batchSession = createBatchSession(
user, user,
password, password,
ipAddress, ipAddress,
conf, batchRequest.getConf.asScala.toMap,
batchRequest.getBatchType, batchRequest.getBatchType,
Option(batchRequest.getName), Option(batchRequest.getName),
batchRequest.getResource, batchRequest.getResource,
batchRequest.getClassName, batchRequest.getClassName,
batchRequest.getConf.asScala.toMap,
batchRequest.getArgs.asScala.toSeq, batchRequest.getArgs.asScala.toSeq,
None, None,
shouldRunAsync) fromRecovery = false)
openBatchSession(batchSession) openBatchSession(batchSession)
} }
@ -313,10 +308,9 @@ class KyuubiSessionManager private (name: String) extends SessionManager(name) {
Option(metadata.requestName), Option(metadata.requestName),
metadata.resource, metadata.resource,
metadata.className, metadata.className,
metadata.requestConf,
metadata.requestArgs, metadata.requestArgs,
Some(metadata), Some(metadata),
shouldRunAsync = true) fromRecovery = true)
}).getOrElse(Seq.empty) }).getOrElse(Seq.empty)
} }
} }

View File

@ -116,7 +116,6 @@ class KyuubiOperationYarnClusterSuite extends WithKyuubiServerOnYarn with HiveJD
"kyuubi", "kyuubi",
"passwd", "passwd",
"localhost", "localhost",
batchRequest.getConf.asScala.toMap,
batchRequest) batchRequest)
val session = sessionManager.getSession(sessionHandle).asInstanceOf[KyuubiBatchSession] val session = sessionManager.getSession(sessionHandle).asInstanceOf[KyuubiBatchSession]
@ -180,7 +179,6 @@ class KyuubiOperationYarnClusterSuite extends WithKyuubiServerOnYarn with HiveJD
"kyuubi", "kyuubi",
"passwd", "passwd",
"localhost", "localhost",
batchRequest.getConf.asScala.toMap,
batchRequest) batchRequest)
val session = sessionManager.getSession(sessionHandle).asInstanceOf[KyuubiBatchSession] val session = sessionManager.getSession(sessionHandle).asInstanceOf[KyuubiBatchSession]

View File

@ -135,13 +135,12 @@ class ServerJsonLoggingEventHandlerSuite extends WithKyuubiServer with HiveJDBCT
} }
} }
val batchRequest = newSparkBatchRequest() val batchRequest = newSparkBatchRequest(Map(KYUUBI_BATCH_ID_KEY -> UUID.randomUUID().toString))
val sessionMgr = server.backendService.sessionManager.asInstanceOf[KyuubiSessionManager] val sessionMgr = server.backendService.sessionManager.asInstanceOf[KyuubiSessionManager]
val batchSessionHandle = sessionMgr.openBatchSession( val batchSessionHandle = sessionMgr.openBatchSession(
Utils.currentUser, Utils.currentUser,
"kyuubi", "kyuubi",
"127.0.0.1", "127.0.0.1",
Map(KYUUBI_BATCH_ID_KEY -> UUID.randomUUID().toString),
batchRequest) batchRequest)
withSessionConf()(Map.empty)(Map("spark.sql.shuffle.partitions" -> "2")) { withSessionConf()(Map.empty)(Map("spark.sql.shuffle.partitions" -> "2")) {
withJdbcStatement() { statement => withJdbcStatement() { statement =>

View File

@ -358,12 +358,12 @@ abstract class BatchesResourceSuiteBase extends KyuubiFunSuite
"kyuubi", "kyuubi",
"kyuubi", "kyuubi",
InetAddress.getLocalHost.getCanonicalHostName, InetAddress.getLocalHost.getCanonicalHostName,
Map(KYUUBI_BATCH_ID_KEY -> UUID.randomUUID().toString),
newBatchRequest( newBatchRequest(
"spark", "spark",
sparkBatchTestResource.get, sparkBatchTestResource.get,
"", "",
"")) "",
Map(KYUUBI_BATCH_ID_KEY -> UUID.randomUUID().toString)))
sessionManager.openSession( sessionManager.openSession(
TProtocolVersion.HIVE_CLI_SERVICE_PROTOCOL_V11, TProtocolVersion.HIVE_CLI_SERVICE_PROTOCOL_V11,
"", "",
@ -380,22 +380,22 @@ abstract class BatchesResourceSuiteBase extends KyuubiFunSuite
"kyuubi", "kyuubi",
"kyuubi", "kyuubi",
InetAddress.getLocalHost.getCanonicalHostName, InetAddress.getLocalHost.getCanonicalHostName,
Map(KYUUBI_BATCH_ID_KEY -> UUID.randomUUID().toString),
newBatchRequest( newBatchRequest(
"spark", "spark",
sparkBatchTestResource.get, sparkBatchTestResource.get,
"", "",
"")) "",
Map(KYUUBI_BATCH_ID_KEY -> UUID.randomUUID().toString)))
sessionManager.openBatchSession( sessionManager.openBatchSession(
"kyuubi", "kyuubi",
"kyuubi", "kyuubi",
InetAddress.getLocalHost.getCanonicalHostName, InetAddress.getLocalHost.getCanonicalHostName,
Map(KYUUBI_BATCH_ID_KEY -> UUID.randomUUID().toString),
newBatchRequest( newBatchRequest(
"spark", "spark",
sparkBatchTestResource.get, sparkBatchTestResource.get,
"", "",
"")) "",
Map(KYUUBI_BATCH_ID_KEY -> UUID.randomUUID().toString)))
val response2 = webTarget.path("api/v1/batches") val response2 = webTarget.path("api/v1/batches")
.queryParam("batchType", "spark") .queryParam("batchType", "spark")
@ -780,12 +780,14 @@ abstract class BatchesResourceSuiteBase extends KyuubiFunSuite
.be.sessionManager.asInstanceOf[KyuubiSessionManager] .be.sessionManager.asInstanceOf[KyuubiSessionManager]
val e = intercept[Exception] { val e = intercept[Exception] {
val conf = Map(
KYUUBI_BATCH_ID_KEY -> UUID.randomUUID().toString,
"spark.jars" -> "disAllowPath")
sessionManager.openBatchSession( sessionManager.openBatchSession(
"kyuubi", "kyuubi",
"kyuubi", "kyuubi",
InetAddress.getLocalHost.getCanonicalHostName, InetAddress.getLocalHost.getCanonicalHostName,
Map(KYUUBI_BATCH_ID_KEY -> UUID.randomUUID().toString), newSparkBatchRequest(conf))
newSparkBatchRequest(Map("spark.jars" -> "disAllowPath")))
} }
val sessionHandleRegex = "\\[\\S*]".r val sessionHandleRegex = "\\[\\S*]".r
val batchId = sessionHandleRegex.findFirstMatchIn(e.getMessage).get.group(0) val batchId = sessionHandleRegex.findFirstMatchIn(e.getMessage).get.group(0)
@ -803,12 +805,12 @@ abstract class BatchesResourceSuiteBase extends KyuubiFunSuite
"kyuubi", "kyuubi",
"kyuubi", "kyuubi",
InetAddress.getLocalHost.getCanonicalHostName, InetAddress.getLocalHost.getCanonicalHostName,
Map(KYUUBI_BATCH_ID_KEY -> UUID.randomUUID().toString),
newBatchRequest( newBatchRequest(
"spark", "spark",
sparkBatchTestResource.get, sparkBatchTestResource.get,
"", "",
uniqueName)) uniqueName,
Map(KYUUBI_BATCH_ID_KEY -> UUID.randomUUID().toString)))
val response = webTarget.path("api/v1/batches") val response = webTarget.path("api/v1/batches")
.queryParam("batchName", uniqueName) .queryParam("batchName", uniqueName)

View File

@ -290,12 +290,12 @@ class BatchCliSuite extends RestClientTestHelper with TestPrematureExit with Bat
"kyuubi", "kyuubi",
"kyuubi", "kyuubi",
InetAddress.getLocalHost.getCanonicalHostName, InetAddress.getLocalHost.getCanonicalHostName,
Map(KYUUBI_BATCH_ID_KEY -> UUID.randomUUID().toString),
newBatchRequest( newBatchRequest(
"spark", "spark",
"", "",
"", "",
"")) "",
Map(KYUUBI_BATCH_ID_KEY -> UUID.randomUUID().toString)))
sessionManager.openSession( sessionManager.openSession(
TProtocolVersion.HIVE_CLI_SERVICE_PROTOCOL_V11, TProtocolVersion.HIVE_CLI_SERVICE_PROTOCOL_V11,
"", "",
@ -312,22 +312,22 @@ class BatchCliSuite extends RestClientTestHelper with TestPrematureExit with Bat
"kyuubi", "kyuubi",
"kyuubi", "kyuubi",
InetAddress.getLocalHost.getCanonicalHostName, InetAddress.getLocalHost.getCanonicalHostName,
Map(KYUUBI_BATCH_ID_KEY -> UUID.randomUUID().toString),
newBatchRequest( newBatchRequest(
"spark", "spark",
"", "",
"", "",
"")) "",
Map(KYUUBI_BATCH_ID_KEY -> UUID.randomUUID().toString)))
sessionManager.openBatchSession( sessionManager.openBatchSession(
"kyuubi", "kyuubi",
"kyuubi", "kyuubi",
InetAddress.getLocalHost.getCanonicalHostName, InetAddress.getLocalHost.getCanonicalHostName,
Map(KYUUBI_BATCH_ID_KEY -> UUID.randomUUID().toString),
newBatchRequest( newBatchRequest(
"spark", "spark",
"", "",
"", "",
"")) "",
Map(KYUUBI_BATCH_ID_KEY -> UUID.randomUUID().toString)))
val listArgs = Array( val listArgs = Array(
"list", "list",