Instance Constructors
-
new
JobManagerActor(contextConfig: Config)
Type Members
-
type
Receive = PartialFunction[Any, Unit]
Value Members
-
final
def
!=(arg0: Any): Boolean
-
final
def
##(): Int
-
final
def
==(arg0: Any): Boolean
-
def
aroundPostRestart(reason: Throwable): Unit
-
def
aroundPostStop(): Unit
-
def
aroundPreRestart(reason: Throwable, message: Option[Any]): Unit
-
def
aroundPreStart(): Unit
-
def
aroundReceive(receive: akka.actor.Actor.Receive, msg: Any): Unit
-
final
def
asInstanceOf[T0]: T0
-
def
clone(): AnyRef
-
val
config: Config
-
implicit
val
context: ActorContext
-
def
createContextFromConfig(contextName: String = contextName): ContextLike
-
final
def
eq(arg0: AnyRef): Boolean
-
def
equals(arg0: Any): Boolean
-
val
executionContext: ExecutionContextExecutorService
-
def
finalize(): Unit
-
final
def
getClass(): Class[_]
-
def
getRootCause(t: Throwable): Throwable
-
def
hashCode(): Int
-
final
def
isInstanceOf[T0]: Boolean
-
-
val
logger: Logger
-
val
metricReceiveTimer: Timer
-
final
def
ne(arg0: AnyRef): Boolean
-
final
def
notify(): Unit
-
final
def
notifyAll(): Unit
-
def
postRestart(reason: Throwable): Unit
-
def
postStop(): Unit
-
def
preRestart(reason: Throwable, message: Option[Any]): Unit
-
def
preStart(): Unit
-
-
var
resultActor: ActorRef
-
implicit final
val
self: ActorRef
-
final
def
sender(): ActorRef
-
var
sparkEnv: SparkEnv
-
def
startJobInternal(appName: String, classPath: String, jobConfig: Config, events: Set[Class[_]], jobContext: ContextLike, sparkEnv: SparkEnv): Option[Future[Any]]
-
def
supervisorStrategy: SupervisorStrategy
-
final
def
synchronized[T0](arg0: ⇒ T0): T0
-
def
toString(): String
-
def
unhandled(message: Any): Unit
-
final
def
wait(): Unit
-
final
def
wait(arg0: Long, arg1: Int): Unit
-
final
def
wait(arg0: Long): Unit
-
def
wrapInRuntimeException(t: Throwable): RuntimeException
-
def
wrappedReceive: Receive
Inherited from Actor
Inherited from AnyRef
Inherited from Any
The JobManager actor supervises jobs running in a single SparkContext, as well as shared metadata. It creates a SparkContext (or a StreamingContext etc., depending on the factory class). It also creates and supervises a JobResultActor and JobStatusActor, although an existing JobResultActor can be passed in as well.
contextConfig
the global configuration for this context
spark { jobserver { max-jobs-per-context = 16 # Number of jobs that can be run simultaneously per context } }