shark.api

JavaSharkContext

class JavaSharkContext extends JavaSparkContext

Linear Supertypes
JavaSparkContext, JavaSparkContextVarargsWorkaround, AnyRef, Any

Instance Constructors

  1. new JavaSharkContext(master: String, jobName: String, sparkHome: String, jars: Array[String], environment: Map[String, String])

    master

    Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]).

    jobName

    A name for your job, to display on the cluster web UI

    sparkHome

    The SPARK_HOME directory on the slave nodes

    jars

    Collection of JARs to send to the cluster. These can be paths on the local file system or HDFS, HTTP, HTTPS, or FTP URLs.

    environment

    Environment variables to set on worker nodes

  2. new JavaSharkContext(master: String, jobName: String, sparkHome: String, jars: Array[String])

    master

    Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]).

    jobName

    A name for your job, to display on the cluster web UI

    sparkHome

    The SPARK_HOME directory on the slave nodes

    jars

    Collection of JARs to send to the cluster. These can be paths on the local file system or HDFS, HTTP, HTTPS, or FTP URLs.

  3. new JavaSharkContext(master: String, jobName: String, sparkHome: String, jarFile: String)

    master

    Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]).

    jobName

    A name for your job, to display on the cluster web UI

    sparkHome

    The SPARK_HOME directory on the slave nodes

    jarFile

    A JAR to send to the cluster. This can be a path on the local file system or an HDFS, HTTP, HTTPS, or FTP URL.

  4. new JavaSharkContext(master: String, jobName: String)

    master

    Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]).

    jobName

    A name for your job, to display on the cluster web UI

  5. new JavaSharkContext(sharkCtx: SharkContext)
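
A minimal, illustrative sketch of constructing a JavaSharkContext from Java follows. The master URL, job name, SPARK_HOME path, and JAR path are placeholders, not values taken from this page.

    import shark.api.JavaSharkContext;

    public class JavaSharkContextConstruction {
      public static void main(String[] args) {
        // Run Shark locally with 4 worker threads; the job name is arbitrary.
        JavaSharkContext sc = new JavaSharkContext("local[4]", "shark-example");

        // To attach to a standalone cluster and ship an application JAR, the
        // four-argument constructor could be used instead (placeholder values):
        // JavaSharkContext clusterSc = new JavaSharkContext(
        //     "spark://host:7077", "shark-example", "/path/to/spark",
        //     new String[] { "target/my-app.jar" });

        sc.stop();
      }
    }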

Value Members

  1. final def !=(arg0: AnyRef): Boolean

    Definition Classes
    AnyRef
  2. final def !=(arg0: Any): Boolean

    Definition Classes
    Any
  3. final def ##(): Int

    Definition Classes
    AnyRef → Any
  4. final def ==(arg0: AnyRef): Boolean

    Definition Classes
    AnyRef
  5. final def ==(arg0: Any): Boolean

    Definition Classes
    Any
  6. def accumulable[T, R](initialValue: T, param: AccumulableParam[T, R]): Accumulable[T, R]

    Definition Classes
    JavaSparkContext
  7. def accumulator[T](initialValue: T, accumulatorParam: AccumulatorParam[T]): Accumulator[T]

    Definition Classes
    JavaSparkContext
  8. def accumulator(initialValue: Double): Accumulator[Double]

    Definition Classes
    JavaSparkContext
  9. def accumulator(initialValue: Int): Accumulator[Integer]

    Definition Classes
    JavaSparkContext
  10. def addFile(path: String): Unit

    Definition Classes
    JavaSparkContext
  11. def addJar(path: String): Unit

    Definition Classes
    JavaSparkContext
  12. final def asInstanceOf[T0]: T0

    Definition Classes
    Any
  13. def broadcast[T](value: T): Broadcast[T]

    Definition Classes
    JavaSparkContext
  14. def cancelAllJobs(): Unit

    Definition Classes
    JavaSparkContext
  15. def cancelJobGroup(groupId: String): Unit

    Definition Classes
    JavaSparkContext
  16. def checkpointFile[T](path: String): JavaRDD[T]

    Attributes
    protected
    Definition Classes
    JavaSparkContext
  17. def clearCallSite(): Unit

    Definition Classes
    JavaSparkContext
  18. def clearFiles(): Unit

    Definition Classes
    JavaSparkContext
  19. def clearJars(): Unit

    Definition Classes
    JavaSparkContext
  20. def clearJobGroup(): Unit

    Definition Classes
    JavaSparkContext
  21. def clone(): AnyRef

    Attributes
    protected[java.lang]
    Definition Classes
    AnyRef
    Annotations
    @throws( ... )
  22. def doubleAccumulator(initialValue: Double): Accumulator[Double]

    Definition Classes
    JavaSparkContext
  23. final def eq(arg0: AnyRef): Boolean

    Definition Classes
    AnyRef
  24. def equals(arg0: Any): Boolean

    Definition Classes
    AnyRef → Any
  25. def finalize(): Unit

    Attributes
    protected[java.lang]
    Definition Classes
    AnyRef
    Annotations
    @throws( classOf[java.lang.Throwable] )
  26. def getCheckpointDir: Optional[String]

    Definition Classes
    JavaSparkContext
  27. final def getClass(): Class[_]

    Definition Classes
    AnyRef → Any
  28. def getConf: SparkConf

    Definition Classes
    JavaSparkContext
  29. def getLocalProperty(key: String): String

    Definition Classes
    JavaSparkContext
  30. def getSparkHome(): Optional[String]

    Definition Classes
    JavaSparkContext
  31. def hadoopConfiguration(): Configuration

    Definition Classes
    JavaSparkContext
  32. def hadoopFile[K, V, F <: InputFormat[K, V]](path: String, inputFormatClass: Class[F], keyClass: Class[K], valueClass: Class[V]): JavaPairRDD[K, V]

    Definition Classes
    JavaSparkContext
  33. def hadoopFile[K, V, F <: InputFormat[K, V]](path: String, inputFormatClass: Class[F], keyClass: Class[K], valueClass: Class[V], minSplits: Int): JavaPairRDD[K, V]

    Definition Classes
    JavaSparkContext
  34. def hadoopRDD[K, V, F <: InputFormat[K, V]](conf: JobConf, inputFormatClass: Class[F], keyClass: Class[K], valueClass: Class[V]): JavaPairRDD[K, V]

    Definition Classes
    JavaSparkContext
  35. def hadoopRDD[K, V, F <: InputFormat[K, V]](conf: JobConf, inputFormatClass: Class[F], keyClass: Class[K], valueClass: Class[V], minSplits: Int): JavaPairRDD[K, V]

    Definition Classes
    JavaSparkContext
  36. def hashCode(): Int

    Definition Classes
    AnyRef → Any
  37. def intAccumulator(initialValue: Int): Accumulator[Integer]

    Definition Classes
    JavaSparkContext
  38. final def isInstanceOf[T0]: Boolean

    Definition Classes
    Any
  39. final def ne(arg0: AnyRef): Boolean

    Definition Classes
    AnyRef
  40. def newAPIHadoopFile[K, V, F <: InputFormat[K, V]](path: String, fClass: Class[F], kClass: Class[K], vClass: Class[V], conf: Configuration): JavaPairRDD[K, V]

    Definition Classes
    JavaSparkContext
  41. def newAPIHadoopRDD[K, V, F <: InputFormat[K, V]](conf: Configuration, fClass: Class[F], kClass: Class[K], vClass: Class[V]): JavaPairRDD[K, V]

    Definition Classes
    JavaSparkContext
  42. final def notify(): Unit

    Definition Classes
    AnyRef
  43. final def notifyAll(): Unit

    Definition Classes
    AnyRef
  44. def objectFile[T](path: String): JavaRDD[T]

    Definition Classes
    JavaSparkContext
  45. def objectFile[T](path: String, minSplits: Int): JavaRDD[T]

    Definition Classes
    JavaSparkContext
  46. def parallelize[T](list: List[T]): JavaRDD[T]

    Definition Classes
    JavaSparkContext
  47. def parallelize[T](list: List[T], numSlices: Int): JavaRDD[T]

    Definition Classes
    JavaSparkContext
  48. def parallelizeDoubles(list: List[Double]): JavaDoubleRDD

    Definition Classes
    JavaSparkContext
  49. def parallelizeDoubles(list: List[Double], numSlices: Int): JavaDoubleRDD

    Definition Classes
    JavaSparkContext
  50. def parallelizePairs[K, V](list: List[(K, V)]): JavaPairRDD[K, V]

    Definition Classes
    JavaSparkContext
  51. def parallelizePairs[K, V](list: List[(K, V)], numSlices: Int): JavaPairRDD[K, V]

    Definition Classes
    JavaSparkContext
  52. def runSql(cmd: String, maxRows: Int): ResultSet

    Execute a SQL command and collect the results locally.

    cmd

    The SQL command to be executed.

    maxRows

    The max number of rows to retrieve for the result set.

    returns

    A ResultSet object with both the schema and the query results.

  53. def runSql(cmd: String): ResultSet

    Execute a SQL command and collect the results locally. This function returns a maximum of 1000 rows. To fetch a larger result set, use runSql with maxRows specified. A usage sketch follows the member list below.

    cmd

    The SQL command to be executed.

    returns

    A ResultSet object with both the schema and the query results.

  54. val sc: SparkContext

    Definition Classes
    JavaSparkContext
  55. def sequenceFile[K, V](path: String, keyClass: Class[K], valueClass: Class[V]): JavaPairRDD[K, V]

    Definition Classes
    JavaSparkContext
  56. def sequenceFile[K, V](path: String, keyClass: Class[K], valueClass: Class[V], minSplits: Int): JavaPairRDD[K, V]

    Definition Classes
    JavaSparkContext
  57. def setCallSite(site: String): Unit

    Definition Classes
    JavaSparkContext
  58. def setCheckpointDir(dir: String): Unit

    Definition Classes
    JavaSparkContext
  59. def setJobGroup(groupId: String, description: String): Unit

    Definition Classes
    JavaSparkContext
  60. def setLocalProperty(key: String, value: String): Unit

    Definition Classes
    JavaSparkContext
  61. val sharkCtx: SharkContext

  62. def sql(cmd: String): List[String]

    Execute the command and return the results as a sequence. Each element in the sequence is one row.

  63. def sql2console(cmd: String): Unit

    Execute the command and print the results to the console.

  64. def sql2rdd(cmd: String): JavaTableRDD

    Execute the command and return the results as a TableRDD. A usage sketch follows the member list below.

  65. def stop(): Unit

    Definition Classes
    JavaSparkContext
  66. final def synchronized[T0](arg0: ⇒ T0): T0

    Definition Classes
    AnyRef
  67. def textFile(path: String, minSplits: Int): JavaRDD[String]

    Definition Classes
    JavaSparkContext
  68. def textFile(path: String): JavaRDD[String]

    Definition Classes
    JavaSparkContext
  69. def toString(): String

    Definition Classes
    AnyRef → Any
  70. def union(first: JavaDoubleRDD, rest: List[JavaDoubleRDD]): JavaDoubleRDD

    Definition Classes
    JavaSparkContext → JavaSparkContextVarargsWorkaround
  71. def union[K, V](first: JavaPairRDD[K, V], rest: List[JavaPairRDD[K, V]]): JavaPairRDD[K, V]

    Definition Classes
    JavaSparkContext → JavaSparkContextVarargsWorkaround
  72. def union[T](first: JavaRDD[T], rest: List[JavaRDD[T]]): JavaRDD[T]

    Definition Classes
    JavaSparkContext → JavaSparkContextVarargsWorkaround
  73. def union[K, V](arg0: <repeated...>[JavaPairRDD[K, V]]): JavaPairRDD[K, V]

    Definition Classes
    JavaSparkContextVarargsWorkaround
  74. def union(arg0: <repeated...>[JavaDoubleRDD]): JavaDoubleRDD

    Definition Classes
    JavaSparkContextVarargsWorkaround
  75. def union[T](arg0: <repeated...>[JavaRDD[T]]): JavaRDD[T]

    Definition Classes
    JavaSparkContextVarargsWorkaround
  76. final def wait(): Unit

    Definition Classes
    AnyRef
    Annotations
    @throws( ... )
  77. final def wait(arg0: Long, arg1: Int): Unit

    Definition Classes
    AnyRef
    Annotations
    @throws( ... )
  78. final def wait(arg0: Long): Unit

    Definition Classes
    AnyRef
    Annotations
    @throws( ... )
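
The sketch below illustrates the query methods documented above (sql2console, sql, runSql, and sql2rdd) from Java. It is illustrative only: the table name src and the queries are hypothetical, sql is assumed to return a java.util.List, JavaTableRDD is assumed to support the usual count() operation, and ResultSet and JavaTableRDD are assumed to live in the shark.api package alongside JavaSharkContext.

    import java.util.List;

    import shark.api.JavaSharkContext;
    import shark.api.JavaTableRDD;
    import shark.api.ResultSet;

    public class JavaSharkContextQueries {
      public static void main(String[] args) {
        JavaSharkContext sc = new JavaSharkContext("local[4]", "shark-query-example");

        // Print the available tables directly to the console.
        sc.sql2console("SHOW TABLES");

        // Return each result row as a single string (hypothetical table "src").
        List<String> rows = sc.sql("SELECT key, value FROM src LIMIT 10");
        for (String row : rows) {
          System.out.println(row);
        }

        // Keep the schema alongside the data, capping the result at 100 rows.
        // Per the runSql documentation, rs carries both schema and query results.
        ResultSet rs = sc.runSql("SELECT key, value FROM src", 100);

        // Leave the result distributed as a TableRDD instead of collecting it.
        JavaTableRDD table = sc.sql2rdd("SELECT key, value FROM src");
        System.out.println("rows: " + table.count());

        sc.stop();
      }
    }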
