org.apache.spark.SparkContext.accumulable(R, AccumulableParam<R, T>)
org.apache.spark.SparkContext.accumulable(R, String, AccumulableParam<R, T>)
org.apache.spark.api.java.JavaSparkContext.accumulable(T, AccumulableParam<T, R>)
org.apache.spark.api.java.JavaSparkContext.accumulable(T, String, AccumulableParam<T, R>)
org.apache.spark.SparkContext.accumulableCollection(R, Function1<R, Growable<T>>, ClassTag<R>)
org.apache.spark.api.java.JavaSparkContext.accumulator(double)
org.apache.spark.api.java.JavaSparkContext.accumulator(double, String)
org.apache.spark.api.java.JavaSparkContext.accumulator(int)
org.apache.spark.api.java.JavaSparkContext.accumulator(int, String)
org.apache.spark.SparkContext.accumulator(T, AccumulatorParam<T>)
org.apache.spark.api.java.JavaSparkContext.accumulator(T, AccumulatorParam<T>)
org.apache.spark.SparkContext.accumulator(T, String, AccumulatorParam<T>)
org.apache.spark.api.java.JavaSparkContext.accumulator(T, String, AccumulatorParam<T>)
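The accumulable and accumulator methods above were superseded by the AccumulatorV2 API in Spark 2.0. A minimal migration sketch, assuming an existing SparkContext named sc and an RDD of doubles named rdd (both hypothetical names):

    // Built-in typed accumulators replace accumulator(int) / accumulator(double)
    val counter = sc.longAccumulator("records seen")
    val total   = sc.doubleAccumulator("running sum")
    rdd.foreach { x =>
      counter.add(1)
      total.add(x)
    }
    println((counter.value, total.value))

Custom AccumulableParam implementations map onto subclasses of org.apache.spark.util.AccumulatorV2 registered via sc.register(...).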
org.apache.spark.sql.streaming.ProcessingTime.apply(Duration)
org.apache.spark.scheduler.AccumulableInfo.apply(long, String, Option<String>, String)
org.apache.spark.scheduler.AccumulableInfo.apply(long, String, Option<String>, String, boolean)
org.apache.spark.scheduler.AccumulableInfo.apply(long, String, String)
org.apache.spark.sql.streaming.ProcessingTime.apply(String)
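The ProcessingTime factories were deprecated in favor of Trigger.ProcessingTime. A minimal sketch, assuming a streaming Dataset named df (a hypothetical name):

    import org.apache.spark.sql.streaming.Trigger

    val query = df.writeStream
      .format("console")
      .trigger(Trigger.ProcessingTime("10 seconds")) // replaces ProcessingTime("10 seconds")
      .start()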
org.apache.spark.sql.SQLContext.applySchema(JavaRDD<?>, Class<?>)
org.apache.spark.sql.SQLContext.applySchema(JavaRDD<Row>, StructType)
org.apache.spark.sql.SQLContext.applySchema(RDD<?>, Class<?>)
org.apache.spark.sql.SQLContext.applySchema(RDD<Row>, StructType)
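The applySchema overloads were renamed to createDataFrame, now reached through SparkSession. A minimal sketch, assuming a SparkSession named spark and an RDD[Row] named rowRDD (hypothetical names):

    import org.apache.spark.sql.types.{StructField, StringType, StructType}

    val schema = StructType(Seq(StructField("name", StringType, nullable = true)))
    val df = spark.createDataFrame(rowRDD, schema) // replaces sqlContext.applySchema(rowRDD, schema)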
org.apache.spark.sql.functions.approxCountDistinct(Column)
org.apache.spark.sql.functions.approxCountDistinct(Column, double)
org.apache.spark.sql.functions.approxCountDistinct(String)
org.apache.spark.sql.functions.approxCountDistinct(String, double)
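approxCountDistinct was renamed approx_count_distinct to match the SQL function name. A minimal sketch, assuming a DataFrame named df with a column id (hypothetical names):

    import org.apache.spark.sql.functions.{approx_count_distinct, col}

    df.agg(approx_count_distinct(col("id")))       // default relative error
    df.agg(approx_count_distinct(col("id"), 0.05)) // explicit maximum estimation error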
org.apache.spark.scheduler.StageInfo.attemptId()
org.apache.spark.sql.SQLContext.clearActive()
org.apache.spark.ml.clustering.KMeansModel.computeCost(Dataset<?>)
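StageInfo.attemptId was renamed attemptNumber, and KMeansModel.computeCost was deprecated in favor of evaluating clusterings with ClusteringEvaluator. A minimal sketch of the latter, assuming a fitted model named model and a Dataset named data (hypothetical names):

    import org.apache.spark.ml.evaluation.ClusteringEvaluator

    val predictions = model.transform(data)
    val evaluator = new ClusteringEvaluator() // silhouette score by default
    val silhouette = evaluator.evaluate(predictions)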
org.apache.spark.sql.streaming.ProcessingTime.create(long, TimeUnit)
org.apache.spark.sql.streaming.ProcessingTime.create(String)
org.apache.spark.sql.SQLContext.createExternalTable(String, String)
org.apache.spark.sql.catalog.Catalog.createExternalTable(String, String)
org.apache.spark.sql.SQLContext.createExternalTable(String, String, java.util.Map<String, String>)
org.apache.spark.sql.SQLContext.createExternalTable(String, String, scala.collection.immutable.Map<String, String>)
org.apache.spark.sql.catalog.Catalog.createExternalTable(String, String, java.util.Map<String, String>)
org.apache.spark.sql.catalog.Catalog.createExternalTable(String, String, scala.collection.immutable.Map<String, String>)
org.apache.spark.sql.SQLContext.createExternalTable(String, String, String)
org.apache.spark.sql.catalog.Catalog.createExternalTable(String, String, String)
org.apache.spark.sql.SQLContext.createExternalTable(String, String, StructType, java.util.Map<String, String>)
org.apache.spark.sql.SQLContext.createExternalTable(String, String, StructType, scala.collection.immutable.Map<String, String>)
org.apache.spark.sql.catalog.Catalog.createExternalTable(String, String, StructType, java.util.Map<String, String>)
org.apache.spark.sql.catalog.Catalog.createExternalTable(String, String, StructType, scala.collection.immutable.Map<String, String>)
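ProcessingTime.create follows the same migration as ProcessingTime.apply above (Trigger.ProcessingTime), and the createExternalTable overloads were renamed to Catalog.createTable. A minimal sketch of the table migration, assuming a SparkSession named spark and placeholder table details (hypothetical names):

    // replaces spark.catalog.createExternalTable("sales", "/data/sales", "parquet")
    spark.catalog.createTable("sales", "/data/sales", "parquet")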
org.apache.spark.streaming.kinesis.KinesisUtils.createStream(JavaStreamingContext, String, String, String, String, InitialPositionInStream, Duration, StorageLevel)
org.apache.spark.streaming.kinesis.KinesisUtils.createStream(JavaStreamingContext, String, String, String, String, InitialPositionInStream, Duration, StorageLevel, Function<Record, T>, Class<T>)
org.apache.spark.streaming.kinesis.KinesisUtils.createStream(JavaStreamingContext, String, String, String, String, InitialPositionInStream, Duration, StorageLevel, Function<Record, T>, Class<T>, String, String)
org.apache.spark.streaming.kinesis.KinesisUtils.createStream(JavaStreamingContext, String, String, String, String, InitialPositionInStream, Duration, StorageLevel, Function<Record, T>, Class<T>, String, String, String, String, String)
org.apache.spark.streaming.kinesis.KinesisUtils.createStream(JavaStreamingContext, String, String, String, String, InitialPositionInStream, Duration, StorageLevel, String, String)
org.apache.spark.streaming.kinesis.KinesisUtils.createStream(StreamingContext, String, String, String, String, InitialPositionInStream, Duration, StorageLevel)
org.apache.spark.streaming.kinesis.KinesisUtils.createStream(StreamingContext, String, String, String, String, InitialPositionInStream, Duration, StorageLevel, Function1<Record, T>, ClassTag<T>)
org.apache.spark.streaming.kinesis.KinesisUtils.createStream(StreamingContext, String, String, String, String, InitialPositionInStream, Duration, StorageLevel, Function1<Record, T>, String, String, ClassTag<T>)
org.apache.spark.streaming.kinesis.KinesisUtils.createStream(StreamingContext, String, String, String, String, InitialPositionInStream, Duration, StorageLevel, Function1<Record, T>, String, String, String, String, String, ClassTag<T>)
org.apache.spark.streaming.kinesis.KinesisUtils.createStream(StreamingContext, String, String, String, String, InitialPositionInStream, Duration, StorageLevel, String, String)
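The KinesisUtils.createStream overloads were superseded by the KinesisInputDStream builder. A minimal sketch following the Kinesis integration guide, assuming a StreamingContext named ssc, a batch interval named batchInterval, and placeholder stream details (all hypothetical names and values):

    import org.apache.spark.storage.StorageLevel
    import org.apache.spark.streaming.kinesis.{KinesisInitialPositions, KinesisInputDStream}

    val stream = KinesisInputDStream.builder
      .streamingContext(ssc)
      .streamName("myStream")
      .endpointUrl("https://kinesis.us-east-1.amazonaws.com")
      .regionName("us-east-1")
      .initialPosition(new KinesisInitialPositions.Latest())
      .checkpointAppName("myKinesisApp")
      .checkpointInterval(batchInterval)
      .storageLevel(StorageLevel.MEMORY_AND_DISK_2)
      .build()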
org.apache.spark.sql.functions.currentRow()
org.apache.spark.api.java.JavaSparkContext.doubleAccumulator(double)
org.apache.spark.api.java.JavaSparkContext.doubleAccumulator(double, String)
org.apache.spark.sql.Dataset.explode(Seq<Column>, Function1<Row, TraversableOnce<A>>, TypeTags.TypeTag<A>)
org.apache.spark.sql.Dataset.explode(String, String, Function1<A, TraversableOnce<B>>, TypeTags.TypeTag<B>)
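Dataset.explode was deprecated in favor of flatMap or the explode function inside select, and functions.currentRow moved to the Window.currentRow constant (see the window sketch further down). A minimal sketch of the explode migration, assuming a DataFrame named df with an array column items (hypothetical names):

    import org.apache.spark.sql.functions.{col, explode}

    // replaces df.explode("items", "item") { ... }
    val exploded = df.select(col("*"), explode(col("items")).as("item"))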
org.apache.spark.mllib.evaluation.MulticlassMetrics.fMeasure()
org.apache.spark.sql.SQLContext.getOrCreate(SparkContext)
org.apache.spark.mllib.clustering.KMeans.getRuns()
org.apache.spark.api.java.JavaSparkContext.intAccumulator(int)
org.apache.spark.api.java.JavaSparkContext.intAccumulator(int, String)
org.apache.spark.TaskContext.isRunningLocally()
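SQLContext.getOrCreate gave way to the SparkSession builder, and the no-argument MulticlassMetrics.fMeasure (like precision and recall below) was deprecated in favor of accuracy. A minimal sketch of the session migration, with the application name as a placeholder:

    import org.apache.spark.sql.SparkSession

    // replaces SQLContext.getOrCreate(sc)
    val spark = SparkSession.builder()
      .appName("MyApp")
      .getOrCreate()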
org.apache.spark.sql.SQLContext.jdbc(String, String)
org.apache.spark.sql.SQLContext.jdbc(String, String, String[])
org.apache.spark.sql.SQLContext.jdbc(String, String, String, long, long, int)
org.apache.spark.sql.DataFrameReader.json(JavaRDD<String>)
org.apache.spark.sql.DataFrameReader.json(RDD<String>)
org.apache.spark.sql.SQLContext.jsonFile(String)
org.apache.spark.sql.SQLContext.jsonFile(String, double)
org.apache.spark.sql.SQLContext.jsonFile(String, StructType)
org.apache.spark.sql.SQLContext.jsonRDD(JavaRDD<String>)
org.apache.spark.sql.SQLContext.jsonRDD(JavaRDD<String>, double)
org.apache.spark.sql.SQLContext.jsonRDD(JavaRDD<String>, StructType)
org.apache.spark.sql.SQLContext.jsonRDD(RDD<String>)
org.apache.spark.sql.SQLContext.jsonRDD(RDD<String>, double)
org.apache.spark.sql.SQLContext.jsonRDD(RDD<String>, StructType)
org.apache.spark.sql.SQLContext.load(String)
org.apache.spark.sql.SQLContext.load(String, java.util.Map<String, String>)
org.apache.spark.sql.SQLContext.load(String, scala.collection.immutable.Map<String, String>)
org.apache.spark.sql.SQLContext.load(String, String)
org.apache.spark.sql.SQLContext.load(String, StructType, java.util.Map<String, String>)
org.apache.spark.sql.SQLContext.load(String, StructType, scala.collection.immutable.Map<String, String>)
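The jdbc, jsonFile, jsonRDD, and load methods were all folded into DataFrameReader, reached through SparkSession.read. A minimal sketch, assuming a SparkSession named spark and placeholder connection details and paths (hypothetical names and values):

    import java.util.Properties

    // replaces SQLContext.jdbc(url, table)
    val jdbcDF = spark.read.jdbc("jdbc:postgresql://host/db", "sales", new Properties())

    // replaces jsonFile / jsonRDD
    val jsonDF = spark.read.json("/data/events.json")

    // replaces load(path, source, options)
    val csvDF = spark.read.format("csv").option("header", "true").load("/data/events.csv")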
org.apache.spark.sql.functions.monotonicallyIncreasingId()
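monotonicallyIncreasingId was renamed monotonically_increasing_id. A minimal sketch, assuming a DataFrame named df (a hypothetical name):

    import org.apache.spark.sql.functions.monotonically_increasing_id

    val withId = df.withColumn("row_id", monotonically_increasing_id())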
org.apache.spark.sql.SQLImplicits.newBooleanSeqEncoder()
org.apache.spark.sql.SQLImplicits.newByteSeqEncoder()
org.apache.spark.sql.SQLImplicits.newDoubleSeqEncoder()
org.apache.spark.sql.SQLImplicits.newFloatSeqEncoder()
org.apache.spark.sql.SQLImplicits.newIntSeqEncoder()
org.apache.spark.sql.SQLImplicits.newLongSeqEncoder()
org.apache.spark.sql.SQLImplicits.newProductSeqEncoder(TypeTags.TypeTag<A>)
org.apache.spark.sql.SQLImplicits.newShortSeqEncoder()
org.apache.spark.sql.SQLImplicits.newStringSeqEncoder()
org.apache.spark.sql.SQLContext.parquetFile(Seq<String>)
org.apache.spark.sql.SQLContext.parquetFile(String...)
org.apache.spark.mllib.evaluation.MulticlassMetrics.precision()
org.apache.spark.sql.expressions.Window.rangeBetween(Column, Column)
org.apache.spark.sql.expressions.WindowSpec.rangeBetween(Column, Column)
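The Column-based rangeBetween overloads were deprecated in favor of the long-based ones used with the Window boundary constants (which also replace functions.unboundedPreceding, unboundedFollowing, and currentRow near the end of this list). A minimal sketch, assuming a DataFrame named df with columns ts and amount (hypothetical names):

    import org.apache.spark.sql.expressions.Window
    import org.apache.spark.sql.functions.sum

    val w = Window.orderBy("ts").rangeBetween(Window.unboundedPreceding, Window.currentRow)
    val running = df.withColumn("running_total", sum("amount").over(w))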
org.apache.spark.ml.image.ImageSchema.readImages(String)
org.apache.spark.ml.image.ImageSchema.readImages(String, SparkSession, boolean, int, boolean, double, long)
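ImageSchema.readImages was superseded by the image data source. A minimal sketch, assuming a SparkSession named spark and a placeholder directory (hypothetical names):

    // replaces ImageSchema.readImages("/data/images")
    val images = spark.read.format("image").load("/data/images")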
org.apache.spark.mllib.evaluation.MulticlassMetrics.recall()
org.apache.spark.sql.Dataset.registerTempTable(String)
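registerTempTable was renamed createOrReplaceTempView to make the view semantics explicit. A minimal sketch, assuming a DataFrame named df and a SparkSession named spark (hypothetical names):

    df.createOrReplaceTempView("people") // replaces df.registerTempTable("people")
    val adults = spark.sql("SELECT * FROM people WHERE age >= 18")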
org.apache.spark.sql.SQLContext.setActive(SQLContext)
org.apache.spark.mllib.clustering.KMeans.setRuns(int)
org.apache.spark.sql.functions.toDegrees(Column)
org.apache.spark.sql.functions.toDegrees(String)
org.apache.spark.sql.functions.toRadians(Column)
org.apache.spark.sql.functions.toRadians(String)
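toDegrees and toRadians were renamed degrees and radians to match SQL. A minimal sketch, assuming a DataFrame named df with a numeric column angle (hypothetical names):

    import org.apache.spark.sql.functions.{col, degrees, radians}

    df.select(degrees(col("angle")), radians(col("angle")))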
org.apache.spark.mllib.clustering.KMeans.train(RDD<Vector>, int, int, int)
org.apache.spark.mllib.clustering.KMeans.train(RDD<Vector>, int, int, int, String)
org.apache.spark.mllib.clustering.KMeans.train(RDD<Vector>, int, int, int, String, long)
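These train overloads were deprecated because their runs argument no longer has any effect; the overloads without it remain. A minimal sketch, assuming an RDD[Vector] named data (a hypothetical name):

    import org.apache.spark.mllib.clustering.KMeans

    // replaces KMeans.train(data, k, maxIterations, runs)
    val model = KMeans.train(data, 3, 20)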
org.apache.spark.sql.functions.unboundedFollowing()
org.apache.spark.sql.functions.unboundedPreceding()
org.apache.spark.sql.Dataset.unionAll(Dataset<T>)
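unionAll was renamed union; the behavior is unchanged, so duplicates are still kept and must be removed explicitly if set semantics are wanted. A minimal sketch, assuming two Datasets named ds1 and ds2 of the same type (hypothetical names):

    val combined = ds1.union(ds2)            // replaces ds1.unionAll(ds2)
    val deduped  = ds1.union(ds2).distinct() // drop duplicates explicitly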