org.apache.spark.SparkContext.accumulable(R, AccumulableParam<R, T>)
Use AccumulatorV2. Since 2.0.0.
|
org.apache.spark.SparkContext.accumulable(R, String, AccumulableParam<R, T>)
Use AccumulatorV2. Since 2.0.0.
|
org.apache.spark.api.java.JavaSparkContext.accumulable(T, AccumulableParam<T, R>)
Use AccumulatorV2. Since 2.0.0.
|
org.apache.spark.api.java.JavaSparkContext.accumulable(T, String, AccumulableParam<T, R>)
Use AccumulatorV2. Since 2.0.0.
|
org.apache.spark.SparkContext.accumulableCollection(R, Function1<R, Growable<T>>, ClassTag<R>)
Use AccumulatorV2. Since 2.0.0.
|
org.apache.spark.api.java.JavaSparkContext.accumulator(double)
Use sc().doubleAccumulator(). Since 2.0.0.
|
org.apache.spark.api.java.JavaSparkContext.accumulator(double, String)
Use sc().doubleAccumulator(String). Since 2.0.0.
|
org.apache.spark.api.java.JavaSparkContext.accumulator(int)
Use sc().longAccumulator(). Since 2.0.0.
|
org.apache.spark.api.java.JavaSparkContext.accumulator(int, String)
Use sc().longAccumulator(String). Since 2.0.0.
|
org.apache.spark.SparkContext.accumulator(T, AccumulatorParam<T>)
Use AccumulatorV2. Since 2.0.0.
|
org.apache.spark.api.java.JavaSparkContext.accumulator(T, AccumulatorParam<T>)
Use AccumulatorV2. Since 2.0.0.
|
org.apache.spark.SparkContext.accumulator(T, String, AccumulatorParam<T>)
Use AccumulatorV2. Since 2.0.0.
|
org.apache.spark.api.java.JavaSparkContext.accumulator(T, String, AccumulatorParam<T>)
Use AccumulatorV2. Since 2.0.0.
|
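A minimal Scala migration sketch for the accumulator entries above, assuming an existing SparkContext named sc: the built-in long and double accumulators cover the primitive accumulator(...) overloads, and a custom AccumulatorV2 subclass (the SetAccumulator below is a hypothetical example) replaces AccumulableParam/AccumulatorParam.

    import org.apache.spark.util.AccumulatorV2
    import scala.collection.mutable

    // Built-in replacements for the deprecated accumulator(...) overloads.
    val counter = sc.longAccumulator("events")       // was sc.accumulator(0)
    val total   = sc.doubleAccumulator("totalSize")  // was sc.accumulator(0.0)
    sc.parallelize(1 to 100).foreach { x =>
      counter.add(1L)
      total.add(x.toDouble)
    }

    // Custom AccumulatorV2, replacing an AccumulableParam-based set accumulator.
    class SetAccumulator[T] extends AccumulatorV2[T, Set[T]] {
      private val elems = mutable.Set.empty[T]
      override def isZero: Boolean = elems.isEmpty
      override def copy(): SetAccumulator[T] = {
        val acc = new SetAccumulator[T]
        acc.elems ++= elems
        acc
      }
      override def reset(): Unit = elems.clear()
      override def add(v: T): Unit = elems += v
      override def merge(other: AccumulatorV2[T, Set[T]]): Unit =
        elems ++= other.value
      override def value: Set[T] = elems.toSet
    }
    val seen = new SetAccumulator[String]
    sc.register(seen, "seenKeys")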
org.apache.spark.scheduler.AccumulableInfo.apply(long, String, Option<String>, String)
Do not create AccumulableInfo. Since 2.0.0.
|
org.apache.spark.scheduler.AccumulableInfo.apply(long, String, Option<String>, String, boolean)
Do not create AccumulableInfo. Since 2.0.0.
|
org.apache.spark.scheduler.AccumulableInfo.apply(long, String, String)
Do not create AccumulableInfo. Since 2.0.0.
|
org.apache.spark.sql.SQLContext.applySchema(JavaRDD<?>, Class<?>)
Use createDataFrame instead. Since 1.3.0.
|
org.apache.spark.sql.SQLContext.applySchema(JavaRDD<Row>, StructType)
Use createDataFrame instead. Since 1.3.0.
|
org.apache.spark.sql.SQLContext.applySchema(RDD<?>, Class<?>)
Use createDataFrame instead. Since 1.3.0.
|
org.apache.spark.sql.SQLContext.applySchema(RDD<Row>, StructType)
Use createDataFrame instead. Since 1.3.0.
|
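A migration sketch for the applySchema entries above, assuming a SparkSession named spark; createDataFrame takes the same RDD-of-Row plus StructType arguments.

    import org.apache.spark.sql.Row
    import org.apache.spark.sql.types.{IntegerType, StringType, StructField, StructType}

    val schema = StructType(Seq(
      StructField("id", IntegerType, nullable = false),
      StructField("name", StringType, nullable = true)))
    val rowRDD = spark.sparkContext.parallelize(Seq(Row(1, "a"), Row(2, "b")))
    // was: sqlContext.applySchema(rowRDD, schema)
    val df = spark.createDataFrame(rowRDD, schema)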
org.apache.spark.sql.SQLContext.clearActive()
Use SparkSession.clearActiveSession instead. Since 2.0.0.
|
org.apache.spark.api.java.JavaSparkContext.doubleAccumulator(double)
Use sc().doubleAccumulator(). Since 2.0.0.
|
org.apache.spark.api.java.JavaSparkContext.doubleAccumulator(double, String)
Use sc().doubleAccumulator(String). Since 2.0.0.
|
org.apache.spark.sql.Dataset.explode(Seq<Column>, Function1<Row, TraversableOnce<A>>, TypeTags.TypeTag<A>)
Use flatMap() or select() with functions.explode() instead. Since 2.0.0.
|
org.apache.spark.sql.Dataset.explode(String, String, Function1<A, TraversableOnce<B>>, TypeTags.TypeTag<B>)
Use flatMap() or select() with functions.explode() instead. Since 2.0.0.
|
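For the Dataset.explode entries above, a sketch of both suggested replacements, assuming a SparkSession named spark with spark.implicits._ imported.

    import org.apache.spark.sql.functions.{explode, split}

    val lines = Seq("a b", "c d e").toDF("text")
    // was: lines.explode("text", "word")((s: String) => s.split(" "))
    val words = lines.select(explode(split($"text", " ")).as("word"))
    // typed alternative via flatMap:
    val words2 = lines.as[String].flatMap(_.split(" "))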
org.apache.spark.mllib.evaluation.MulticlassMetrics.fMeasure()
Use accuracy. Since 2.0.0.
|
org.apache.spark.sql.SQLContext.getOrCreate(SparkContext)
Use SparkSession.builder instead. Since 2.0.0.
|
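The SQLContext lifecycle methods (getOrCreate above, plus setActive and clearActive elsewhere in this list) all map onto SparkSession; a minimal sketch assuming an existing SparkContext behind the scenes:

    import org.apache.spark.sql.SparkSession

    // was: SQLContext.getOrCreate(sc)
    val spark = SparkSession.builder().appName("app").getOrCreate()
    // was: SQLContext.setActive(sqlContext) / SQLContext.clearActive()
    SparkSession.setActiveSession(spark)
    SparkSession.clearActiveSession()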
org.apache.spark.api.java.JavaSparkContext.intAccumulator(int)
Use sc().longAccumulator(). Since 2.0.0.
|
org.apache.spark.api.java.JavaSparkContext.intAccumulator(int, String)
Use sc().longAccumulator(String). Since 2.0.0.
|
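The JavaSparkContext accumulator helpers above are all replaced by going through the underlying SparkContext, exposed as sc() in Java (plain sc in Scala). A sketch assuming an existing JavaSparkContext named jsc:

    // was: jsc.intAccumulator(0) / jsc.doubleAccumulator(0.0)
    val longAcc   = jsc.sc.longAccumulator("count")
    val doubleAcc = jsc.sc.doubleAccumulator("sum")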
org.apache.spark.TaskContext.isRunningLocally()
Local execution was removed, so this always returns false. Since 2.0.0.
|
org.apache.spark.sql.SQLContext.jdbc(String, String)
As of 1.4.0, replaced by read().jdbc().
|
org.apache.spark.sql.SQLContext.jdbc(String, String, String[])
As of 1.4.0, replaced by read().jdbc().
|
org.apache.spark.sql.SQLContext.jdbc(String, String, String, long, long, int)
As of 1.4.0, replaced by read().jdbc().
|
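A read().jdbc() sketch for the entries above, assuming a SparkSession named spark; the URL, table, and credentials are placeholders.

    import java.util.Properties

    val props = new Properties()
    props.setProperty("user", "dbuser")  // placeholder credentials
    // was: sqlContext.jdbc(url, table)
    val df = spark.read.jdbc("jdbc:postgresql://host/db", "schema.table", props)
    // was: sqlContext.jdbc(url, table, column, lowerBound, upperBound, numPartitions)
    val partitioned = spark.read.jdbc(
      "jdbc:postgresql://host/db", "schema.table",
      "id", 0L, 10000L, 8, props)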
org.apache.spark.sql.SQLContext.jsonFile(String)
As of 1.4.0, replaced by read().json().
|
org.apache.spark.sql.SQLContext.jsonFile(String, double)
As of 1.4.0, replaced by read().json().
|
org.apache.spark.sql.SQLContext.jsonFile(String, StructType)
As of 1.4.0, replaced by read().json().
|
org.apache.spark.sql.SQLContext.jsonRDD(JavaRDD<String>)
As of 1.4.0, replaced by read().json().
|
org.apache.spark.sql.SQLContext.jsonRDD(JavaRDD<String>, double)
As of 1.4.0, replaced by read().json().
|
org.apache.spark.sql.SQLContext.jsonRDD(JavaRDD<String>, StructType)
As of 1.4.0, replaced by read().json().
|
org.apache.spark.sql.SQLContext.jsonRDD(RDD<String>)
As of 1.4.0, replaced by read().json().
|
org.apache.spark.sql.SQLContext.jsonRDD(RDD<String>, double)
As of 1.4.0, replaced by read().json().
|
org.apache.spark.sql.SQLContext.jsonRDD(RDD<String>, StructType)
As of 1.4.0, replaced by read().json().
|
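The jsonFile and jsonRDD entries above all collapse onto DataFrameReader.json(); a sketch assuming a SparkSession named spark, where the path is hypothetical, schema is an assumed StructType, and stringRDD is an assumed RDD[String]:

    // was: sqlContext.jsonFile("/data/people.json")
    val people = spark.read.json("/data/people.json")
    // was: sqlContext.jsonFile("/data/people.json", schema)
    val typed = spark.read.schema(schema).json("/data/people.json")
    // was: sqlContext.jsonRDD(stringRDD)
    val fromRDD = spark.read.json(stringRDD)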
org.apache.spark.sql.SQLContext.load(String)
As of 1.4.0, replaced by read().load(path).
|
org.apache.spark.sql.SQLContext.load(String, java.util.Map<String, String>)
As of 1.4.0, replaced by read().format(source).options(options).load().
|
org.apache.spark.sql.SQLContext.load(String, scala.collection.immutable.Map<String, String>)
As of 1.4.0, replaced by read().format(source).options(options).load().
|
org.apache.spark.sql.SQLContext.load(String, String)
As of 1.4.0, replaced by read().format(source).load(path).
|
org.apache.spark.sql.SQLContext.load(String, StructType, java.util.Map<String, String>)
As of 1.4.0, replaced by read().format(source).schema(schema).options(options).load().
|
org.apache.spark.sql.SQLContext.load(String, StructType, scala.collection.immutable.Map<String, String>)
As of 1.4.0, replaced by read().format(source).schema(schema).options(options).load().
|
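One DataFrameReader chain covers every load overload above: source, schema, and options map onto format(), schema(), and options(). A sketch assuming a SparkSession named spark and an existing StructType named schema; the path is hypothetical.

    // was: sqlContext.load(source, schema, options)
    val df = spark.read
      .format("csv")                      // the `source` argument
      .schema(schema)                     // optional explicit StructType
      .options(Map("header" -> "true"))   // the `options` argument
      .load("/data/input")                // or pass "path" in options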
org.apache.spark.ml.regression.LinearRegressionSummary.model()
The model field is deprecated and will be removed in 2.1.0. Since 2.0.0.
|
org.apache.spark.sql.functions.monotonicallyIncreasingId()
Use monotonically_increasing_id(). Since 2.0.0.
|
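The rename in code, assuming an existing DataFrame named df:

    import org.apache.spark.sql.functions.monotonically_increasing_id

    // was: df.withColumn("id", monotonicallyIncreasingId())
    val withIds = df.withColumn("id", monotonically_increasing_id())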
org.apache.spark.ml.classification.RandomForestClassificationModel.numTrees()
Use getNumTrees instead. This method will be removed in 2.1.0.
|
org.apache.spark.ml.regression.RandomForestRegressionModel.numTrees()
Use getNumTrees instead. This method will be removed in 2.1.0.
|
org.apache.spark.sql.SQLContext.parquetFile(Seq<String>)
Use read.parquet() instead. Since 1.4.0.
|
org.apache.spark.sql.SQLContext.parquetFile(String...)
As of 1.4.0, replaced by read().parquet().
|
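Both parquetFile variants above become a single reader call, assuming a SparkSession named spark and a hypothetical path:

    // was: sqlContext.parquetFile("/data/events.parquet")
    val events = spark.read.parquet("/data/events.parquet")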
org.apache.spark.mllib.evaluation.MulticlassMetrics.precision()
Use accuracy. Since 2.0.0.
|
org.apache.spark.mllib.evaluation.MulticlassMetrics.recall()
Use accuracy. Since 2.0.0.
|
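The unweighted precision(), recall(), and fMeasure() above all reduce to the same micro-averaged quantity in single-label multiclass evaluation, which is why each one redirects to accuracy. A sketch assuming predictionAndLabels is an existing RDD[(Double, Double)] of (prediction, label) pairs:

    import org.apache.spark.mllib.evaluation.MulticlassMetrics

    val metrics = new MulticlassMetrics(predictionAndLabels)
    // was: metrics.precision() / metrics.recall() / metrics.fMeasure()
    val acc = metrics.accuracy
    // the per-label variants are not deprecated:
    val p = metrics.precision(1.0)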
org.apache.spark.sql.Dataset.registerTempTable(String)
Use createOrReplaceTempView(viewName) instead. Since 2.0.0.
|
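The rename, assuming an existing DataFrame df and SparkSession spark; the view name and query are placeholders:

    // was: df.registerTempTable("people")
    df.createOrReplaceTempView("people")
    val adults = spark.sql("SELECT * FROM people WHERE age >= 18")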
org.apache.spark.sql.SQLContext.setActive(SQLContext)
Use SparkSession.setActiveSession instead. Since 2.0.0.
|
org.apache.spark.ml.feature.ChiSqSelectorModel.setLabelCol(String)
labelCol is not used by ChiSqSelectorModel. Since 2.0.0.
|
org.apache.spark.sql.Dataset.unionAll(Dataset)
Use union(). Since 2.0.0.
|
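The rename, assuming two existing DataFrames df1 and df2 with identical schemas; like the old unionAll, union() resolves columns by position and does not deduplicate rows:

    // was: df1.unionAll(df2)
    val combined = df1.union(df2)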
org.apache.spark.ml.param.Params.validateParams()
Will be removed in 2.1.0. All the checks should be merged into transformSchema.
|