abstract class CassandraRDD[R] extends RDD[R]
Inheritance
- CassandraRDD
- RDD
- Logging
- Serializable
- Serializable
- AnyRef
- Any
Instance Constructors
- new CassandraRDD(sc: SparkContext, dep: Seq[Dependency[_]])(implicit arg0: ClassTag[R])
Type Members
- abstract type Self <: CassandraRDD[R]
This is slightly different from Scala's this.type. this.type is the unique singleton type of an object, which is not compatible with other instances of the same type, so returning anything other than this is not really possible without lying to the compiler via explicit casts. Here, Self is used instead, to return a copy of the object - a different instance of the same type.
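A minimal sketch of the pattern, using hypothetical names rather than the connector's actual code:

  // Self lets fluent methods return the concrete subtype as a new instance,
  // which this.type could not express.
  abstract class FluentRDD[R] {
    type Self <: FluentRDD[R]
    protected def copy(limit: Option[Long]): Self
    def limit(n: Long): Self = copy(Some(n)) // chainable, preserves the subtype
  }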
Abstract Value Members
- abstract def cassandraCount(): Long
Counts the number of items in this RDD by selecting count(*) on the Cassandra table.
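For example (a hypothetical session, assuming a keyspace "ks" and a table "users"):

  // Pushes the count down to Cassandra instead of fetching rows into Spark
  val n: Long = sc.cassandraTable("ks", "users").cassandraCount()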
- abstract def clusteringOrder: Option[ClusteringOrder]
- Attributes
- protected
- abstract def columnNames: ColumnSelector
- Attributes
- protected
- abstract def compute(split: Partition, context: TaskContext): Iterator[R]
- Definition Classes
- RDD
- Annotations
- @DeveloperApi()
- abstract def connector: CassandraConnector
- Attributes
- protected
- abstract def copy(columnNames: ColumnSelector = columnNames, where: CqlWhereClause = where, limit: Option[CassandraLimit] = limit, clusteringOrder: Option[ClusteringOrder] = None, readConf: ReadConf = readConf, connector: CassandraConnector = connector): Self
Returns a copy of this RDD with some of the properties changed.
- Attributes
- protected
- abstract def getPartitions: Array[Partition]
- Attributes
- protected
- Definition Classes
- RDD
- abstract def keyspaceName: String
- Attributes
- protected[spark.connector]
- abstract def limit: Option[CassandraLimit]
- Attributes
- protected
- abstract def narrowColumnSelection(columns: Seq[ColumnRef]): Seq[ColumnRef]
- Attributes
- protected
- abstract def readConf: ReadConf
- Attributes
- protected
- abstract val selectedColumnRefs: Seq[ColumnRef]
- abstract def tableName: String
- Attributes
- protected[spark.connector]
- abstract def toEmptyCassandraRDD: EmptyCassandraRDD[R]
- abstract def where: CqlWhereClause
- Attributes
- protected
Concrete Value Members
- final def !=(arg0: Any): Boolean
- Definition Classes
- AnyRef → Any
- final def ##(): Int
- Definition Classes
- AnyRef → Any
- def ++(other: RDD[R]): RDD[R]
- Definition Classes
- RDD
- final def ==(arg0: Any): Boolean
- Definition Classes
- AnyRef → Any
- def aggregate[U](zeroValue: U)(seqOp: (U, R) ⇒ U, combOp: (U, U) ⇒ U)(implicit arg0: ClassTag[U]): U
- Definition Classes
- RDD
- def as[B, A0, A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11](f: (A0, A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11) ⇒ B)(implicit arg0: ClassTag[B], arg1: TypeConverter[A0], arg2: TypeConverter[A1], arg3: TypeConverter[A2], arg4: TypeConverter[A3], arg5: TypeConverter[A4], arg6: TypeConverter[A5], arg7: TypeConverter[A6], arg8: TypeConverter[A7], arg9: TypeConverter[A8], arg10: TypeConverter[A9], arg11: TypeConverter[A10], arg12: TypeConverter[A11]): CassandraRDD[B]
- def as[B, A0, A1, A2, A3, A4, A5, A6, A7, A8, A9, A10](f: (A0, A1, A2, A3, A4, A5, A6, A7, A8, A9, A10) ⇒ B)(implicit arg0: ClassTag[B], arg1: TypeConverter[A0], arg2: TypeConverter[A1], arg3: TypeConverter[A2], arg4: TypeConverter[A3], arg5: TypeConverter[A4], arg6: TypeConverter[A5], arg7: TypeConverter[A6], arg8: TypeConverter[A7], arg9: TypeConverter[A8], arg10: TypeConverter[A9], arg11: TypeConverter[A10]): CassandraRDD[B]
- def as[B, A0, A1, A2, A3, A4, A5, A6, A7, A8, A9](f: (A0, A1, A2, A3, A4, A5, A6, A7, A8, A9) ⇒ B)(implicit arg0: ClassTag[B], arg1: TypeConverter[A0], arg2: TypeConverter[A1], arg3: TypeConverter[A2], arg4: TypeConverter[A3], arg5: TypeConverter[A4], arg6: TypeConverter[A5], arg7: TypeConverter[A6], arg8: TypeConverter[A7], arg9: TypeConverter[A8], arg10: TypeConverter[A9]): CassandraRDD[B]
- def as[B, A0, A1, A2, A3, A4, A5, A6, A7, A8](f: (A0, A1, A2, A3, A4, A5, A6, A7, A8) ⇒ B)(implicit arg0: ClassTag[B], arg1: TypeConverter[A0], arg2: TypeConverter[A1], arg3: TypeConverter[A2], arg4: TypeConverter[A3], arg5: TypeConverter[A4], arg6: TypeConverter[A5], arg7: TypeConverter[A6], arg8: TypeConverter[A7], arg9: TypeConverter[A8]): CassandraRDD[B]
- def as[B, A0, A1, A2, A3, A4, A5, A6, A7](f: (A0, A1, A2, A3, A4, A5, A6, A7) ⇒ B)(implicit arg0: ClassTag[B], arg1: TypeConverter[A0], arg2: TypeConverter[A1], arg3: TypeConverter[A2], arg4: TypeConverter[A3], arg5: TypeConverter[A4], arg6: TypeConverter[A5], arg7: TypeConverter[A6], arg8: TypeConverter[A7]): CassandraRDD[B]
- def as[B, A0, A1, A2, A3, A4, A5, A6](f: (A0, A1, A2, A3, A4, A5, A6) ⇒ B)(implicit arg0: ClassTag[B], arg1: TypeConverter[A0], arg2: TypeConverter[A1], arg3: TypeConverter[A2], arg4: TypeConverter[A3], arg5: TypeConverter[A4], arg6: TypeConverter[A5], arg7: TypeConverter[A6]): CassandraRDD[B]
- def as[B, A0, A1, A2, A3, A4, A5](f: (A0, A1, A2, A3, A4, A5) ⇒ B)(implicit arg0: ClassTag[B], arg1: TypeConverter[A0], arg2: TypeConverter[A1], arg3: TypeConverter[A2], arg4: TypeConverter[A3], arg5: TypeConverter[A4], arg6: TypeConverter[A5]): CassandraRDD[B]
- def as[B, A0, A1, A2, A3, A4](f: (A0, A1, A2, A3, A4) ⇒ B)(implicit arg0: ClassTag[B], arg1: TypeConverter[A0], arg2: TypeConverter[A1], arg3: TypeConverter[A2], arg4: TypeConverter[A3], arg5: TypeConverter[A4]): CassandraRDD[B]
- def as[B, A0, A1, A2, A3](f: (A0, A1, A2, A3) ⇒ B)(implicit arg0: ClassTag[B], arg1: TypeConverter[A0], arg2: TypeConverter[A1], arg3: TypeConverter[A2], arg4: TypeConverter[A3]): CassandraRDD[B]
- def as[B, A0, A1, A2](f: (A0, A1, A2) ⇒ B)(implicit arg0: ClassTag[B], arg1: TypeConverter[A0], arg2: TypeConverter[A1], arg3: TypeConverter[A2]): CassandraRDD[B]
- def as[B, A0, A1](f: (A0, A1) ⇒ B)(implicit arg0: ClassTag[B], arg1: TypeConverter[A0], arg2: TypeConverter[A1]): CassandraRDD[B]
- def as[B, A0](f: (A0) ⇒ B)(implicit arg0: ClassTag[B], arg1: TypeConverter[A0]): CassandraRDD[B]
Maps each row into an object of a different type using the provided function, which takes column value(s) as argument(s). Can be used to convert each row to a tuple or a case class object:

  sc.cassandraTable("ks", "table")
    .select("column1")
    .as((s: String) => s)         // yields CassandraRDD[String]

  sc.cassandraTable("ks", "table")
    .select("column1", "column2")
    .as((_: String, _: Long))     // yields CassandraRDD[(String, Long)]

  case class MyRow(key: String, value: Long)
  sc.cassandraTable("ks", "table")
    .select("column1", "column2")
    .as(MyRow)                    // yields CassandraRDD[MyRow]
- final def asInstanceOf[T0]: T0
- Definition Classes
- Any
- def barrier(): RDDBarrier[R]
- Definition Classes
- RDD
- Annotations
- @Experimental() @Since( "2.4.0" )
- def cache(): CassandraRDD.this.type
- Definition Classes
- RDD
- def cartesian[U](other: RDD[U])(implicit arg0: ClassTag[U]): RDD[(R, U)]
- Definition Classes
- RDD
- def checkpoint(): Unit
- Definition Classes
- RDD
- def cleanShuffleDependencies(blocking: Boolean): Unit
- Definition Classes
- RDD
- Annotations
- @DeveloperApi() @Since( "3.1.0" )
- def clearDependencies(): Unit
- Attributes
- protected
- Definition Classes
- RDD
- def clone(): AnyRef
- Attributes
- protected[lang]
- Definition Classes
- AnyRef
- Annotations
- @throws( ... ) @native() @HotSpotIntrinsicCandidate()
- def clusteringOrder(order: ClusteringOrder): Self
Adds a CQL ORDER BY clause to the query. It can be applied only when there are clustering columns and a primary key predicate is pushed down in where. It is useful when the default direction of ordering rows within a single Cassandra partition needs to be changed.
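A hypothetical usage sketch, assuming a table with clustering columns and a pushed-down partition key predicate (the ClusteringOrder import and keyspace/table names are assumptions):

  import com.datastax.spark.connector.rdd.ClusteringOrder

  // Read one user's rows in descending clustering order
  sc.cassandraTable("ks", "events")
    .where("user_id = ?", "u1")
    .clusteringOrder(ClusteringOrder.Descending)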
- def coalesce(numPartitions: Int, shuffle: Boolean, partitionCoalescer: Option[PartitionCoalescer])(implicit ord: Ordering[R]): RDD[R]
- Definition Classes
- RDD
- def collect[U](f: PartialFunction[R, U])(implicit arg0: ClassTag[U]): RDD[U]
- Definition Classes
- RDD
- def collect(): Array[R]
- Definition Classes
- RDD
- def context: SparkContext
- Definition Classes
- RDD
- def convertTo[B](implicit arg0: ClassTag[B], arg1: RowReaderFactory[B]): CassandraRDD[B]
- Attributes
- protected
- def count(): Long
- Definition Classes
- RDD
- def countApprox(timeout: Long, confidence: Double): PartialResult[BoundedDouble]
- Definition Classes
- RDD
- def countApproxDistinct(relativeSD: Double): Long
- Definition Classes
- RDD
- def countApproxDistinct(p: Int, sp: Int): Long
- Definition Classes
- RDD
- def countByValue()(implicit ord: Ordering[R]): Map[R, Long]
- Definition Classes
- RDD
- def countByValueApprox(timeout: Long, confidence: Double)(implicit ord: Ordering[R]): PartialResult[Map[R, BoundedDouble]]
- Definition Classes
- RDD
- final def dependencies: Seq[Dependency[_]]
- Definition Classes
- RDD
- def distinct(): RDD[R]
- Definition Classes
- RDD
- def distinct(numPartitions: Int)(implicit ord: Ordering[R]): RDD[R]
- Definition Classes
- RDD
- final def eq(arg0: AnyRef): Boolean
- Definition Classes
- AnyRef
- def equals(arg0: Any): Boolean
- Definition Classes
- AnyRef → Any
- def filter(f: (R) ⇒ Boolean): RDD[R]
- Definition Classes
- RDD
- def first(): R
- Definition Classes
- RDD
- def firstParent[U](implicit arg0: ClassTag[U]): RDD[U]
- Attributes
- protected[org.apache.spark]
- Definition Classes
- RDD
- def flatMap[U](f: (R) ⇒ TraversableOnce[U])(implicit arg0: ClassTag[U]): RDD[U]
- Definition Classes
- RDD
- def fold(zeroValue: R)(op: (R, R) ⇒ R): R
- Definition Classes
- RDD
- def foreach(f: (R) ⇒ Unit): Unit
- Definition Classes
- RDD
- def foreachPartition(f: (Iterator[R]) ⇒ Unit): Unit
- Definition Classes
- RDD
- def getCheckpointFile: Option[String]
- Definition Classes
- RDD
- final def getClass(): Class[_]
- Definition Classes
- AnyRef → Any
- Annotations
- @native() @HotSpotIntrinsicCandidate()
- def getDependencies: Seq[Dependency[_]]
- Attributes
- protected
- Definition Classes
- RDD
- final def getNumPartitions: Int
- Definition Classes
- RDD
- Annotations
- @Since( "1.6.0" )
- def getOutputDeterministicLevel: org.apache.spark.rdd.DeterministicLevel.Value
- Attributes
- protected
- Definition Classes
- RDD
- Annotations
- @DeveloperApi()
- def getPreferredLocations(split: Partition): Seq[String]
- Attributes
- protected
- Definition Classes
- RDD
- def getResourceProfile(): ResourceProfile
- Definition Classes
- RDD
- Annotations
- @Experimental() @Since( "3.1.0" )
- def getStorageLevel: StorageLevel
- Definition Classes
- RDD
- def glom(): RDD[Array[R]]
- Definition Classes
- RDD
- def groupBy[K](f: (R) ⇒ K, p: Partitioner)(implicit kt: ClassTag[K], ord: Ordering[K]): RDD[(K, Iterable[R])]
- Definition Classes
- RDD
- def groupBy[K](f: (R) ⇒ K, numPartitions: Int)(implicit kt: ClassTag[K]): RDD[(K, Iterable[R])]
- Definition Classes
- RDD
- def groupBy[K](f: (R) ⇒ K)(implicit kt: ClassTag[K]): RDD[(K, Iterable[R])]
- Definition Classes
- RDD
- def hashCode(): Int
- Definition Classes
- AnyRef → Any
- Annotations
- @native() @HotSpotIntrinsicCandidate()
- val id: Int
- Definition Classes
- RDD
- def initializeLogIfNecessary(isInterpreter: Boolean, silent: Boolean): Boolean
- Attributes
- protected
- Definition Classes
- Logging
- def initializeLogIfNecessary(isInterpreter: Boolean): Unit
- Attributes
- protected
- Definition Classes
- Logging
- def intersection(other: RDD[R], numPartitions: Int): RDD[R]
- Definition Classes
- RDD
- def intersection(other: RDD[R], partitioner: Partitioner)(implicit ord: Ordering[R]): RDD[R]
- Definition Classes
- RDD
- def intersection(other: RDD[R]): RDD[R]
- Definition Classes
- RDD
- lazy val isBarrier_: Boolean
- Attributes
- protected
- Definition Classes
- RDD
- Annotations
- @transient()
- def isCheckpointed: Boolean
- Definition Classes
- RDD
- def isEmpty(): Boolean
- Definition Classes
- RDD
- final def isInstanceOf[T0]: Boolean
- Definition Classes
- Any
- def isTraceEnabled(): Boolean
- Attributes
- protected
- Definition Classes
- Logging
- final def iterator(split: Partition, context: TaskContext): Iterator[R]
- Definition Classes
- RDD
- def keyBy[K](f: (R) ⇒ K): RDD[(K, R)]
- Definition Classes
- RDD
- def limit(rowLimit: Long): Self
Adds a LIMIT clause to the CQL select statement. The limit is applied to each created Spark partition; in other words, unless the data is fetched from a single Cassandra partition, the number of results is unpredictable.
The main purpose of passing a limit clause is to fetch the top n rows from a single Cassandra partition, when the table is designed so that it uses clustering keys and a partition key predicate is passed to the where clause.
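A hypothetical usage sketch, assuming an "events" table whose rows cluster by time in descending order:

  // All rows come from one Cassandra partition, so LIMIT 10 yields the top 10
  sc.cassandraTable("ks", "events")
    .where("user_id = ?", "u1")
    .limit(10)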
- def localCheckpoint(): CassandraRDD.this.type
- Definition Classes
- RDD
- def log: Logger
- Attributes
- protected
- Definition Classes
- Logging
- def logDebug(msg: ⇒ String, throwable: Throwable): Unit
- Attributes
- protected
- Definition Classes
- Logging
- def logDebug(msg: ⇒ String): Unit
- Attributes
- protected
- Definition Classes
- Logging
- def logError(msg: ⇒ String, throwable: Throwable): Unit
- Attributes
- protected
- Definition Classes
- Logging
- def logError(msg: ⇒ String): Unit
- Attributes
- protected
- Definition Classes
- Logging
- def logInfo(msg: ⇒ String, throwable: Throwable): Unit
- Attributes
- protected
- Definition Classes
- Logging
- def logInfo(msg: ⇒ String): Unit
- Attributes
- protected
- Definition Classes
- Logging
- def logName: String
- Attributes
- protected
- Definition Classes
- Logging
- def logTrace(msg: ⇒ String, throwable: Throwable): Unit
- Attributes
- protected
- Definition Classes
- Logging
- def logTrace(msg: ⇒ String): Unit
- Attributes
- protected
- Definition Classes
- Logging
- def logWarning(msg: ⇒ String, throwable: Throwable): Unit
- Attributes
- protected
- Definition Classes
- Logging
- def logWarning(msg: ⇒ String): Unit
- Attributes
- protected
- Definition Classes
- Logging
- def map[U](f: (R) ⇒ U)(implicit arg0: ClassTag[U]): RDD[U]
- Definition Classes
- RDD
- def mapPartitions[U](f: (Iterator[R]) ⇒ Iterator[U], preservesPartitioning: Boolean)(implicit arg0: ClassTag[U]): RDD[U]
- Definition Classes
- RDD
- def mapPartitionsWithIndex[U](f: (Int, Iterator[R]) ⇒ Iterator[U], preservesPartitioning: Boolean)(implicit arg0: ClassTag[U]): RDD[U]
- Definition Classes
- RDD
- def max()(implicit ord: Ordering[R]): R
- Definition Classes
- RDD
- def min()(implicit ord: Ordering[R]): R
- Definition Classes
- RDD
- var name: String
- Definition Classes
- RDD
- final def ne(arg0: AnyRef): Boolean
- Definition Classes
- AnyRef
- final def notify(): Unit
- Definition Classes
- AnyRef
- Annotations
- @native() @HotSpotIntrinsicCandidate()
- final def notifyAll(): Unit
- Definition Classes
- AnyRef
- Annotations
- @native() @HotSpotIntrinsicCandidate()
- def parent[U](j: Int)(implicit arg0: ClassTag[U]): RDD[U]
- Attributes
- protected[org.apache.spark]
- Definition Classes
- RDD
- val partitioner: Option[Partitioner]
- Definition Classes
- RDD
- final def partitions: Array[Partition]
- Definition Classes
- RDD
- def perPartitionLimit(rowLimit: Long): Self
Adds a PER PARTITION LIMIT clause to the CQL select statement. The limit is applied to every Cassandra partition. Valid only for Cassandra 3.6+.
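A hypothetical usage sketch:

  // At most 3 rows per Cassandra partition, e.g. the 3 newest events per user
  sc.cassandraTable("ks", "events")
    .perPartitionLimit(3)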
- def persist(): CassandraRDD.this.type
- Definition Classes
- RDD
- def persist(newLevel: StorageLevel): CassandraRDD.this.type
- Definition Classes
- RDD
- def pipe(command: Seq[String], env: Map[String, String], printPipeContext: ((String) ⇒ Unit) ⇒ Unit, printRDDElement: (R, (String) ⇒ Unit) ⇒ Unit, separateWorkingDir: Boolean, bufferSize: Int, encoding: String): RDD[String]
- Definition Classes
- RDD
- def pipe(command: String, env: Map[String, String]): RDD[String]
- Definition Classes
- RDD
- def pipe(command: String): RDD[String]
- Definition Classes
- RDD
- final def preferredLocations(split: Partition): Seq[String]
- Definition Classes
- RDD
- def randomSplit(weights: Array[Double], seed: Long): Array[RDD[R]]
- Definition Classes
- RDD
- def reduce(f: (R, R) ⇒ R): R
- Definition Classes
- RDD
- def repartition(numPartitions: Int)(implicit ord: Ordering[R]): RDD[R]
- Definition Classes
- RDD
- def sample(withReplacement: Boolean, fraction: Double, seed: Long): RDD[R]
- Definition Classes
- RDD
- def saveAsObjectFile(path: String): Unit
- Definition Classes
- RDD
- def saveAsTextFile(path: String, codec: Class[_ <: CompressionCodec]): Unit
- Definition Classes
- RDD
- def saveAsTextFile(path: String): Unit
- Definition Classes
- RDD
def
select(columns: ColumnRef*): Self
Narrows down the selected set of columns.
Narrows down the selected set of columns. Use this for better performance, when you don't need all the columns in the result RDD. When called multiple times, it selects the subset of the already selected columns, so after a column was removed by the previous
select
call, it is not possible to add it back.The selected columns are ColumnRef instances. This type allows to specify columns for straightforward retrieval and to read TTL or write time of regular columns as well. Implicit conversions included in com.datastax.spark.connector package make it possible to provide just column names (which is also backward compatible) and optional add
.ttl
or.writeTime
suffix in order to create an appropriate ColumnRef instance. - def selectedColumnNames: Seq[String]
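A hypothetical usage sketch for select, relying on the implicit String-to-ColumnRef conversions mentioned above (keyspace, table, and column names are assumptions):

  import com.datastax.spark.connector._

  // Fetch only the needed columns, plus the write time of "email"
  sc.cassandraTable("ks", "users")
    .select("name", "email", "email".writeTime)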
- def setName(_name: String): CassandraRDD.this.type
- Definition Classes
- RDD
- def sortBy[K](f: (R) ⇒ K, ascending: Boolean, numPartitions: Int)(implicit ord: Ordering[K], ctag: ClassTag[K]): RDD[R]
- Definition Classes
- RDD
- def sparkContext: SparkContext
- Definition Classes
- RDD
- def subtract(other: RDD[R], p: Partitioner)(implicit ord: Ordering[R]): RDD[R]
- Definition Classes
- RDD
- def subtract(other: RDD[R], numPartitions: Int): RDD[R]
- Definition Classes
- RDD
- def subtract(other: RDD[R]): RDD[R]
- Definition Classes
- RDD
- final def synchronized[T0](arg0: ⇒ T0): T0
- Definition Classes
- AnyRef
- def take(num: Int): Array[R]
- Definition Classes
- CassandraRDD → RDD
- def takeOrdered(num: Int)(implicit ord: Ordering[R]): Array[R]
- Definition Classes
- RDD
- def takeSample(withReplacement: Boolean, num: Int, seed: Long): Array[R]
- Definition Classes
- RDD
- def toDebugString: String
- Definition Classes
- RDD
- def toJavaRDD(): JavaRDD[R]
- Definition Classes
- RDD
- def toLocalIterator: Iterator[R]
- Definition Classes
- RDD
- def toString(): String
- Definition Classes
- RDD → AnyRef → Any
- def top(num: Int)(implicit ord: Ordering[R]): Array[R]
- Definition Classes
- RDD
- def treeAggregate[U](zeroValue: U, seqOp: (U, R) ⇒ U, combOp: (U, U) ⇒ U, depth: Int, finalAggregateOnExecutor: Boolean)(implicit arg0: ClassTag[U]): U
- Definition Classes
- RDD
- def treeAggregate[U](zeroValue: U)(seqOp: (U, R) ⇒ U, combOp: (U, U) ⇒ U, depth: Int)(implicit arg0: ClassTag[U]): U
- Definition Classes
- RDD
- def treeReduce(f: (R, R) ⇒ R, depth: Int): R
- Definition Classes
- RDD
- def union(other: RDD[R]): RDD[R]
- Definition Classes
- RDD
- def unpersist(blocking: Boolean): CassandraRDD.this.type
- Definition Classes
- RDD
- final def wait(arg0: Long, arg1: Int): Unit
- Definition Classes
- AnyRef
- Annotations
- @throws( ... )
- final def wait(arg0: Long): Unit
- Definition Classes
- AnyRef
- Annotations
- @throws( ... ) @native()
- final def wait(): Unit
- Definition Classes
- AnyRef
- Annotations
- @throws( ... )
- def where(cql: String, values: Any*): Self
Adds CQL WHERE predicate(s) to the query. Useful for leveraging secondary indexes in Cassandra. Implicitly adds an ALLOW FILTERING clause to the WHERE clause; however, beware that some predicates might be rejected by Cassandra, particularly when they filter on an unindexed, non-clustering column. A sketch follows the next item.
- def withAscOrder: Self
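A hypothetical usage sketch for where, with values bound via placeholders (table and column names are assumptions):

  // Push the predicate down to Cassandra instead of filtering in Spark
  sc.cassandraTable("ks", "events")
    .where("user_id = ? AND event_type = ?", "u1", "click")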
- def withConnector(connector: CassandraConnector): Self
Returns a copy of this Cassandra RDD with the specified connector.
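A hypothetical usage sketch, reading the same table through a connector pointed at another cluster (the host address and config key are assumptions):

  import com.datastax.spark.connector.cql.CassandraConnector

  val otherCluster = CassandraConnector(
    sc.getConf.set("spark.cassandra.connection.host", "10.0.0.2"))
  sc.cassandraTable("ks", "events").withConnector(otherCluster)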
- def withDescOrder: Self
- def withReadConf(readConf: ReadConf): Self
Allows setting a custom read configuration, e.g. consistency level or fetch size.
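A hypothetical usage sketch; the parameter name follows the connector's ReadConf and is assumed here:

  import com.datastax.spark.connector.rdd.ReadConf

  // Fetch rows from Cassandra in smaller pages for this read
  sc.cassandraTable("ks", "events")
    .withReadConf(ReadConf(fetchSizeInRows = 100))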
- def withResources(rp: ResourceProfile): CassandraRDD.this.type
- Definition Classes
- RDD
- Annotations
- @Experimental() @Since( "3.1.0" )
- def zip[U](other: RDD[U])(implicit arg0: ClassTag[U]): RDD[(R, U)]
- Definition Classes
- RDD
- def zipPartitions[B, C, D, V](rdd2: RDD[B], rdd3: RDD[C], rdd4: RDD[D])(f: (Iterator[R], Iterator[B], Iterator[C], Iterator[D]) ⇒ Iterator[V])(implicit arg0: ClassTag[B], arg1: ClassTag[C], arg2: ClassTag[D], arg3: ClassTag[V]): RDD[V]
- Definition Classes
- RDD
- def zipPartitions[B, C, D, V](rdd2: RDD[B], rdd3: RDD[C], rdd4: RDD[D], preservesPartitioning: Boolean)(f: (Iterator[R], Iterator[B], Iterator[C], Iterator[D]) ⇒ Iterator[V])(implicit arg0: ClassTag[B], arg1: ClassTag[C], arg2: ClassTag[D], arg3: ClassTag[V]): RDD[V]
- Definition Classes
- RDD
- def zipPartitions[B, C, V](rdd2: RDD[B], rdd3: RDD[C])(f: (Iterator[R], Iterator[B], Iterator[C]) ⇒ Iterator[V])(implicit arg0: ClassTag[B], arg1: ClassTag[C], arg2: ClassTag[V]): RDD[V]
- Definition Classes
- RDD
- def zipPartitions[B, C, V](rdd2: RDD[B], rdd3: RDD[C], preservesPartitioning: Boolean)(f: (Iterator[R], Iterator[B], Iterator[C]) ⇒ Iterator[V])(implicit arg0: ClassTag[B], arg1: ClassTag[C], arg2: ClassTag[V]): RDD[V]
- Definition Classes
- RDD
- def zipPartitions[B, V](rdd2: RDD[B])(f: (Iterator[R], Iterator[B]) ⇒ Iterator[V])(implicit arg0: ClassTag[B], arg1: ClassTag[V]): RDD[V]
- Definition Classes
- RDD
- def zipPartitions[B, V](rdd2: RDD[B], preservesPartitioning: Boolean)(f: (Iterator[R], Iterator[B]) ⇒ Iterator[V])(implicit arg0: ClassTag[B], arg1: ClassTag[V]): RDD[V]
- Definition Classes
- RDD
- def zipWithIndex(): RDD[(R, Long)]
- Definition Classes
- RDD
- def zipWithUniqueId(): RDD[(R, Long)]
- Definition Classes
- RDD
Deprecated Value Members
- def finalize(): Unit
- Attributes
- protected[lang]
- Definition Classes
- AnyRef
- Annotations
- @throws( classOf[java.lang.Throwable] ) @Deprecated @deprecated
- Deprecated
(Since version ) see corresponding Javadoc for more information.