object Index
- Alphabetic
- By Inheritance
- Index
- AnyRef
- Any
- Hide All
- Show All
- Public
- All
Value Members
-
final
def
!=(arg0: Any): Boolean
- Definition Classes
- AnyRef → Any
-
final
def
##(): Int
- Definition Classes
- AnyRef → Any
-
final
def
==(arg0: Any): Boolean
- Definition Classes
- AnyRef → Any
-
final
def
asInstanceOf[T0]: T0
- Definition Classes
- Any
-
def
clone(): AnyRef
- Attributes
- protected[lang]
- Definition Classes
- AnyRef
- Annotations
- @throws( ... ) @native()
-
final
def
eq(arg0: AnyRef): Boolean
- Definition Classes
- AnyRef
-
def
equals(arg0: Any): Boolean
- Definition Classes
- AnyRef → Any
-
def
finalize(): Unit
- Attributes
- protected[lang]
- Definition Classes
- AnyRef
- Annotations
- @throws( classOf[java.lang.Throwable] )
-
def
fromNTSeq(compatible: Index, read: NTSeq)(implicit spark: SparkSession): Index
Construct a new counting index from the given sequence. K-mers will not be normalized. This method is not intended for large amounts of data, as everything has to go through the Spark driver.
- compatible
A compatible index to copy parameters from
- read
Sequence to index
-
def
fromNTSeqs(compatible: Index, reads: Seq[NTSeq])(implicit spark: SparkSession): Index
Construct a new counting index from the given sequences. K-mers will not be normalized. This method is not intended for large amounts of data, as everything has to go through the Spark driver.
- compatible
A compatible index to copy parameters from
- reads
Sequences to index
-
def
fromNTSeqs(compatible: Index, reads: Dataset[NTSeq])(implicit spark: SparkSession): Index
Construct a new counting index from the given sequences. K-mers will not be normalized.
- compatible
A compatible index to copy parameters from
- reads
Sequences to index
-
final
def
getClass(): Class[_]
- Definition Classes
- AnyRef → Any
- Annotations
- @native()
- def getIndexSplitter(location: String, k: Int)(implicit spark: SparkSession): AnyMinSplitter
-
def
hashCode(): Int
- Definition Classes
- AnyRef → Any
- Annotations
- @native()
-
final
def
isInstanceOf[T0]: Boolean
- Definition Classes
- Any
-
final
def
ne(arg0: AnyRef): Boolean
- Definition Classes
- AnyRef
-
final
def
notify(): Unit
- Definition Classes
- AnyRef
- Annotations
- @native()
-
final
def
notifyAll(): Unit
- Definition Classes
- AnyRef
- Annotations
- @native()
- val random: SplittableRandom
- def randomTableName: String
-
def
reSplitBuckets(input: Dataset[ReducibleBucket], reducer: Reducer, spl: Broadcast[AnyMinSplitter])(implicit spark: SparkSession): Dataset[ReducibleBucket]
Split buckets into supermer/tag pairs according to a new minimizer ordering, constructing new buckets from the result. The resulting buckets will have shorter supermers but will respect the new ordering.
- input
buckets to split
- reducer
reducer to use for compacting the result
- spl
a splitter that reflects the new ordering
- def read(location: String, knownParams: Option[IndexParams] = None)(implicit spark: SparkSession): Index
-
final
def
synchronized[T0](arg0: ⇒ T0): T0
- Definition Classes
- AnyRef
-
def
toString(): String
- Definition Classes
- AnyRef → Any
-
final
def
wait(): Unit
- Definition Classes
- AnyRef
- Annotations
- @throws( ... )
-
final
def
wait(arg0: Long, arg1: Int): Unit
- Definition Classes
- AnyRef
- Annotations
- @throws( ... )
-
final
def
wait(arg0: Long): Unit
- Definition Classes
- AnyRef
- Annotations
- @throws( ... ) @native()
- def write(data: DataFrame, location: String, numBuckets: Int): Unit