Code stringlengths 103 85.9k | Summary listlengths 0 94 |
|---|---|
Please provide a description of the function:def foreachRDD(self, func):
if func.__code__.co_argcount == 1:
old_func = func
func = lambda t, rdd: old_func(rdd)
jfunc = TransformFunction(self._sc, func, self._jrdd_deserializer)
api = self._ssc._jvm.PythonDStream
... | [
"\n Apply a function to each RDD in this DStream.\n "
] |
Please provide a description of the function:def pprint(self, num=10):
def takeAndPrint(time, rdd):
taken = rdd.take(num + 1)
print("-------------------------------------------")
print("Time: %s" % time)
print("-------------------------------------------"... | [
"\n Print the first num elements of each RDD generated in this DStream.\n\n @param num: the number of elements, starting from the first, that will be printed.\n "
] |
Please provide a description of the function:def persist(self, storageLevel):
self.is_cached = True
javaStorageLevel = self._sc._getJavaStorageLevel(storageLevel)
self._jdstream.persist(javaStorageLevel)
return self | [
"\n Persist the RDDs of this DStream with the given storage level\n "
] |
Please provide a description of the function:def checkpoint(self, interval):
self.is_checkpointed = True
self._jdstream.checkpoint(self._ssc._jduration(interval))
return self | [
"\n Enable periodic checkpointing of RDDs of this DStream\n\n @param interval: time in seconds, after each period of that, generated\n RDD will be checkpointed\n "
] |
Please provide a description of the function:def groupByKey(self, numPartitions=None):
if numPartitions is None:
numPartitions = self._sc.defaultParallelism
return self.transform(lambda rdd: rdd.groupByKey(numPartitions)) | [
"\n Return a new DStream by applying groupByKey on each RDD.\n "
] |
Please provide a description of the function:def countByValue(self):
return self.map(lambda x: (x, 1)).reduceByKey(lambda x, y: x+y) | [
"\n Return a new DStream in which each RDD contains the counts of each\n distinct value in each RDD of this DStream.\n "
] |
Please provide a description of the function:def saveAsTextFiles(self, prefix, suffix=None):
def saveAsTextFile(t, rdd):
path = rddToFileName(prefix, suffix, t)
try:
rdd.saveAsTextFile(path)
except Py4JJavaError as e:
# after recovered... | [
"\n Save each RDD in this DStream as a text file, using string\n representation of elements.\n "
] |
Please provide a description of the function:def transform(self, func):
if func.__code__.co_argcount == 1:
oldfunc = func
func = lambda t, rdd: oldfunc(rdd)
assert func.__code__.co_argcount == 2, "func should take one or two arguments"
return TransformedDStream(s... | [
"\n Return a new DStream in which each RDD is generated by applying a function\n on each RDD of this DStream.\n\n `func` can have one argument of `rdd`, or have two arguments of\n (`time`, `rdd`)\n "
] |
Please provide a description of the function:def transformWith(self, func, other, keepSerializer=False):
if func.__code__.co_argcount == 2:
oldfunc = func
func = lambda t, a, b: oldfunc(a, b)
assert func.__code__.co_argcount == 3, "func should take two or three arguments... | [
"\n Return a new DStream in which each RDD is generated by applying a function\n on each RDD of this DStream and 'other' DStream.\n\n `func` can have two arguments of (`rdd_a`, `rdd_b`) or have three\n arguments of (`time`, `rdd_a`, `rdd_b`)\n "
] |
Please provide a description of the function:def union(self, other):
if self._slideDuration != other._slideDuration:
raise ValueError("the two DStream should have same slide duration")
return self.transformWith(lambda a, b: a.union(b), other, True) | [
"\n Return a new DStream by unifying data of another DStream with this DStream.\n\n @param other: Another DStream having the same interval (i.e., slideDuration)\n as this DStream.\n "
] |
Please provide a description of the function:def cogroup(self, other, numPartitions=None):
if numPartitions is None:
numPartitions = self._sc.defaultParallelism
return self.transformWith(lambda a, b: a.cogroup(b, numPartitions), other) | [
"\n Return a new DStream by applying 'cogroup' between RDDs of this\n DStream and `other` DStream.\n\n Hash partitioning is used to generate the RDDs with `numPartitions` partitions.\n "
] |
Please provide a description of the function:def _jtime(self, timestamp):
if isinstance(timestamp, datetime):
timestamp = time.mktime(timestamp.timetuple())
return self._sc._jvm.Time(long(timestamp * 1000)) | [
" Convert datetime or unix_timestamp into Time\n "
] |
Please provide a description of the function:def slice(self, begin, end):
jrdds = self._jdstream.slice(self._jtime(begin), self._jtime(end))
return [RDD(jrdd, self._sc, self._jrdd_deserializer) for jrdd in jrdds] | [
"\n Return all the RDDs between 'begin' to 'end' (both included)\n\n `begin`, `end` could be datetime.datetime() or unix_timestamp\n "
] |
Please provide a description of the function:def window(self, windowDuration, slideDuration=None):
self._validate_window_param(windowDuration, slideDuration)
d = self._ssc._jduration(windowDuration)
if slideDuration is None:
return DStream(self._jdstream.window(d), self._ssc... | [
"\n Return a new DStream in which each RDD contains all the elements seen in a\n sliding window of time over this DStream.\n\n @param windowDuration: width of the window; must be a multiple of this DStream's\n batching interval\n @param slideDuration: sli... |
Please provide a description of the function:def reduceByWindow(self, reduceFunc, invReduceFunc, windowDuration, slideDuration):
keyed = self.map(lambda x: (1, x))
reduced = keyed.reduceByKeyAndWindow(reduceFunc, invReduceFunc,
windowDuration, slideD... | [
"\n Return a new DStream in which each RDD has a single element generated by reducing all\n elements in a sliding window over this DStream.\n\n if `invReduceFunc` is not None, the reduction is done incrementally\n using the old window's reduced value :\n\n 1. reduce the new values... |
Please provide a description of the function:def countByWindow(self, windowDuration, slideDuration):
return self.map(lambda x: 1).reduceByWindow(operator.add, operator.sub,
windowDuration, slideDuration) | [
"\n Return a new DStream in which each RDD has a single element generated\n by counting the number of elements in a window over this DStream.\n windowDuration and slideDuration are as defined in the window() operation.\n\n This is equivalent to window(windowDuration, slideDuration).count... |
Please provide a description of the function:def countByValueAndWindow(self, windowDuration, slideDuration, numPartitions=None):
keyed = self.map(lambda x: (x, 1))
counted = keyed.reduceByKeyAndWindow(operator.add, operator.sub,
windowDuration, slide... | [
"\n Return a new DStream in which each RDD contains the count of distinct elements in\n RDDs in a sliding window over this DStream.\n\n @param windowDuration: width of the window; must be a multiple of this DStream's\n batching interval\n @param slideDuration... |
Please provide a description of the function:def groupByKeyAndWindow(self, windowDuration, slideDuration, numPartitions=None):
ls = self.mapValues(lambda x: [x])
grouped = ls.reduceByKeyAndWindow(lambda a, b: a.extend(b) or a, lambda a, b: a[len(b):],
w... | [
"\n Return a new DStream by applying `groupByKey` over a sliding window.\n Similar to `DStream.groupByKey()`, but applies it over a sliding window.\n\n @param windowDuration: width of the window; must be a multiple of this DStream's\n batching interval\n @par... |
Please provide a description of the function:def reduceByKeyAndWindow(self, func, invFunc, windowDuration, slideDuration=None,
numPartitions=None, filterFunc=None):
self._validate_window_param(windowDuration, slideDuration)
if numPartitions is None:
numP... | [
"\n Return a new DStream by applying incremental `reduceByKey` over a sliding window.\n\n The reduced value of over a new window is calculated using the old window's reduce value :\n 1. reduce the new values that entered the window (e.g., adding new counts)\n 2. \"inverse reduce\" the ... |
Please provide a description of the function:def updateStateByKey(self, updateFunc, numPartitions=None, initialRDD=None):
if numPartitions is None:
numPartitions = self._sc.defaultParallelism
if initialRDD and not isinstance(initialRDD, RDD):
initialRDD = self._sc.paral... | [
"\n Return a new \"state\" DStream where the state for each key is updated by applying\n the given function on the previous state of the key and the new values of the key.\n\n @param updateFunc: State update function. If this function returns None, then\n corresponding... |
Please provide a description of the function:def setParams(self, minSupport=0.3, minConfidence=0.8, itemsCol="items",
predictionCol="prediction", numPartitions=None):
kwargs = self._input_kwargs
return self._set(**kwargs) | [
"\n setParams(self, minSupport=0.3, minConfidence=0.8, itemsCol=\"items\", \\\n predictionCol=\"prediction\", numPartitions=None)\n "
] |
Please provide a description of the function:def setParams(self, minSupport=0.1, maxPatternLength=10, maxLocalProjDBSize=32000000,
sequenceCol="sequence"):
kwargs = self._input_kwargs
return self._set(**kwargs) | [
"\n setParams(self, minSupport=0.1, maxPatternLength=10, maxLocalProjDBSize=32000000, \\\n sequenceCol=\"sequence\")\n "
] |
Please provide a description of the function:def findFrequentSequentialPatterns(self, dataset):
self._transfer_params_to_java()
jdf = self._java_obj.findFrequentSequentialPatterns(dataset._jdf)
return DataFrame(jdf, dataset.sql_ctx) | [
"\n .. note:: Experimental\n\n Finds the complete set of frequent sequential patterns in the input sequences of itemsets.\n\n :param dataset: A dataframe containing a sequence column which is\n `ArrayType(ArrayType(T))` type, T is the item type for the input dataset.\n ... |
Please provide a description of the function:def first_spark_call():
tb = traceback.extract_stack()
if len(tb) == 0:
return None
file, line, module, what = tb[len(tb) - 1]
sparkpath = os.path.dirname(file)
first_spark_frame = len(tb) - 1
for i in range(0, len(tb)):
file, lin... | [
"\n Return a CallSite representing the first Spark call in the current call stack.\n "
] |
Please provide a description of the function:def parsePoint(line):
values = [float(s) for s in line.split(' ')]
if values[0] == -1: # Convert -1 labels to 0 for MLlib
values[0] = 0
return LabeledPoint(values[0], values[1:]) | [
"\n Parse a line of text into an MLlib LabeledPoint object.\n "
] |
Please provide a description of the function:def fMeasure(self, label, beta=None):
if beta is None:
return self.call("fMeasure", label)
else:
return self.call("fMeasure", label, beta) | [
"\n Returns f-measure.\n "
] |
Please provide a description of the function:def precision(self, label=None):
if label is None:
return self.call("precision")
else:
return self.call("precision", float(label)) | [
"\n Returns precision or precision for a given label (category) if specified.\n "
] |
Please provide a description of the function:def recall(self, label=None):
if label is None:
return self.call("recall")
else:
return self.call("recall", float(label)) | [
"\n Returns recall or recall for a given label (category) if specified.\n "
] |
Please provide a description of the function:def f1Measure(self, label=None):
if label is None:
return self.call("f1Measure")
else:
return self.call("f1Measure", float(label)) | [
"\n Returns f1Measure or f1Measure for a given label (category) if specified.\n "
] |
Please provide a description of the function:def _to_corrected_pandas_type(dt):
import numpy as np
if type(dt) == ByteType:
return np.int8
elif type(dt) == ShortType:
return np.int16
elif type(dt) == IntegerType:
return np.int32
elif type(dt) == FloatType:
return... | [
"\n When converting Spark SQL records to Pandas DataFrame, the inferred data type may be wrong.\n This method gets the corrected data type for Pandas if that type may be inferred incorrectly.\n "
] |
Please provide a description of the function:def rdd(self):
if self._lazy_rdd is None:
jrdd = self._jdf.javaToPython()
self._lazy_rdd = RDD(jrdd, self.sql_ctx._sc, BatchedSerializer(PickleSerializer()))
return self._lazy_rdd | [
"Returns the content as an :class:`pyspark.RDD` of :class:`Row`.\n "
] |
Please provide a description of the function:def toJSON(self, use_unicode=True):
rdd = self._jdf.toJSON()
return RDD(rdd.toJavaRDD(), self._sc, UTF8Deserializer(use_unicode)) | [
"Converts a :class:`DataFrame` into a :class:`RDD` of string.\n\n Each row is turned into a JSON document as one element in the returned RDD.\n\n >>> df.toJSON().first()\n u'{\"age\":2,\"name\":\"Alice\"}'\n "
] |
Please provide a description of the function:def schema(self):
if self._schema is None:
try:
self._schema = _parse_datatype_json_string(self._jdf.schema().json())
except AttributeError as e:
raise Exception(
"Unable to parse da... | [
"Returns the schema of this :class:`DataFrame` as a :class:`pyspark.sql.types.StructType`.\n\n >>> df.schema\n StructType(List(StructField(age,IntegerType,true),StructField(name,StringType,true)))\n "
] |
Please provide a description of the function:def explain(self, extended=False):
if extended:
print(self._jdf.queryExecution().toString())
else:
print(self._jdf.queryExecution().simpleString()) | [
"Prints the (logical and physical) plans to the console for debugging purpose.\n\n :param extended: boolean, default ``False``. If ``False``, prints only the physical plan.\n\n >>> df.explain()\n == Physical Plan ==\n *(1) Scan ExistingRDD[age#0,name#1]\n\n >>> df.explain(True)\n ... |
Please provide a description of the function:def exceptAll(self, other):
return DataFrame(self._jdf.exceptAll(other._jdf), self.sql_ctx) | [
"Return a new :class:`DataFrame` containing rows in this :class:`DataFrame` but\n not in another :class:`DataFrame` while preserving duplicates.\n\n This is equivalent to `EXCEPT ALL` in SQL.\n\n >>> df1 = spark.createDataFrame(\n ... [(\"a\", 1), (\"a\", 1), (\"a\", 1), (\"a\", ... |
Please provide a description of the function:def show(self, n=20, truncate=True, vertical=False):
if isinstance(truncate, bool) and truncate:
print(self._jdf.showString(n, 20, vertical))
else:
print(self._jdf.showString(n, int(truncate), vertical)) | [
"Prints the first ``n`` rows to the console.\n\n :param n: Number of rows to show.\n :param truncate: If set to True, truncate strings longer than 20 chars by default.\n If set to a number greater than one, truncates long strings to length ``truncate``\n and align cells right.\n ... |
Please provide a description of the function:def _repr_html_(self):
import cgi
if not self._support_repr_html:
self._support_repr_html = True
if self.sql_ctx._conf.isReplEagerEvalEnabled():
max_num_rows = max(self.sql_ctx._conf.replEagerEvalMaxNumRows(), 0)
... | [
"Returns a dataframe with html code when you enabled eager evaluation\n by 'spark.sql.repl.eagerEval.enabled', this only called by REPL you are\n using support eager evaluation with HTML.\n "
] |
Please provide a description of the function:def checkpoint(self, eager=True):
jdf = self._jdf.checkpoint(eager)
return DataFrame(jdf, self.sql_ctx) | [
"Returns a checkpointed version of this Dataset. Checkpointing can be used to truncate the\n logical plan of this DataFrame, which is especially useful in iterative algorithms where the\n plan may grow exponentially. It will be saved to files inside the checkpoint\n directory set with L{SparkCo... |
Please provide a description of the function:def localCheckpoint(self, eager=True):
jdf = self._jdf.localCheckpoint(eager)
return DataFrame(jdf, self.sql_ctx) | [
"Returns a locally checkpointed version of this Dataset. Checkpointing can be used to\n truncate the logical plan of this DataFrame, which is especially useful in iterative\n algorithms where the plan may grow exponentially. Local checkpoints are stored in the\n executors using the caching subs... |
Please provide a description of the function:def withWatermark(self, eventTime, delayThreshold):
if not eventTime or type(eventTime) is not str:
raise TypeError("eventTime should be provided as a string")
if not delayThreshold or type(delayThreshold) is not str:
raise Ty... | [
"Defines an event time watermark for this :class:`DataFrame`. A watermark tracks a point\n in time before which we assume no more late data is going to arrive.\n\n Spark will use this watermark for several purposes:\n - To know when a given time window aggregation can be finalized and thus ca... |
Please provide a description of the function:def hint(self, name, *parameters):
if len(parameters) == 1 and isinstance(parameters[0], list):
parameters = parameters[0]
if not isinstance(name, str):
raise TypeError("name should be provided as str, got {0}".format(type(na... | [
"Specifies some hint on the current DataFrame.\n\n :param name: A name of the hint.\n :param parameters: Optional parameters.\n :return: :class:`DataFrame`\n\n >>> df.join(df2.hint(\"broadcast\"), \"name\").show()\n +----+---+------+\n |name|age|height|\n +----+---+-... |
Please provide a description of the function:def collect(self):
with SCCallSiteSync(self._sc) as css:
sock_info = self._jdf.collectToPython()
return list(_load_from_socket(sock_info, BatchedSerializer(PickleSerializer()))) | [
"Returns all the records as a list of :class:`Row`.\n\n >>> df.collect()\n [Row(age=2, name=u'Alice'), Row(age=5, name=u'Bob')]\n "
] |
Please provide a description of the function:def toLocalIterator(self):
with SCCallSiteSync(self._sc) as css:
sock_info = self._jdf.toPythonIterator()
return _load_from_socket(sock_info, BatchedSerializer(PickleSerializer())) | [
"\n Returns an iterator that contains all of the rows in this :class:`DataFrame`.\n The iterator will consume as much memory as the largest partition in this DataFrame.\n\n >>> list(df.toLocalIterator())\n [Row(age=2, name=u'Alice'), Row(age=5, name=u'Bob')]\n "
] |
Please provide a description of the function:def limit(self, num):
jdf = self._jdf.limit(num)
return DataFrame(jdf, self.sql_ctx) | [
"Limits the result count to the number specified.\n\n >>> df.limit(1).collect()\n [Row(age=2, name=u'Alice')]\n >>> df.limit(0).collect()\n []\n "
] |
Please provide a description of the function:def persist(self, storageLevel=StorageLevel.MEMORY_AND_DISK):
self.is_cached = True
javaStorageLevel = self._sc._getJavaStorageLevel(storageLevel)
self._jdf.persist(javaStorageLevel)
return self | [
"Sets the storage level to persist the contents of the :class:`DataFrame` across\n operations after the first time it is computed. This can only be used to assign\n a new storage level if the :class:`DataFrame` does not have a storage level set yet.\n If no storage level is specified defaults t... |
Please provide a description of the function:def storageLevel(self):
java_storage_level = self._jdf.storageLevel()
storage_level = StorageLevel(java_storage_level.useDisk(),
java_storage_level.useMemory(),
java_storage_le... | [
"Get the :class:`DataFrame`'s current storage level.\n\n >>> df.storageLevel\n StorageLevel(False, False, False, False, 1)\n >>> df.cache().storageLevel\n StorageLevel(True, True, False, True, 1)\n >>> df2.persist(StorageLevel.DISK_ONLY_2).storageLevel\n StorageLevel(True, ... |
Please provide a description of the function:def unpersist(self, blocking=False):
self.is_cached = False
self._jdf.unpersist(blocking)
return self | [
"Marks the :class:`DataFrame` as non-persistent, and remove all blocks for it from\n memory and disk.\n\n .. note:: `blocking` default has changed to False to match Scala in 2.0.\n "
] |
Please provide a description of the function:def coalesce(self, numPartitions):
return DataFrame(self._jdf.coalesce(numPartitions), self.sql_ctx) | [
"\n Returns a new :class:`DataFrame` that has exactly `numPartitions` partitions.\n\n :param numPartitions: int, to specify the target number of partitions\n\n Similar to coalesce defined on an :class:`RDD`, this operation results in a\n narrow dependency, e.g. if you go from 1000 partit... |
Please provide a description of the function:def repartition(self, numPartitions, *cols):
if isinstance(numPartitions, int):
if len(cols) == 0:
return DataFrame(self._jdf.repartition(numPartitions), self.sql_ctx)
else:
return DataFrame(
... | [
"\n Returns a new :class:`DataFrame` partitioned by the given partitioning expressions. The\n resulting DataFrame is hash partitioned.\n\n :param numPartitions:\n can be an int to specify the target number of partitions or a Column.\n If it is a Column, it will be used as ... |
Please provide a description of the function:def sample(self, withReplacement=None, fraction=None, seed=None):
# For the cases below:
# sample(True, 0.5 [, seed])
# sample(True, fraction=0.5 [, seed])
# sample(withReplacement=False, fraction=0.5 [, seed])
is_withR... | [
"Returns a sampled subset of this :class:`DataFrame`.\n\n :param withReplacement: Sample with replacement or not (default False).\n :param fraction: Fraction of rows to generate, range [0.0, 1.0].\n :param seed: Seed for sampling (default a random seed).\n\n .. note:: This is not guarant... |
Please provide a description of the function:def sampleBy(self, col, fractions, seed=None):
if isinstance(col, basestring):
col = Column(col)
elif not isinstance(col, Column):
raise ValueError("col must be a string or a column, but got %r" % type(col))
if not isi... | [
"\n Returns a stratified sample without replacement based on the\n fraction given on each stratum.\n\n :param col: column that defines strata\n :param fractions:\n sampling fraction for each stratum. If a stratum is not\n specified, we treat its fraction as zero.\n ... |
Please provide a description of the function:def randomSplit(self, weights, seed=None):
for w in weights:
if w < 0.0:
raise ValueError("Weights must be positive. Found weight value: %s" % w)
seed = seed if seed is not None else random.randint(0, sys.maxsize)
... | [
"Randomly splits this :class:`DataFrame` with the provided weights.\n\n :param weights: list of doubles as weights with which to split the DataFrame. Weights will\n be normalized if they don't sum up to 1.0.\n :param seed: The seed for sampling.\n\n >>> splits = df4.randomSplit([1.0,... |
Please provide a description of the function:def dtypes(self):
return [(str(f.name), f.dataType.simpleString()) for f in self.schema.fields] | [
"Returns all column names and their data types as a list.\n\n >>> df.dtypes\n [('age', 'int'), ('name', 'string')]\n "
] |
Please provide a description of the function:def colRegex(self, colName):
if not isinstance(colName, basestring):
raise ValueError("colName should be provided as string")
jc = self._jdf.colRegex(colName)
return Column(jc) | [
"\n Selects column based on the column name specified as a regex and returns it\n as :class:`Column`.\n\n :param colName: string, column name specified as a regex.\n\n >>> df = spark.createDataFrame([(\"a\", 1), (\"b\", 2), (\"c\", 3)], [\"Col1\", \"Col2\"])\n >>> df.select(df.co... |
Please provide a description of the function:def alias(self, alias):
assert isinstance(alias, basestring), "alias should be a string"
return DataFrame(getattr(self._jdf, "as")(alias), self.sql_ctx) | [
"Returns a new :class:`DataFrame` with an alias set.\n\n :param alias: string, an alias name to be set for the DataFrame.\n\n >>> from pyspark.sql.functions import *\n >>> df_as1 = df.alias(\"df_as1\")\n >>> df_as2 = df.alias(\"df_as2\")\n >>> joined_df = df_as1.join(df_as2, col(\... |
Please provide a description of the function:def crossJoin(self, other):
jdf = self._jdf.crossJoin(other._jdf)
return DataFrame(jdf, self.sql_ctx) | [
"Returns the cartesian product with another :class:`DataFrame`.\n\n :param other: Right side of the cartesian product.\n\n >>> df.select(\"age\", \"name\").collect()\n [Row(age=2, name=u'Alice'), Row(age=5, name=u'Bob')]\n >>> df2.select(\"name\", \"height\").collect()\n [Row(name... |
Please provide a description of the function:def join(self, other, on=None, how=None):
if on is not None and not isinstance(on, list):
on = [on]
if on is not None:
if isinstance(on[0], basestring):
on = self._jseq(on)
else:
a... | [
"Joins with another :class:`DataFrame`, using the given join expression.\n\n :param other: Right side of the join\n :param on: a string for the join column name, a list of column names,\n a join expression (Column), or a list of Columns.\n If `on` is a string or a list of strings... |
Please provide a description of the function:def sortWithinPartitions(self, *cols, **kwargs):
jdf = self._jdf.sortWithinPartitions(self._sort_cols(cols, kwargs))
return DataFrame(jdf, self.sql_ctx) | [
"Returns a new :class:`DataFrame` with each partition sorted by the specified column(s).\n\n :param cols: list of :class:`Column` or column names to sort by.\n :param ascending: boolean or list of boolean (default True).\n Sort ascending vs. descending. Specify list for multiple sort orders... |
Please provide a description of the function:def _jseq(self, cols, converter=None):
return _to_seq(self.sql_ctx._sc, cols, converter) | [
"Return a JVM Seq of Columns from a list of Column or names"
] |
Please provide a description of the function:def _jcols(self, *cols):
if len(cols) == 1 and isinstance(cols[0], list):
cols = cols[0]
return self._jseq(cols, _to_java_column) | [
"Return a JVM Seq of Columns from a list of Column or column names\n\n If `cols` has only one list in it, cols[0] will be used as the list.\n "
] |
Please provide a description of the function:def _sort_cols(self, cols, kwargs):
if not cols:
raise ValueError("should sort by at least one column")
if len(cols) == 1 and isinstance(cols[0], list):
cols = cols[0]
jcols = [_to_java_column(c) for c in cols]
... | [
" Return a JVM Seq of Columns that describes the sort order\n "
] |
Please provide a description of the function:def describe(self, *cols):
if len(cols) == 1 and isinstance(cols[0], list):
cols = cols[0]
jdf = self._jdf.describe(self._jseq(cols))
return DataFrame(jdf, self.sql_ctx) | [
"Computes basic statistics for numeric and string columns.\n\n This include count, mean, stddev, min, and max. If no columns are\n given, this function computes statistics for all numerical or string columns.\n\n .. note:: This function is meant for exploratory data analysis, as we make no\n ... |
Please provide a description of the function:def summary(self, *statistics):
if len(statistics) == 1 and isinstance(statistics[0], list):
statistics = statistics[0]
jdf = self._jdf.summary(self._jseq(statistics))
return DataFrame(jdf, self.sql_ctx) | [
"Computes specified statistics for numeric and string columns. Available statistics are:\n - count\n - mean\n - stddev\n - min\n - max\n - arbitrary approximate percentiles specified as a percentage (eg, 75%)\n\n If no statistics are given, this function computes cou... |
Please provide a description of the function:def head(self, n=None):
if n is None:
rs = self.head(1)
return rs[0] if rs else None
return self.take(n) | [
"Returns the first ``n`` rows.\n\n .. note:: This method should only be used if the resulting array is expected\n to be small, as all the data is loaded into the driver's memory.\n\n :param n: int, default 1. Number of rows to return.\n :return: If n is greater than 1, return a list ... |
Please provide a description of the function:def select(self, *cols):
jdf = self._jdf.select(self._jcols(*cols))
return DataFrame(jdf, self.sql_ctx) | [
"Projects a set of expressions and returns a new :class:`DataFrame`.\n\n :param cols: list of column names (string) or expressions (:class:`Column`).\n If one of the column names is '*', that column is expanded to include all columns\n in the current DataFrame.\n\n >>> df.select(... |
Please provide a description of the function:def selectExpr(self, *expr):
if len(expr) == 1 and isinstance(expr[0], list):
expr = expr[0]
jdf = self._jdf.selectExpr(self._jseq(expr))
return DataFrame(jdf, self.sql_ctx) | [
"Projects a set of SQL expressions and returns a new :class:`DataFrame`.\n\n This is a variant of :func:`select` that accepts SQL expressions.\n\n >>> df.selectExpr(\"age * 2\", \"abs(age)\").collect()\n [Row((age * 2)=4, abs(age)=2), Row((age * 2)=10, abs(age)=5)]\n "
] |
Please provide a description of the function:def filter(self, condition):
if isinstance(condition, basestring):
jdf = self._jdf.filter(condition)
elif isinstance(condition, Column):
jdf = self._jdf.filter(condition._jc)
else:
raise TypeError("conditio... | [
"Filters rows using the given condition.\n\n :func:`where` is an alias for :func:`filter`.\n\n :param condition: a :class:`Column` of :class:`types.BooleanType`\n or a string of SQL expression.\n\n >>> df.filter(df.age > 3).collect()\n [Row(age=5, name=u'Bob')]\n >>> df... |
Please provide a description of the function:def groupBy(self, *cols):
jgd = self._jdf.groupBy(self._jcols(*cols))
from pyspark.sql.group import GroupedData
return GroupedData(jgd, self) | [
"Groups the :class:`DataFrame` using the specified columns,\n so we can run aggregation on them. See :class:`GroupedData`\n for all the available aggregate functions.\n\n :func:`groupby` is an alias for :func:`groupBy`.\n\n :param cols: list of columns to group by.\n Each elem... |
Please provide a description of the function:def union(self, other):
return DataFrame(self._jdf.union(other._jdf), self.sql_ctx) | [
" Return a new :class:`DataFrame` containing union of rows in this and another frame.\n\n This is equivalent to `UNION ALL` in SQL. To do a SQL-style set union\n (that does deduplication of elements), use this function followed by :func:`distinct`.\n\n Also as standard in SQL, this function res... |
Please provide a description of the function:def unionByName(self, other):
return DataFrame(self._jdf.unionByName(other._jdf), self.sql_ctx) | [
" Returns a new :class:`DataFrame` containing union of rows in this and another frame.\n\n This is different from both `UNION ALL` and `UNION DISTINCT` in SQL. To do a SQL-style set\n union (that does deduplication of elements), use this function followed by :func:`distinct`.\n\n The difference... |
Please provide a description of the function:def intersect(self, other):
return DataFrame(self._jdf.intersect(other._jdf), self.sql_ctx) | [
" Return a new :class:`DataFrame` containing rows only in\n both this frame and another frame.\n\n This is equivalent to `INTERSECT` in SQL.\n "
] |
Please provide a description of the function:def intersectAll(self, other):
return DataFrame(self._jdf.intersectAll(other._jdf), self.sql_ctx) | [
" Return a new :class:`DataFrame` containing rows in both this dataframe and other\n dataframe while preserving duplicates.\n\n This is equivalent to `INTERSECT ALL` in SQL.\n >>> df1 = spark.createDataFrame([(\"a\", 1), (\"a\", 1), (\"b\", 3), (\"c\", 4)], [\"C1\", \"C2\"])\n >>> df2 = ... |
Please provide a description of the function:def subtract(self, other):
return DataFrame(getattr(self._jdf, "except")(other._jdf), self.sql_ctx) | [
" Return a new :class:`DataFrame` containing rows in this frame\n but not in another frame.\n\n This is equivalent to `EXCEPT DISTINCT` in SQL.\n\n "
] |
Please provide a description of the function:def dropDuplicates(self, subset=None):
if subset is None:
jdf = self._jdf.dropDuplicates()
else:
jdf = self._jdf.dropDuplicates(self._jseq(subset))
return DataFrame(jdf, self.sql_ctx) | [
"Return a new :class:`DataFrame` with duplicate rows removed,\n optionally only considering certain columns.\n\n For a static batch :class:`DataFrame`, it just drops duplicate rows. For a streaming\n :class:`DataFrame`, it will keep all data across triggers as intermediate state to drop\n ... |
Please provide a description of the function:def dropna(self, how='any', thresh=None, subset=None):
if how is not None and how not in ['any', 'all']:
raise ValueError("how ('" + how + "') should be 'any' or 'all'")
if subset is None:
subset = self.columns
elif i... | [
"Returns a new :class:`DataFrame` omitting rows with null values.\n :func:`DataFrame.dropna` and :func:`DataFrameNaFunctions.drop` are aliases of each other.\n\n :param how: 'any' or 'all'.\n If 'any', drop a row if it contains any nulls.\n If 'all', drop a row only if all its va... |
Please provide a description of the function:def fillna(self, value, subset=None):
if not isinstance(value, (float, int, long, basestring, bool, dict)):
raise ValueError("value should be a float, int, long, string, bool or dict")
# Note that bool validates isinstance(int), but we d... | [
"Replace null values, alias for ``na.fill()``.\n :func:`DataFrame.fillna` and :func:`DataFrameNaFunctions.fill` are aliases of each other.\n\n :param value: int, long, float, string, bool or dict.\n Value to replace null values with.\n If the value is a dict, then `subset` is ign... |
Please provide a description of the function:def replace(self, to_replace, value=_NoValue, subset=None):
if value is _NoValue:
if isinstance(to_replace, dict):
value = None
else:
raise TypeError("value argument is required when to_replace is not a... | [
"Returns a new :class:`DataFrame` replacing a value with another value.\n :func:`DataFrame.replace` and :func:`DataFrameNaFunctions.replace` are\n aliases of each other.\n Values to_replace and value must have the same type and can only be numerics, booleans,\n or strings. Value can have... |
Please provide a description of the function:def approxQuantile(self, col, probabilities, relativeError):
if not isinstance(col, (basestring, list, tuple)):
raise ValueError("col should be a string, list or tuple, but got %r" % type(col))
isStr = isinstance(col, basestring)
... | [
"\n Calculates the approximate quantiles of numerical columns of a\n DataFrame.\n\n The result of this algorithm has the following deterministic bound:\n If the DataFrame has N elements and if we request the quantile at\n probability `p` up to error `err`, then the algorithm will ... |
Please provide a description of the function:def corr(self, col1, col2, method=None):
if not isinstance(col1, basestring):
raise ValueError("col1 should be a string.")
if not isinstance(col2, basestring):
raise ValueError("col2 should be a string.")
if not method... | [
"\n Calculates the correlation of two columns of a DataFrame as a double value.\n Currently only supports the Pearson Correlation Coefficient.\n :func:`DataFrame.corr` and :func:`DataFrameStatFunctions.corr` are aliases of each other.\n\n :param col1: The name of the first column\n ... |
Please provide a description of the function:def cov(self, col1, col2):
if not isinstance(col1, basestring):
raise ValueError("col1 should be a string.")
if not isinstance(col2, basestring):
raise ValueError("col2 should be a string.")
return self._jdf.stat().cov... | [
"\n Calculate the sample covariance for the given columns, specified by their names, as a\n double value. :func:`DataFrame.cov` and :func:`DataFrameStatFunctions.cov` are aliases.\n\n :param col1: The name of the first column\n :param col2: The name of the second column\n "
] |
Please provide a description of the function:def crosstab(self, col1, col2):
if not isinstance(col1, basestring):
raise ValueError("col1 should be a string.")
if not isinstance(col2, basestring):
raise ValueError("col2 should be a string.")
return DataFrame(self.... | [
"\n Computes a pair-wise frequency table of the given columns. Also known as a contingency\n table. The number of distinct values for each column should be less than 1e4. At most 1e6\n non-zero pair frequencies will be returned.\n The first column of each row will be the distinct values ... |
Please provide a description of the function:def freqItems(self, cols, support=None):
if isinstance(cols, tuple):
cols = list(cols)
if not isinstance(cols, list):
raise ValueError("cols must be a list or tuple of column names as strings.")
if not support:
... | [
"\n Finding frequent items for columns, possibly with false positives. Using the\n frequent element count algorithm described in\n \"https://doi.org/10.1145/762471.762473, proposed by Karp, Schenker, and Papadimitriou\".\n :func:`DataFrame.freqItems` and :func:`DataFrameStatFunctions.fre... |
Please provide a description of the function:def withColumn(self, colName, col):
assert isinstance(col, Column), "col should be Column"
return DataFrame(self._jdf.withColumn(colName, col._jc), self.sql_ctx) | [
"\n Returns a new :class:`DataFrame` by adding a column or replacing the\n existing column that has the same name.\n\n The column expression must be an expression over this DataFrame; attempting to add\n a column from some other dataframe will raise an error.\n\n :param colName: s... |
Please provide a description of the function:def withColumnRenamed(self, existing, new):
return DataFrame(self._jdf.withColumnRenamed(existing, new), self.sql_ctx) | [
"Returns a new :class:`DataFrame` by renaming an existing column.\n This is a no-op if schema doesn't contain the given column name.\n\n :param existing: string, name of the existing column to rename.\n :param new: string, new name of the column.\n\n >>> df.withColumnRenamed('age', 'age2... |
Please provide a description of the function:def drop(self, *cols):
if len(cols) == 1:
col = cols[0]
if isinstance(col, basestring):
jdf = self._jdf.drop(col)
elif isinstance(col, Column):
jdf = self._jdf.drop(col._jc)
else... | [
"Returns a new :class:`DataFrame` that drops the specified column.\n This is a no-op if schema doesn't contain the given column name(s).\n\n :param cols: a string name of the column to drop, or a\n :class:`Column` to drop, or a list of string name of the columns to drop.\n\n >>> df.d... |
Please provide a description of the function:def toDF(self, *cols):
jdf = self._jdf.toDF(self._jseq(cols))
return DataFrame(jdf, self.sql_ctx) | [
"Returns a new class:`DataFrame` that with new specified column names\n\n :param cols: list of new column names (string)\n\n >>> df.toDF('f1', 'f2').collect()\n [Row(f1=2, f2=u'Alice'), Row(f1=5, f2=u'Bob')]\n "
] |
Please provide a description of the function:def transform(self, func):
result = func(self)
assert isinstance(result, DataFrame), "Func returned an instance of type [%s], " \
"should have been DataFrame." % type(result)
return result | [
"Returns a new class:`DataFrame`. Concise syntax for chaining custom transformations.\n\n :param func: a function that takes and returns a class:`DataFrame`.\n\n >>> from pyspark.sql.functions import col\n >>> df = spark.createDataFrame([(1, 1.0), (2, 2.0)], [\"int\", \"float\"])\n >>> d... |
Please provide a description of the function:def toPandas(self):
from pyspark.sql.utils import require_minimum_pandas_version
require_minimum_pandas_version()
import pandas as pd
if self.sql_ctx._conf.pandasRespectSessionTimeZone():
timezone = self.sql_ctx._conf.se... | [
"\n Returns the contents of this :class:`DataFrame` as Pandas ``pandas.DataFrame``.\n\n This is only available if Pandas is installed and available.\n\n .. note:: This method should only be used if the resulting Pandas's DataFrame is expected\n to be small, as all the data is loaded ... |
Please provide a description of the function:def _collectAsArrow(self):
with SCCallSiteSync(self._sc) as css:
sock_info = self._jdf.collectAsArrowToPython()
# Collect list of un-ordered batches where last element is a list of correct order indices
results = list(_load_from_... | [
"\n Returns all records as a list of ArrowRecordBatches, pyarrow must be installed\n and available on driver and worker Python environments.\n\n .. note:: Experimental.\n "
] |
Please provide a description of the function:def asDict(self, sample=False):
return {
'count': self.count(),
'mean': self.mean(),
'sum': self.sum(),
'min': self.min(),
'max': self.max(),
'stdev': self.stdev() if sample else self.sa... | [
"Returns the :class:`StatCounter` members as a ``dict``.\n\n >>> sc.parallelize([1., 2., 3., 4.]).stats().asDict()\n {'count': 4L,\n 'max': 4.0,\n 'mean': 2.5,\n 'min': 1.0,\n 'stdev': 1.2909944487358056,\n 'sum': 10.0,\n 'variance': 1.6666666666666667}\... |
Please provide a description of the function:def _list_function_infos(jvm):
jinfos = jvm.org.apache.spark.sql.api.python.PythonSQLUtils.listBuiltinFunctionInfos()
infos = []
for jinfo in jinfos:
name = jinfo.getName()
usage = jinfo.getUsage()
usage = usage.replace("_FUNC_", nam... | [
"\n Returns a list of function information via JVM. Sorts wrapped expression infos by name\n and returns them.\n "
] |
Please provide a description of the function:def _make_pretty_usage(usage):
if usage is not None and usage.strip() != "":
usage = "\n".join(map(lambda u: u.strip(), usage.split("\n")))
return "%s\n\n" % usage | [
"\n Makes the usage description pretty and returns a formatted string if `usage`\n is not an empty string. Otherwise, returns None.\n "
] |
Please provide a description of the function:def _make_pretty_arguments(arguments):
if arguments.startswith("\n Arguments:"):
arguments = "\n".join(map(lambda u: u[6:], arguments.strip().split("\n")[1:]))
return "**Arguments:**\n\n%s\n\n" % arguments | [
"\n Makes the arguments description pretty and returns a formatted string if `arguments`\n starts with the argument prefix. Otherwise, returns None.\n\n Expected input:\n\n Arguments:\n * arg0 - ...\n ...\n * arg0 - ...\n ...\n\n Expected output:\n *... |
Please provide a description of the function:def _make_pretty_examples(examples):
if examples.startswith("\n Examples:"):
examples = "\n".join(map(lambda u: u[6:], examples.strip().split("\n")[1:]))
return "**Examples:**\n\n```\n%s\n```\n\n" % examples | [
"\n Makes the examples description pretty and returns a formatted string if `examples`\n starts with the example prefix. Otherwise, returns None.\n\n Expected input:\n\n Examples:\n > SELECT ...;\n ...\n > SELECT ...;\n ...\n\n Expected output:\n **Example... |
Please provide a description of the function:def _make_pretty_note(note):
if note != "":
note = "\n".join(map(lambda n: n[4:], note.split("\n")))
return "**Note:**\n%s\n" % note | [
"\n Makes the note description pretty and returns a formatted string if `note` is not\n an empty string. Otherwise, returns None.\n\n Expected input:\n\n ...\n\n Expected output:\n **Note:**\n\n ...\n\n "
] |
Please provide a description of the function:def _make_pretty_deprecated(deprecated):
if deprecated != "":
deprecated = "\n".join(map(lambda n: n[4:], deprecated.split("\n")))
return "**Deprecated:**\n%s\n" % deprecated | [
"\n Makes the deprecated description pretty and returns a formatted string if `deprecated`\n is not an empty string. Otherwise, returns None.\n\n Expected input:\n\n ...\n\n Expected output:\n **Deprecated:**\n\n ...\n\n "
] |
Please provide a description of the function:def generate_sql_markdown(jvm, path):
with open(path, 'w') as mdfile:
for info in _list_function_infos(jvm):
name = info.name
usage = _make_pretty_usage(info.usage)
arguments = _make_pretty_arguments(info.arguments)
... | [
"\n Generates a markdown file after listing the function information. The output file\n is created in `path`.\n\n Expected output:\n ### NAME\n\n USAGE\n\n **Arguments:**\n\n ARGUMENTS\n\n **Examples:**\n\n ```\n EXAMPLES\n ```\n\n **Note:**\n\n NOTE\n\n **Since:** SINCE\n\... |
Please provide a description of the function:def predict(self, x):
if isinstance(x, RDD):
return x.map(lambda v: self.predict(v))
x = _convert_to_vector(x)
if self.numClasses == 2:
margin = self.weights.dot(x) + self._intercept
if margin > 0:
... | [
"\n Predict values for a single data point or an RDD of points\n using the model trained.\n "
] |
Please provide a description of the function:def save(self, sc, path):
java_model = sc._jvm.org.apache.spark.mllib.classification.LogisticRegressionModel(
_py2java(sc, self._coeff), self.intercept, self.numFeatures, self.numClasses)
java_model.save(sc._jsc.sc(), path) | [
"\n Save this model to the given path.\n "
] |
Please provide a description of the function:def train(cls, data, iterations=100, initialWeights=None, regParam=0.0, regType="l2",
intercept=False, corrections=10, tolerance=1e-6, validateData=True, numClasses=2):
def train(rdd, i):
return callMLlibFunc("trainLogisticRegressio... | [
"\n Train a logistic regression model on the given data.\n\n :param data:\n The training data, an RDD of LabeledPoint.\n :param iterations:\n The number of iterations.\n (default: 100)\n :param initialWeights:\n The initial weights.\n (default... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.