
Commit 870279a

More fixes of docs.
1 parent b0e8459 commit 870279a

8 files changed (+37, -34 lines changed)

R/pkg/R/DataFrame.R

Lines changed: 9 additions & 9 deletions
@@ -932,7 +932,7 @@ setMethod("sample_frac",
 #' @param x a SparkDataFrame.
 #' @family SparkDataFrame functions
 #' @rdname nrow
-#' @name count
+#' @name nrow
 #' @aliases count,SparkDataFrame-method
 #' @export
 #' @examples
@@ -1214,9 +1214,9 @@ setMethod("toRDD",
 #'
 #' Groups the SparkDataFrame using the specified columns, so we can run aggregation on them.
 #'
-#' @param x a SparkDataFrame
+#' @param x a SparkDataFrame.
 #' @param ... variable(s) (character names(s) or Column(s)) to group on.
-#' @return a GroupedData
+#' @return A GroupedData.
 #' @family SparkDataFrame functions
 #' @aliases groupBy,SparkDataFrame-method
 #' @rdname groupBy
@@ -3037,8 +3037,8 @@ setMethod("str",
 #' This is a no-op if schema doesn't contain column name(s).
 #'
 #' @param x a SparkDataFrame.
-#' @param ... further arguments to be passed to or from other methods.
 #' @param col a character vector of column names or a Column.
+#' @param ... further arguments to be passed to or from other methods.
 #' @return A SparkDataFrame.
 #'
 #' @family SparkDataFrame functions
@@ -3058,7 +3058,7 @@ setMethod("str",
 #' @note drop since 2.0.0
 setMethod("drop",
           signature(x = "SparkDataFrame"),
-          function(x, col) {
+          function(x, col, ...) {
             stopifnot(class(col) == "character" || class(col) == "Column")

             if (class(col) == "Column") {
@@ -3218,11 +3218,11 @@ setMethod("histogram",
 #' and to not change the existing data.
 #' }
 #'
-#' @param x A SparkDataFrame
-#' @param url JDBC database url of the form `jdbc:subprotocol:subname`
-#' @param tableName The name of the table in the external database
+#' @param x a SparkDataFrame.
+#' @param url JDBC database url of the form `jdbc:subprotocol:subname`.
+#' @param tableName the name of the table in the external database.
+#' @param mode one of 'append', 'overwrite', 'error', 'ignore' save mode (it is 'error' by default).
 #' @param ... additional JDBC database connection properties.
-#' @param mode One of 'append', 'overwrite', 'error', 'ignore' save mode (it is 'error' by default)
 #' @family SparkDataFrame functions
 #' @rdname write.jdbc
 #' @name write.jdbc
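
For context, a minimal SparkR sketch (not part of this commit) of the APIs whose docs change above: nrow/count, groupBy, drop and write.jdbc. The JDBC URL, table name and credentials are placeholder assumptions.

library(SparkR)
sparkR.session()

df <- createDataFrame(faithful)

nrow(df)                            # total number of rows; count(df) is equivalent
grouped <- groupBy(df, df$waiting)  # returns a GroupedData
head(count(grouped))                # one row per group, grouping columns plus a count column

df2 <- drop(df, "waiting")          # drop a column by name; no-op if it does not exist

# Needs a live database, so left commented out; connection details are hypothetical.
# write.jdbc(df2, "jdbc:postgresql://localhost/testdb", "faithful_copy",
#            mode = "overwrite", user = "spark", password = "secret")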

R/pkg/R/SQLContext.R

Lines changed: 1 addition & 1 deletion
@@ -730,7 +730,7 @@ dropTempView <- function(viewName) {
 #' @param source The name of external data source
 #' @param schema The data schema defined in structType
 #' @param na.strings Default string value for NA when source is "csv"
-#' @param ... additional external data source specific named propertie(s).
+#' @param ... additional external data source specific named properties.
 #' @return SparkDataFrame
 #' @rdname read.df
 #' @name read.df
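
A hedged sketch of read.df() with source-specific named properties passed through "..."; the file path is a placeholder and assumes a local CSV file exists.

library(SparkR)
sparkR.session()

people <- read.df("file:///tmp/people.csv", source = "csv",
                  header = "true", inferSchema = "true", na.strings = "NA")
printSchema(people)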

R/pkg/R/WindowSpec.R

Lines changed: 3 additions & 3 deletions
@@ -55,7 +55,7 @@ setMethod("show", "WindowSpec",
 #' Defines the partitioning columns in a WindowSpec.
 #'
 #' @param x a WindowSpec.
-#' @param col a column to partition on (desribed by the name or Column object).
+#' @param col a column to partition on (described by the name or Column).
 #' @param ... additional column(s) to partition on.
 #' @return A WindowSpec.
 #' @rdname partitionBy
@@ -88,7 +88,7 @@ setMethod("partitionBy",
 #'
 #' Defines the ordering columns in a WindowSpec.
 #' @param x a WindowSpec
-#' @param col a character or Column object indicating an ordering column
+#' @param col a character or Column indicating an ordering column
 #' @param ... additional sorting fields
 #' @return A WindowSpec.
 #' @name orderBy
@@ -194,7 +194,7 @@ setMethod("rangeBetween",
 #'
 #' Define a windowing column.
 #'
-#' @param x a Column object, usually one returned by window function(s).
+#' @param x a Column, usually one returned by window function(s).
 #' @param window a WindowSpec object. Can be created by `windowPartitionBy` or
 #'               `windowOrderBy` and configured by other WindowSpec methods.
 #' @rdname over
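
A short sketch of the window API touched above: partitionBy/orderBy build a WindowSpec, and over() applies a window-function Column over it. Dataset and column choices are illustrative only.

library(SparkR)
sparkR.session()

df <- createDataFrame(mtcars)
ws <- orderBy(windowPartitionBy("cyl"), "mpg")   # WindowSpec partitioned by cyl, ordered by mpg
ranked <- select(df, df$cyl, df$mpg, alias(over(rank(), ws), "rank_in_cyl"))
head(ranked)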

R/pkg/R/column.R

Lines changed: 3 additions & 3 deletions
@@ -163,7 +163,7 @@ setMethod("alias",
 #' @family colum_func
 #' @aliases substr,Column-method
 #'
-#' @param x a Column object.
+#' @param x a Column.
 #' @param start starting position.
 #' @param stop ending position.
 #' @note substr since 1.4.0
@@ -220,7 +220,7 @@ setMethod("endsWith", signature(x = "Column"),
 #' @family colum_func
 #' @aliases between,Column-method
 #'
-#' @param x a Column object
+#' @param x a Column
 #' @param bounds lower and upper bounds
 #' @note between since 1.5.0
 setMethod("between", signature(x = "Column"),
@@ -235,7 +235,7 @@ setMethod("between", signature(x = "Column"),

 #' Casts the column to a different data type.
 #'
-#' @param x a Column object.
+#' @param x a Column.
 #' @param dataType a character object describing the target data type.
 #'                 See
 #' \href{https://spark.apache.org/docs/latest/sparkr.html#data-type-mapping-between-r-and-spark}{
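
A small sketch (assumed data, not from the commit) of the Column helpers documented above: substr, between and cast.

library(SparkR)
sparkR.session()

df <- createDataFrame(data.frame(name = c("Alice", "Bob"), age = c(29L, 35L)))

initials <- substr(df$name, 1, 1)        # substring of a string Column
in_range <- between(df$age, c(30, 40))   # lower and upper bounds
age_dbl  <- cast(df$age, "double")       # cast to another Spark data type

head(select(df, initials, in_range, age_dbl))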

R/pkg/R/functions.R

Lines changed: 10 additions & 11 deletions
@@ -316,7 +316,7 @@ setMethod("column",
 #'
 #' Computes the Pearson Correlation Coefficient for two Columns.
 #'
-#' @param col2 a (second) Column object.
+#' @param col2 a (second) Column.
 #'
 #' @rdname corr
 #' @name corr
@@ -357,8 +357,8 @@ setMethod("cov", signature(x = "characterOrColumn"),

 #' @rdname cov
 #'
-#' @param col1 the first Column object.
-#' @param col2 the second Column object.
+#' @param col1 the first Column.
+#' @param col2 the second Column.
 #' @name covar_samp
 #' @aliases covar_samp,characterOrColumn,characterOrColumn-method
 #' @note covar_samp since 2.0.0
@@ -446,8 +446,8 @@ setMethod("cosh",
 #'
 #' Returns the number of items in a group. This is a column aggregate function.
 #'
-#' @rdname n
-#' @name n
+#' @rdname count
+#' @name count
 #' @family agg_funcs
 #' @aliases count,Column-method
 #' @export
@@ -1270,14 +1270,14 @@ setMethod("round",

 #' bround
 #'
-#' Returns the value of the column `e` rounded to `scale` decimal places using HALF_EVEN rounding
-#' mode if `scale` >= 0 or at integer part when `scale` < 0.
+#' Returns the value of the column \code{e} rounded to \code{scale} decimal places using HALF_EVEN rounding
+#' mode if \code{scale} >= 0 or at integer part when \code{scale} < 0.
 #' Also known as Gaussian rounding or bankers' rounding that rounds to the nearest even number.
 #' bround(2.5, 0) = 2, bround(3.5, 0) = 4.
 #'
 #' @param x Column to compute on.
 #' @param scale round to \code{scale} digits to the right of the decimal point when \code{scale} > 0,
-#'        the nearest even number when \code{scale} = 0, and `scale` digits to the left
+#'        the nearest even number when \code{scale} = 0, and \code{scale} digits to the left
 #'        of the decimal point when \code{scale} < 0.
 #' @param ... further arguments to be passed to or from other methods.
 #' @rdname bround
@@ -2276,8 +2276,7 @@ setMethod("n_distinct", signature(x = "Column"),
             countDistinct(x, ...)
           })

-#' @param x a Column.
-#' @rdname n
+#' @rdname count
 #' @name n
 #' @aliases n,Column-method
 #' @export
@@ -2655,7 +2654,7 @@ setMethod("expr", signature(x = "character"),
 #' Formats the arguments in printf-style and returns the result as a string column.
 #'
 #' @param format a character object of format strings.
-#' @param x a Column object.
+#' @param x a Column.
 #' @param ... additional Column(s).
 #' @family string_funcs
 #' @rdname format_string
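
For reference, a hedged SparkR sketch of the functions whose docs are regrouped above: count/n as Column aggregates, bround, and format_string. Dataset and format string are assumptions.

library(SparkR)
sparkR.session()

df <- createDataFrame(faithful)

head(select(df, count(df$eruptions)))                  # count() as a Column aggregate
head(agg(groupBy(df, df$waiting), n(df$eruptions)))    # n() is the alias of count() for Columns
head(select(df, bround(df$eruptions, 0)))              # bankers' rounding: bround(2.5, 0) == 2
head(select(df, format_string("lasted %.1f min", df$eruptions)))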

R/pkg/R/generics.R

Lines changed: 7 additions & 2 deletions
@@ -432,7 +432,8 @@ setGeneric("coltypes<-", function(x, value) { standardGeneric("coltypes<-") })
 #' @export
 setGeneric("columns", function(x) {standardGeneric("columns") })

-#' @rdname nrow
+#' @param x a GroupedData or Column.
+#' @rdname count
 #' @export
 setGeneric("count", function(x) { standardGeneric("count") })

@@ -1071,7 +1072,7 @@ setGeneric("month", function(x) { standardGeneric("month") })
 #' @export
 setGeneric("months_between", function(y, x) { standardGeneric("months_between") })

-#' @rdname n
+#' @rdname count
 #' @export
 setGeneric("n", function(x) { standardGeneric("n") })

@@ -1303,6 +1304,10 @@ setGeneric("year", function(x) { standardGeneric("year") })
 #' @export
 setGeneric("spark.glm", function(data, formula, ...) { standardGeneric("spark.glm") })

+#' @param x,y For \code{glm}: logical values indicating whether the response vector
+#'            and model matrix used in the fitting process should be returned as
+#'            components of the returned value.
+#' @inheritParams stats::glm
 #' @rdname glm
 #' @export
 setGeneric("glm")

R/pkg/R/group.R

Lines changed: 0 additions & 1 deletion
@@ -59,7 +59,6 @@ setMethod("show", "GroupedData",
 #' Count the number of rows for each group.
 #' The resulting SparkDataFrame will also contain the grouping columns.
 #'
-#' @param x a GroupedData.
 #' @return A SparkDataFrame.
 #' @rdname count
 #' @aliases count,GroupedData-method

R/pkg/R/mllib.R

Lines changed: 4 additions & 4 deletions
@@ -173,7 +173,7 @@ setMethod("spark.glm", signature(data = "SparkDataFrame", formula = "formula"),
 #' Fits a generalized linear model, similarly to R's glm().
 #' @param formula a symbolic description of the model to be fitted. Currently only a few formula
 #'                operators are supported, including '~', '.', ':', '+', and '-'.
-#' @param data SparkDataFrame for training.
+#' @param data a SparkDataFrame or R's glm data for training.
 #' @param family a description of the error distribution and link function to be used in the model.
 #'               This can be a character string naming a family function, a family function or
 #'               the result of a call to a family function. Refer R family at
@@ -508,10 +508,10 @@ setMethod("summary", signature(object = "IsotonicRegressionModel"),
 #' @param formula a symbolic description of the model to be fitted. Currently only a few formula
 #'                operators are supported, including '~', '.', ':', '+', and '-'.
 #'                Note that the response variable of formula is empty in spark.kmeans.
-#' @param ... additional argument(s) passed to the method.
 #' @param k number of centers.
 #' @param maxIter maximum iteration number.
 #' @param initMode the initialization algorithm choosen to fit the model.
+#' @param ... additional argument(s) passed to the method.
 #' @return \code{spark.kmeans} returns a fitted k-means model.
 #' @rdname spark.kmeans
 #' @aliases spark.kmeans,SparkDataFrame,formula-method
@@ -628,8 +628,8 @@ setMethod("predict", signature(object = "KMeansModel"),
 #' @param data a \code{SparkDataFrame} of observations and labels for model fitting.
 #' @param formula a symbolic description of the model to be fitted. Currently only a few formula
 #'                operators are supported, including '~', '.', ':', '+', and '-'.
-#' @param ... additional argument(s) passed to the method. Currently only \code{smoothing}.
 #' @param smoothing smoothing parameter.
+#' @param ... additional argument(s) passed to the method. Currently only \code{smoothing}.
 #' @return \code{spark.naiveBayes} returns a fitted naive Bayes model.
 #' @rdname spark.naiveBayes
 #' @aliases spark.naiveBayes,SparkDataFrame,formula-method
@@ -657,7 +657,7 @@ setMethod("predict", signature(object = "KMeansModel"),
 #' }
 #' @note spark.naiveBayes since 2.0.0
 setMethod("spark.naiveBayes", signature(data = "SparkDataFrame", formula = "formula"),
-          function(data, formula, smoothing = 1.0) {
+          function(data, formula, smoothing = 1.0, ...) {
             formula <- paste(deparse(formula), collapse = "")
             jobj <- callJStatic("org.apache.spark.ml.r.NaiveBayesWrapper", "fit",
                                 formula, data@sdf, smoothing)
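
A minimal sketch of the MLlib wrappers whose parameter docs are reordered above; the datasets and column choices are illustrative assumptions, not from the commit.

library(SparkR)
sparkR.session()

df <- createDataFrame(iris)

glm_model <- spark.glm(df, Sepal_Length ~ Sepal_Width, family = "gaussian")
km_model  <- spark.kmeans(df, ~ Sepal_Length + Sepal_Width, k = 3, maxIter = 10)

nb_df    <- createDataFrame(as.data.frame(Titanic))
nb_model <- spark.naiveBayes(nb_df, Survived ~ Class + Sex + Age, smoothing = 1.0)

head(predict(glm_model, df))   # predictions come back as a SparkDataFrame column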
