diff --git a/DESCRIPTION b/DESCRIPTION index 0fea684321..2a97d1d6a8 100644 --- a/DESCRIPTION +++ b/DESCRIPTION @@ -26,7 +26,6 @@ Imports: lazyeval, MASS, mgcv, - plyr (>= 1.7.1), reshape2, rlang (>= 0.2.1), scales (>= 0.5.0), @@ -71,6 +70,7 @@ Collate: 'aes-group-order.r' 'aes-linetype-size-shape.r' 'aes-position.r' + 'compat-plyr.R' 'utilities.r' 'aes.r' 'legend-draw.r' diff --git a/NAMESPACE b/NAMESPACE index 024c1c3cb1..5f799e6799 100644 --- a/NAMESPACE +++ b/NAMESPACE @@ -284,6 +284,7 @@ export(element_grob) export(element_line) export(element_rect) export(element_text) +export(empty) export(enexpr) export(enexprs) export(enquo) @@ -579,7 +580,6 @@ import(grid) import(gtable) import(scales) importFrom(lazyeval,f_eval) -importFrom(plyr,defaults) importFrom(rlang,.data) importFrom(rlang,enexpr) importFrom(rlang,enexprs) diff --git a/R/aes-group-order.r b/R/aes-group-order.r index fe46161f55..ad9921f2ee 100644 --- a/R/aes-group-order.r +++ b/R/aes-group-order.r @@ -33,7 +33,7 @@ #' rescale01 <- function(x) (x - min(x)) / diff(range(x)) #' ec_scaled <- data.frame( #' date = economics$date, -#' plyr::colwise(rescale01)(economics[, -(1:2)])) +#' lapply(economics[, -(1:2)], rescale01)) #' ecm <- reshape2::melt(ec_scaled, id.vars = "date") #' f <- ggplot(ecm, aes(date, value)) #' f + geom_line(aes(linetype = variable)) diff --git a/R/aes.r b/R/aes.r index 5b1c1114c7..37660768df 100644 --- a/R/aes.r +++ b/R/aes.r @@ -1,4 +1,4 @@ -#' @include utilities.r +#' @include utilities.r compat-plyr.R NULL #' Construct aesthetic mappings @@ -157,7 +157,7 @@ standardise_aes_names <- function(x) { x <- sub("color", "colour", x, fixed = TRUE) # convert old-style aesthetics names to ggplot version - plyr::revalue(x, ggplot_global$base_to_ggplot, warn_missing = FALSE) + revalue(x, ggplot_global$base_to_ggplot) } # x is a list of aesthetic mappings, as generated by aes() diff --git a/R/bench.r b/R/bench.r index 48c8c0286f..42157229fd 100644 --- a/R/bench.r +++ b/R/bench.r @@ -22,8 +22,10 @@ benchplot <- function(x) { draw <- system.time(grid.draw(grob)) times <- rbind(construct, build, render, draw)[, 1:3] + times <- rbind(times, colSums(times)) - plyr::unrowname(base::data.frame( + cbind( step = c("construct", "build", "render", "draw", "TOTAL"), - rbind(times, colSums(times)))) + mat_2_df(times) + ) } diff --git a/R/compat-plyr.R b/R/compat-plyr.R new file mode 100644 index 0000000000..898aacdf05 --- /dev/null +++ b/R/compat-plyr.R @@ -0,0 +1,346 @@ +#' Adds missing elements to a vector from a default vector +#' +#' This function appends a given named vector or list with additional elements +#' from a default vector, only adding those that does not already exist in the +#' first. +#' +#' @param x,y Named vectors or lists +#' +#' @return `x` with missing values from `y` appended +#' +#' @keywords internal +#' @noRd +#' +defaults <- function(x, y) c(x, y[setdiff(names(y), names(x))]) +# Remove rownames from data frames and matrices +unrowname <- function(x) { + if (is.data.frame(x)) { + attr(x, "row.names") <- .set_row_names(.row_names_info(x, 2L)) + } else if (is.matrix(x)) { + dimnames(x)[1] <- list(NULL) + } else { + stop("Can only remove rownames from data.frame and matrix objects", call. = FALSE) + } + x +} +#' Rename elements in a list, data.frame or vector +#' +#' This is akin to `dplyr::rename` and `plyr::rename`. It renames elements given +#' as names in the `replace` vector to the values in the `replace` vector +#' without touching elements not referenced. 
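+#'
+#' A minimal usage sketch with hypothetical values:
+#' `rename(c(a = 1, b = 2), c(b = "B", z = "Z"))` returns `c(a = 1, B = 2)`;
+#' the unmatched `z` entry is dropped without a warning.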
+#' +#' @param x A data.frame or a named vector or list +#' @param replace A named character vector. The names identifies the elements in +#' `x` that should be renamed and the values gives the new names. +#' +#' @return `x`, with new names according to `replace` +#' +#' @keywords internal +#' @noRd +#' +rename <- function(x, replace) { + current_names <- names(x) + old_names <- names(replace) + missing_names <- setdiff(old_names, current_names) + if (length(missing_names) > 0) { + replace <- replace[!old_names %in% missing_names] + old_names <- names(replace) + } + names(x)[match(old_names, current_names)] <- as.vector(replace) + x +} +# Adapted from plyr:::id_vars +# Create a unique id for elements in a single vector +id_var <- function(x, drop = FALSE) { + if (length(x) == 0) { + id <- integer() + n = 0L + } else if (!is.null(attr(x, "n")) && !drop) { + return(x) + } else if (is.factor(x) && !drop) { + x <- addNA(x, ifany = TRUE) + id <- as.integer(x) + n <- length(levels(x)) + } else { + levels <- sort(unique(x), na.last = TRUE) + id <- match(x, levels) + n <- max(id) + } + attr(id, "n") <- n + id +} +#' Create an unique integer id for each unique row in a data.frame +#' +#' Properties: +#' - `order(id)` is equivalent to `do.call(order, df)` +#' - rows containing the same data have the same value +#' - if `drop = FALSE` then room for all possibilites +#' +#' @param .variables list of variables +#' @param drop Should unused factor levels be dropped? +#' +#' @return An integer vector with attribute `n` giving the total number of +#' possible unique rows +#' +#' @keywords internal +#' @noRd +#' +id <- function(.variables, drop = FALSE) { + nrows <- NULL + if (is.data.frame(.variables)) { + nrows <- nrow(.variables) + .variables <- unclass(.variables) + } + lengths <- vapply(.variables, length, integer(1)) + .variables <- .variables[lengths != 0] + if (length(.variables) == 0) { + n <- nrows %||% 0L + id <- seq_len(n) + attr(id, "n") <- n + return(id) + } + if (length(.variables) == 1) { + return(id_var(.variables[[1]], drop = drop)) + } + ids <- rev(lapply(.variables, id_var, drop = drop)) + p <- length(ids) + ndistinct <- vapply(ids, attr, "n", FUN.VALUE = numeric(1), USE.NAMES = FALSE) + n <- prod(ndistinct) + if (n > 2^31) { + char_id <- do.call("paste", c(ids, sep = "\r")) + res <- match(char_id, unique(char_id)) + } + else { + combs <- c(1, cumprod(ndistinct[-p])) + mat <- do.call("cbind", ids) + res <- c((mat - 1L) %*% combs + 1L) + } + if (drop) { + id_var(res, drop = TRUE) + } + else { + res <- as.integer(res) + attr(res, "n") <- n + res + } +} +#' Count number of occurences for each unique combination of variables +#' +#' Each unique combination of the variables in `df` given by `vars` will be +#' identified and their occurences counted. If `wt_var` is given the counts will +#' be weighted by the values in this column. +#' +#' @param df A data.frame +#' @param vars A vector of column names. 
If `NULL` all columns in `df` will be +#' used +#' @param wt_var The name of a column to use as weight +#' +#' @return A data.frame with the unique combinations counted along with a `n` +#' column giving the counts +#' +#' @keywords internal +#' @noRd +#' +count <- function(df, vars = NULL, wt_var = NULL) { + df2 <- if (is.null(vars)) df else df[vars] + id <- id(df2, drop = TRUE) + u_id <- !duplicated(id) + labels <- df2[u_id, , drop = FALSE] + labels <- labels[order(id[u_id]), , drop = FALSE] + if (is.null(wt_var)) { + freq <- tabulate(id, attr(id, "n")) + } else { + wt <- .subset2(df, wt_var) + freq <- vapply(split(wt, id), sum, numeric(1)) + } + new_data_frame(c(as.list(labels), list(n = freq))) +} +# Adapted from plyr::join.keys +# Create a shared unique id across two data frames such that common variable +# combinations in the two data frames gets the same id +join_keys <- function(x, y, by) { + joint <- rbind_dfs(list(x[by], y[by])) + keys <- id(joint, drop = TRUE) + n_x <- nrow(x) + n_y <- nrow(y) + list(x = keys[seq_len(n_x)], y = keys[n_x + seq_len(n_y)], + n = attr(keys, "n")) +} +#' Replace specified values with new values, in a factor or character vector +#' +#' An easy to use substitution of elements in a string-like vector (character or +#' factor). If `x` is a character vector the matching elements will be replaced +#' directly and if `x` is a factor the matching levels will be replaced +#' +#' @param x A character or factor vector +#' @param replace A named character vector with the names corresponding to the +#' elements to replace and the values giving the replacement. +#' +#' @return A vector of the same class as `x` with the given values replaced +#' +#' @keywords internal +#' @noRd +#' +revalue <- function(x, replace) { + if (is.character(x)) { + replace <- replace[names(replace) %in% x] + if (length(replace) == 0) return(x) + x[match(names(replace), x)] <- replace + } else if (is.factor(x)) { + lev <- levels(x) + replace <- replace[names(replace) %in% lev] + if (length(replace) == 0) return(x) + lev[match(names(replace), lev)] <- replace + levels(x) <- lev + } else if (!is.null(x)) { + stop("x is not a factor or character vector", call. = FALSE) + } + x +} +# Iterate through a formula and return a quoted version +simplify_formula <- function(x) { + if (length(x) == 2 && x[[1]] == as.name("~")) { + return(simplify(x[[2]])) + } + if (length(x) < 3) + return(list(x)) + op <- x[[1]] + a <- x[[2]] + b <- x[[3]] + if (op == as.name("+") || op == as.name("*") || op == + as.name("~")) { + c(simplify(a), simplify(b)) + } + else if (op == as.name("-")) { + c(simplify(a), bquote(-.(x), list(x = simplify(b)))) + } + else { + list(x) + } +} +#' Create a quoted version of x +#' +#' This function captures the special meaning of formulas in the context of +#' facets in ggplot2, where `+` have special meaning. It works as +#' `plyr::as.quoted` but only for the special cases of `character`, `call`, and +#' `formula` input as these are the only situations relevant for ggplot2. +#' +#' @param x A formula, string, or call to be quoted +#' @param env The environment to a attach to the quoted expression. +#' +#' @keywords internal +#' @noRd +#' +as.quoted <- function(x, env = parent.frame()) { + x <- if (is.character(x)) { + lapply(x, function(x) parse(text = x)[[1]]) + } else if (is.formula(x)) { + simplify_formula(x) + } else if (is.call(x)) { + as.list(x)[-1] + } else { + stop("Only knows how to quote characters, calls, and formula", call. 
= FALSE) + } + attributes(x) <- list(env = env, class = 'quoted') + x +} +# round a number to a given precision +round_any <- function(x, accuracy, f = round) { + if (!is.numeric(x)) stop("x must be numeric", call. = FALSE) + f(x/accuracy) * accuracy +} +#' Bind data frames together by common column names +#' +#' This function is akin to `plyr::rbind.fill`, `dplyr::bind_rows`, and +#' `data.table::rbindlist`. It takes data frames in a list and stacks them on +#' top of each other, filling out values with `NA` if the column is missing from +#' a data.frame +#' +#' @param dfs A list of data frames +#' +#' @return A data.frame with the union of all columns from the data frames given +#' in `dfs` +#' +#' @keywords internal +#' @noRd +#' +rbind_dfs <- function(dfs) { + out <- list() + columns <- unique(unlist(lapply(dfs, names))) + nrows <- vapply(dfs, .row_names_info, integer(1), type = 2L) + total <- sum(nrows) + if (length(columns) == 0) return(new_data_frame(list(), total)) + allocated <- rep(FALSE, length(columns)) + names(allocated) <- columns + col_levels <- list() + for (df in dfs) { + new_columns <- intersect(names(df), columns[!allocated]) + for (col in new_columns) { + if (is.factor(df[[col]])) { + all_factors <- all(vapply(dfs, function(df) { + val <- .subset2(df, col) + is.null(val) || is.factor(val) + }, logical(1))) + if (all_factors) { + col_levels[[col]] <- unique(unlist(lapply(dfs, function(df) levels(.subset2(df, col))))) + } + out[[col]] <- rep(NA_character_, total) + } else { + out[[col]] <- rep(.subset2(df, col)[1][NA], total) + } + } + allocated[new_columns] <- TRUE + if (all(allocated)) break + } + pos <- c(cumsum(nrows) - nrows + 1) + for (i in seq_along(dfs)) { + df <- dfs[[i]] + rng <- seq(pos[i], length.out = nrows[i]) + for (col in names(df)) { + if (inherits(df[[col]], 'factor')) { + out[[col]][rng] <- as.character(df[[col]]) + } else { + out[[col]][rng] <- df[[col]] + } + } + } + for (col in names(col_levels)) { + out[[col]] <- factor(out[[col]], levels = col_levels[[col]]) + } + attributes(out) <- list(class = "data.frame", names = names(out), row.names = .set_row_names(total)) + out +} +#' Apply function to unique subsets of a data.frame +#' +#' This function is akin to `plyr::ddply`. It takes a single data.frame, +#' splits it by the unique combinations of the columns given in `by`, apply a +#' function to each split, and then reassembles the results into a sigle +#' data.frame again. +#' +#' @param df A data.frame +#' @param by A character vector of column names to split by +#' @param fun A function to apply to each split +#' @param ... Further arguments to `fun` +#' @param drop Should unused factor levels in the columns given in `by` be +#' dropped. +#' +#' @return A data.frame if the result of `fun` does not include the columns +#' given in `by` these will be prepended to the result. +#' +#' @keywords internal +#' @noRd +dapply <- function(df, by, fun, ..., drop = TRUE) { + grouping_cols <- .subset(df, by) + ids <- id(grouping_cols, drop = drop) + group_rows <- split(seq_len(nrow(df)), ids) + rbind_dfs(lapply(seq_along(group_rows), function(i) { + cur_data <- df_rows(df, group_rows[[i]]) + res <- fun(cur_data, ...) 
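+    # `res` may be NULL (drop the group), empty, a matrix, or a bare list of
+    # columns; normalise it to a data.frame and prepend the grouping values
+    # from `by` unless `fun` already returned them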
+ if (is.null(res)) return(res) + if (length(res) == 0) return(new_data_frame()) + vars <- lapply(setNames(by, by), function(col) .subset2(cur_data, col)[1]) + if (is.matrix(res)) res <- split_matrix(res) + if (is.null(names(res))) names(res) <- paste0("V", seq_along(res)) + new_data_frame(modify_list(unclass(vars), unclass(res))) + })) +} diff --git a/R/coord-munch.r b/R/coord-munch.r index d93be2cac6..4b0bb2d8e2 100644 --- a/R/coord-munch.r +++ b/R/coord-munch.r @@ -112,8 +112,8 @@ dist_polar <- function(r, theta) { # Rename x and y columns to r and t, since we're working in polar # Note that 'slope' actually means the spiral slope, 'a' in the spiral # formula r = a * theta - lf <- plyr::rename(lf, c(x1 = "t1", x2 = "t2", y1 = "r1", y2 = "r2", - yintercept = "r_int", xintercept = "t_int"), warn_missing = FALSE) + lf <- rename(lf, c(x1 = "t1", x2 = "t2", y1 = "r1", y2 = "r2", + yintercept = "r_int", xintercept = "t_int")) # Re-normalize the theta values so that intercept for each is 0 # This is necessary for calculating spiral arc length. diff --git a/R/coord-polar.r b/R/coord-polar.r index c7d61d02c3..e65c45a0a1 100644 --- a/R/coord-polar.r +++ b/R/coord-polar.r @@ -325,9 +325,9 @@ CoordPolar <- ggproto("CoordPolar", Coord, rename_data <- function(coord, data) { if (coord$theta == "y") { - plyr::rename(data, c("y" = "theta", "x" = "r"), warn_missing = FALSE) + rename(data, c("y" = "theta", "x" = "r")) } else { - plyr::rename(data, c("y" = "r", "x" = "theta"), warn_missing = FALSE) + rename(data, c("y" = "r", "x" = "theta")) } } diff --git a/R/facet-.r b/R/facet-.r index ba06f4b235..734fbd0b16 100644 --- a/R/facet-.r +++ b/R/facet-.r @@ -91,10 +91,10 @@ Facet <- ggproto("Facet", NULL, init_scales = function(layout, x_scale = NULL, y_scale = NULL, params) { scales <- list() if (!is.null(x_scale)) { - scales$x <- plyr::rlply(max(layout$SCALE_X), x_scale$clone()) + scales$x <- lapply(seq_len(max(layout$SCALE_X)), function(i) x_scale$clone()) } if (!is.null(y_scale)) { - scales$y <- plyr::rlply(max(layout$SCALE_Y), y_scale$clone()) + scales$y <- lapply(seq_len(max(layout$SCALE_Y)), function(i) y_scale$clone()) } scales }, @@ -243,7 +243,7 @@ NO_PANEL <- -1L unique_combs <- function(df) { if (length(df) == 0) return() - unique_values <- plyr::llply(df, ulevels) + unique_values <- lapply(df, ulevels) rev(expand.grid(rev(unique_values), stringsAsFactors = FALSE, KEEP.OUT.ATTRS = TRUE)) } @@ -256,7 +256,7 @@ df.grid <- function(a, b) { i_a = seq_len(nrow(a)), i_b = seq_len(nrow(b)) ) - plyr::unrowname(cbind( + unrowname(cbind( a[indexes$i_a, , drop = FALSE], b[indexes$i_b, , drop = FALSE] )) @@ -393,7 +393,7 @@ f_as_facets <- function(f) { env <- rlang::f_env(f) %||% globalenv() # as.quoted() handles `+` specifications - vars <- plyr::as.quoted(f) + vars <- as.quoted(f) # `.` in formulas is ignored vars <- discard_dots(vars) @@ -529,7 +529,7 @@ combine_vars <- function(data, env = emptyenv(), vars = NULL, drop = TRUE) { if (length(vars) == 0) return(new_data_frame()) # For each layer, compute the facet values - values <- compact(plyr::llply(data, eval_facets, facets = vars, env = env)) + values <- compact(lapply(data, eval_facets, facets = vars, env = env)) # Form the base data.frame which contains all combinations of faceting # variables that appear in the data @@ -547,7 +547,7 @@ combine_vars <- function(data, env = emptyenv(), vars = NULL, drop = TRUE) { ) } - base <- unique(plyr::ldply(values[has_all])) + base <- unique(rbind_dfs(values[has_all])) if (!drop) { base <- unique_combs(base) } 
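The facet code above now leans on the new compat helpers rather than plyr. A minimal sketch of the two workhorses, `rbind_dfs()` and `dapply()`, using made-up toy inputs and assuming the package-internal helpers (`new_data_frame()`, `df_rows()`, `modify_list()`) are in scope:

# rbind_dfs() fills columns missing from a data.frame with NA,
# mirroring plyr::rbind.fill()
rbind_dfs(list(
  new_data_frame(list(x = 1)),
  new_data_frame(list(x = 2, y = "a"))
))
#>   x    y
#> 1 1 <NA>
#> 2 2    a

# dapply() splits on the `by` columns, applies `fun`, and re-binds the
# pieces with rbind_dfs(); here it computes a per-group row count
df <- new_data_frame(list(g = c("a", "a", "b"), v = 1:3))
dapply(df, "g", function(d) new_data_frame(list(n = nrow(d))))
#>   g n
#> 1 a 2
#> 2 b 1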
diff --git a/R/facet-grid-.r b/R/facet-grid-.r index 98fff5e346..13d213ca1f 100644 --- a/R/facet-grid-.r +++ b/R/facet-grid-.r @@ -229,11 +229,11 @@ FacetGrid <- ggproto("FacetGrid", Facet, base <- unique(base) # Create panel info dataset - panel <- plyr::id(base, drop = TRUE) + panel <- id(base, drop = TRUE) panel <- factor(panel, levels = seq_len(attr(panel, "n"))) - rows <- if (!length(names(rows))) rep(1L, length(panel)) else plyr::id(base[names(rows)], drop = TRUE) - cols <- if (!length(names(cols))) rep(1L, length(panel)) else plyr::id(base[names(cols)], drop = TRUE) + rows <- if (!length(names(rows))) rep(1L, length(panel)) else id(base[names(rows)], drop = TRUE) + cols <- if (!length(names(cols))) rep(1L, length(panel)) else id(base[names(cols)], drop = TRUE) panels <- new_data_frame(c(list(PANEL = panel, ROW = rows, COL = cols), base)) panels <- panels[order(panels$PANEL), , drop = FALSE] @@ -269,8 +269,8 @@ FacetGrid <- ggproto("FacetGrid", Facet, data_rep <- rep.int(1:nrow(data), nrow(to_add)) facet_rep <- rep(1:nrow(to_add), each = nrow(data)) - data <- plyr::unrowname(data[data_rep, , drop = FALSE]) - facet_vals <- plyr::unrowname(cbind( + data <- unrowname(data[data_rep, , drop = FALSE]) + facet_vals <- unrowname(cbind( facet_vals[data_rep, , drop = FALSE], to_add[facet_rep, , drop = FALSE])) } @@ -283,7 +283,7 @@ FacetGrid <- ggproto("FacetGrid", Facet, facet_vals[] <- lapply(facet_vals[], as.factor) facet_vals[] <- lapply(facet_vals[], addNA, ifany = TRUE) - keys <- plyr::join.keys(facet_vals, layout, by = vars) + keys <- join_keys(facet_vals, layout, by = vars) data$PANEL <- layout$PANEL[match(keys$x, keys$y)] } diff --git a/R/facet-wrap.r b/R/facet-wrap.r index 0771dd1054..8b8a29b2ee 100644 --- a/R/facet-wrap.r +++ b/R/facet-wrap.r @@ -141,11 +141,11 @@ FacetWrap <- ggproto("FacetWrap", Facet, return(layout_null()) } - base <- plyr::unrowname( + base <- unrowname( combine_vars(data, params$plot_env, vars, drop = params$drop) ) - id <- plyr::id(base, drop = TRUE) + id <- id(base, drop = TRUE) n <- attr(id, "n") dims <- wrap_dims(n, params$nrow, params$ncol) @@ -163,7 +163,7 @@ FacetWrap <- ggproto("FacetWrap", Facet, layout[c("ROW", "COL")] <- layout[c("COL", "ROW")] } - panels <- cbind(layout, plyr::unrowname(base)) + panels <- cbind(layout, unrowname(base)) panels <- panels[order(panels$PANEL), , drop = FALSE] rownames(panels) <- NULL @@ -190,13 +190,13 @@ FacetWrap <- ggproto("FacetWrap", Facet, data_rep <- rep.int(1:nrow(data), nrow(to_add)) facet_rep <- rep(1:nrow(to_add), each = nrow(data)) - data <- plyr::unrowname(data[data_rep, , drop = FALSE]) - facet_vals <- plyr::unrowname(cbind( + data <- unrowname(data[data_rep, , drop = FALSE]) + facet_vals <- unrowname(cbind( facet_vals[data_rep, , drop = FALSE], to_add[facet_rep, , drop = FALSE])) } - keys <- plyr::join.keys(facet_vals, layout, by = names(vars)) + keys <- join_keys(facet_vals, layout, by = names(vars)) data$PANEL <- layout$PANEL[match(keys$x, keys$y)] data diff --git a/R/fortify-map.r b/R/fortify-map.r index e27331c233..8c969b78b3 100644 --- a/R/fortify-map.r +++ b/R/fortify-map.r @@ -96,7 +96,9 @@ map_data <- function(map, region = ".", exact = FALSE, ...) 
{ #' #' ia <- map_data("county", "iowa") #' mid_range <- function(x) mean(range(x)) -#' seats <- plyr::ddply(ia, "subregion", plyr::colwise(mid_range, c("lat", "long"))) +#' seats <- do.call(rbind, lapply(split(ia, ia$subregion), function(d) { +#' data.frame(lat = mid_range(d$lat), long = mid_range(d$long)) +#' })) #' ggplot(ia, aes(long, lat)) + #' geom_polygon(aes(group = group), fill = NA, colour = "grey60") + #' geom_text(aes(label = subregion), data = seats, size = 2, angle = 45) diff --git a/R/fortify-multcomp.r b/R/fortify-multcomp.r index 8aa165eeb3..eaa8d316bd 100644 --- a/R/fortify-multcomp.r +++ b/R/fortify-multcomp.r @@ -33,7 +33,7 @@ NULL #' @rdname fortify-multcomp #' @export fortify.glht <- function(model, data, ...) { - plyr::unrowname(base::data.frame( + unrowname(base::data.frame( lhs = rownames(model$linfct), rhs = model$rhs, estimate = stats::coef(model), @@ -48,7 +48,7 @@ fortify.confint.glht <- function(model, data, ...) { coef <- model$confint colnames(coef) <- to_lower_ascii(colnames(coef)) - plyr::unrowname(base::data.frame( + unrowname(base::data.frame( lhs = rownames(coef), rhs = model$rhs, coef, @@ -64,7 +64,7 @@ fortify.summary.glht <- function(model, data, ...) { model$test[c("coefficients", "sigma", "tstat", "pvalues")]) names(coef) <- c("estimate", "se", "t", "p") - plyr::unrowname(base::data.frame( + unrowname(base::data.frame( lhs = rownames(coef), rhs = model$rhs, coef, @@ -77,7 +77,7 @@ fortify.summary.glht <- function(model, data, ...) { #' @rdname fortify-multcomp #' @export fortify.cld <- function(model, data, ...) { - plyr::unrowname(base::data.frame( + unrowname(base::data.frame( lhs = names(model$mcletters$Letters), letters = model$mcletters$Letters, check.names = FALSE, diff --git a/R/fortify-spatial.r b/R/fortify-spatial.r index dd239e5427..dcf9550ef2 100644 --- a/R/fortify-spatial.r +++ b/R/fortify-spatial.r @@ -25,7 +25,7 @@ fortify.SpatialPolygonsDataFrame <- function(model, data, region = NULL, ...) { attr <- as.data.frame(model) # If not specified, split into regions based on polygons if (is.null(region)) { - coords <- plyr::ldply(model@polygons,fortify) + coords <- rbind_dfs(lapply(model@polygons,fortify)) message("Regions defined for each Polygons") } else { cp <- sp::polygons(model) @@ -42,7 +42,7 @@ fortify.SpatialPolygonsDataFrame <- function(model, data, region = NULL, ...) { #' @export #' @method fortify SpatialPolygons fortify.SpatialPolygons <- function(model, data, ...) { - plyr::ldply(model@polygons, fortify) + rbind_dfs(lapply(model@polygons, fortify)) } #' @rdname fortify.sp @@ -50,11 +50,11 @@ fortify.SpatialPolygons <- function(model, data, ...) { #' @method fortify Polygons fortify.Polygons <- function(model, data, ...) { subpolys <- model@Polygons - pieces <- plyr::ldply(seq_along(subpolys), function(i) { + pieces <- rbind_dfs(lapply(seq_along(subpolys), function(i) { df <- fortify(subpolys[[model@plotOrder[i]]]) df$piece <- i df - }) + })) pieces$order <- 1:nrow(pieces) pieces$id <- model@ID @@ -78,7 +78,7 @@ fortify.Polygon <- function(model, data, ...) { #' @export #' @method fortify SpatialLinesDataFrame fortify.SpatialLinesDataFrame <- function(model, data, ...) { - plyr::ldply(model@lines, fortify) + rbind_dfs(lapply(model@lines, fortify)) } #' @rdname fortify.sp @@ -86,11 +86,11 @@ fortify.SpatialLinesDataFrame <- function(model, data, ...) { #' @method fortify Lines fortify.Lines <- function(model, data, ...) 
{ lines <- model@Lines - pieces <- plyr::ldply(seq_along(lines), function(i) { + pieces <- rbind_dfs(lapply(seq_along(lines), function(i) { df <- fortify(lines[[i]]) df$piece <- i df - }) + })) pieces$order <- 1:nrow(pieces) pieces$id <- model@ID diff --git a/R/geom-.r b/R/geom-.r index c5050349cd..4f19896ee2 100644 --- a/R/geom-.r +++ b/R/geom-.r @@ -81,12 +81,12 @@ Geom <- ggproto("Geom", params <- params[intersect(names(params), self$parameters())] args <- c(list(quote(data), quote(panel_params), quote(coord)), params) - plyr::dlply(data, "PANEL", function(data) { + lapply(split(data, data$PANEL), function(data) { if (empty(data)) return(zeroGrob()) panel_params <- layout$panel_params[[data$PANEL[1]]] do.call(self$draw_panel, args) - }, .drop = FALSE) + }) }, draw_panel = function(self, data, panel_params, coord, ...) { diff --git a/R/geom-dotplot.r b/R/geom-dotplot.r index aa07c35e7f..b7d66d6c9f 100644 --- a/R/geom-dotplot.r +++ b/R/geom-dotplot.r @@ -212,7 +212,7 @@ GeomDotplot <- ggproto("GeomDotplot", Geom, plyvars <- c(plyvars, "group") # Within each x, or x+group, set countidx=1,2,3, and set stackpos according to stack function - data <- plyr::ddply(data, plyvars, function(xx) { + data <- dapply(data, plyvars, function(xx) { xx$countidx <- 1:nrow(xx) xx$stackpos <- stackdots(xx$countidx) xx @@ -237,7 +237,7 @@ GeomDotplot <- ggproto("GeomDotplot", Geom, # works. They're just set to the standard x +- width/2 so that dot clusters # can be dodged like other geoms. # After position code is rewritten, each dot should have its own bounding box. - data <- plyr::ddply(data, c("group", "PANEL"), transform, + data <- dapply(data, c("group", "PANEL"), transform, ymin = min(y) - binwidth[1] / 2, ymax = max(y) + binwidth[1] / 2) diff --git a/R/geom-path.r b/R/geom-path.r index 1a222f5e76..379355c563 100644 --- a/R/geom-path.r +++ b/R/geom-path.r @@ -160,7 +160,7 @@ GeomPath <- ggproto("GeomPath", Geom, if (nrow(munched) < 2) return(zeroGrob()) # Work out whether we should use lines or segments - attr <- plyr::ddply(munched, "group", function(df) { + attr <- dapply(munched, "group", function(df) { linetype <- unique(df$linetype) new_data_frame(list( solid = identical(linetype, 1) || identical(linetype, "solid"), @@ -294,7 +294,7 @@ geom_step <- function(mapping = NULL, data = NULL, stat = "identity", #' @include geom-path.r GeomStep <- ggproto("GeomStep", GeomPath, draw_panel = function(data, panel_params, coord, direction = "hv") { - data <- plyr::ddply(data, "group", stairstep, direction = direction) + data <- dapply(data, "group", stairstep, direction = direction) GeomPath$draw_panel(data, panel_params, coord) } ) diff --git a/R/geom-rect.r b/R/geom-rect.r index 155f365425..c8d03e5121 100644 --- a/R/geom-rect.r +++ b/R/geom-rect.r @@ -37,10 +37,9 @@ GeomRect <- ggproto("GeomRect", Geom, names(data), c("x", "y", "xmin", "xmax", "ymin", "ymax") ) - polys <- plyr::alply(data, 1, function(row) { + polys <- lapply(split(data, seq_len(nrow(data))), function(row) { poly <- rect_to_poly(row$xmin, row$xmax, row$ymin, row$ymax) - aes <- as.data.frame(row[aesthetics], - stringsAsFactors = FALSE)[rep(1,5), ] + aes <- new_data_frame(row[aesthetics])[rep(1,5), ] GeomPolygon$draw_panel(cbind(poly, aes), panel_params, coord) }) diff --git a/R/geom-ribbon.r b/R/geom-ribbon.r index ab5c8385c5..2735fae254 100644 --- a/R/geom-ribbon.r +++ b/R/geom-ribbon.r @@ -94,8 +94,12 @@ GeomRibbon <- ggproto("GeomRibbon", Geom, ids <- cumsum(missing_pos) + 1 ids[missing_pos] <- NA - positions <- plyr::summarise(data, - x = 
c(x, rev(x)), y = c(ymax, rev(ymin)), id = c(ids, rev(ids))) + data <- unclass(data) #for faster indexing + positions <- new_data_frame(list( + x = c(data$x, rev(data$x)), + y = c(data$ymax, rev(data$ymin)), + id = c(ids, rev(ids)) + )) munched <- coord_munch(coord, positions, panel_params) ggname("geom_ribbon", polygonGrob( diff --git a/R/geom-segment.r b/R/geom-segment.r index 017d88a9de..2fd48da8a0 100644 --- a/R/geom-segment.r +++ b/R/geom-segment.r @@ -130,8 +130,7 @@ GeomSegment <- ggproto("GeomSegment", Geom, data$group <- 1:nrow(data) starts <- subset(data, select = c(-xend, -yend)) - ends <- plyr::rename(subset(data, select = c(-x, -y)), c("xend" = "x", "yend" = "y"), - warn_missing = FALSE) + ends <- rename(subset(data, select = c(-x, -y)), c("xend" = "x", "yend" = "y")) pieces <- rbind(starts, ends) pieces <- pieces[order(pieces$group),] diff --git a/R/geom-violin.r b/R/geom-violin.r index 44e92d9c1f..153f7c7101 100644 --- a/R/geom-violin.r +++ b/R/geom-violin.r @@ -105,7 +105,7 @@ GeomViolin <- ggproto("GeomViolin", Geom, params$width %||% (resolution(data$x, FALSE) * 0.9) # ymin, ymax, xmin, and xmax define the bounding rectangle for each group - plyr::ddply(data, "group", transform, + dapply(data, "group", transform, xmin = x - width / 2, xmax = x + width / 2 ) @@ -120,8 +120,8 @@ GeomViolin <- ggproto("GeomViolin", Geom, # Make sure it's sorted properly to draw the outline newdata <- rbind( - plyr::arrange(transform(data, x = xminv), y), - plyr::arrange(transform(data, x = xmaxv), -y) + transform(data, x = xminv)[order(data$y), ], + transform(data, x = xmaxv)[order(data$y, decreasing = TRUE), ] ) # Close the polygon: set first and last point the same diff --git a/R/ggplot2.r b/R/ggplot2.r index fb0eaf28eb..3e70041902 100644 --- a/R/ggplot2.r +++ b/R/ggplot2.r @@ -2,7 +2,6 @@ "_PACKAGE" #' @import scales grid gtable -#' @importFrom plyr defaults #' @importFrom stats setNames #' @importFrom rlang quo quos NULL diff --git a/R/grouping.r b/R/grouping.r index 56f38337d1..592c4235a4 100644 --- a/R/grouping.r +++ b/R/grouping.r @@ -16,12 +16,12 @@ add_group <- function(data) { disc[names(disc) %in% c("label", "PANEL")] <- FALSE if (any(disc)) { - data$group <- plyr::id(data[disc], drop = TRUE) + data$group <- id(data[disc], drop = TRUE) } else { data$group <- NO_GROUP } } else { - data$group <- plyr::id(data["group"], drop = TRUE) + data$group <- id(data["group"], drop = TRUE) } data diff --git a/R/guide-colorbar.r b/R/guide-colorbar.r index bb5035e97b..afd3ea32cf 100644 --- a/R/guide-colorbar.r +++ b/R/guide-colorbar.r @@ -241,7 +241,7 @@ guide_merge.colorbar <- function(guide, new_guide) { #' @export guide_geom.colorbar <- function(guide, layers, default_mapping) { # Layers that use this guide - guide_layers <- plyr::llply(layers, function(layer) { + guide_layers <- lapply(layers, function(layer) { matched <- matched_aes(layer, guide, default_mapping) if (length(matched) && ((is.na(layer$show.legend) || layer$show.legend))) { diff --git a/R/guide-legend.r b/R/guide-legend.r index 3f90dc26e8..2bc04d84b7 100644 --- a/R/guide-legend.r +++ b/R/guide-legend.r @@ -242,7 +242,7 @@ guide_merge.legend <- function(guide, new_guide) { #' @export guide_geom.legend <- function(guide, layers, default_mapping) { # arrange common data for vertical and horizontal guide - guide$geoms <- plyr::llply(layers, function(layer) { + guide$geoms <- lapply(layers, function(layer) { matched <- matched_aes(layer, guide, default_mapping) if (length(matched) > 0) { diff --git a/R/hexbin.R b/R/hexbin.R index 
daf359b037..6d6e38e5fd 100644 --- a/R/hexbin.R +++ b/R/hexbin.R @@ -7,8 +7,8 @@ hex_binwidth <- function(bins = 30, scales) { hex_bounds <- function(x, binwidth) { c( - plyr::round_any(min(x), binwidth, floor) - 1e-6, - plyr::round_any(max(x), binwidth, ceiling) + 1e-6 + round_any(min(x), binwidth, floor) - 1e-6, + round_any(max(x), binwidth, ceiling) + 1e-6 ) } diff --git a/R/labeller.r b/R/labeller.r index c29f5142af..f04faa987e 100644 --- a/R/labeller.r +++ b/R/labeller.r @@ -398,8 +398,7 @@ as_labeller <- function(x, default = label_value, multi_line = TRUE) { #' #' # In the following example, we rename the levels to the long form, #' # then apply a wrap labeller to the columns to prevent cropped text -#' msleep$conservation2 <- plyr::revalue(msleep$conservation, -#' conservation_status) +#' msleep$conservation2 <- revalue(msleep$conservation, conservation_status) #' p3 <- ggplot(msleep, aes(x = sleep_total, y = awake)) + geom_point() #' p3 + #' facet_grid(vore ~ conservation2, diff --git a/R/layer.r b/R/layer.r index e172f7fa3b..df3b5d1fa4 100644 --- a/R/layer.r +++ b/R/layer.r @@ -279,7 +279,7 @@ Layer <- ggproto("Layer", NULL, env <- new.env(parent = baseenv()) env$stat <- stat - stat_data <- plyr::quickdf(lapply(new, rlang::eval_tidy, data, env)) + stat_data <- new_data_frame(lapply(new, rlang::eval_tidy, data, env)) names(stat_data) <- names(new) # Add any new scales, if needed diff --git a/R/layout.R b/R/layout.R index c37cee60ff..6a672b85d4 100644 --- a/R/layout.R +++ b/R/layout.R @@ -270,10 +270,9 @@ scale_apply <- function(data, vars, method, scale_id, scales) { if (length(vars) == 0) return() if (nrow(data) == 0) return() - n <- length(scales) if (any(is.na(scale_id))) stop() - scale_index <- plyr::split_indices(scale_id, n) + scale_index <- unname(split(seq_along(scale_id), scale_id)) lapply(vars, function(var) { pieces <- lapply(seq_along(scales), function(i) { diff --git a/R/performance.R b/R/performance.R index 533ba04757..2657e9ac09 100644 --- a/R/performance.R +++ b/R/performance.R @@ -26,11 +26,15 @@ data.frame <- function(...) { stop('Please use `data_frame()` or `new_data_frame()` instead of `data.frame()` for better performance. See the vignette "ggplot2 internal programming guidelines" for details.', call. = FALSE) } -mat_2_df <- function(x, col_names = colnames(x), .check = FALSE) { +split_matrix <- function(x, col_names = colnames(x)) { force(col_names) x <- lapply(seq_len(ncol(x)), function(i) x[, i]) if (!is.null(col_names)) names(x) <- col_names - new_data_frame(x) + x +} + +mat_2_df <- function(x, col_names = colnames(x)) { + new_data_frame(split_matrix(x, col_names)) } df_col <- function(x, name) .subset2(x, name) diff --git a/R/plot.r b/R/plot.r index c2b1cdb53a..fc7cfe1bc8 100644 --- a/R/plot.r +++ b/R/plot.r @@ -41,7 +41,9 @@ #' gp = factor(rep(letters[1:3], each = 10)), #' y = rnorm(30) #' ) -#' ds <- plyr::ddply(df, "gp", plyr::summarise, mean = mean(y), sd = sd(y)) +#' ds <- do.call(rbind, lapply(split(df, df$gp), function(d) { +#' data.frame(mean = mean(d$y), sd = sd(d$y), gp = d$gp) +#' })) #' #' # The summary data frame ds is used to plot larger red points on top #' # of the raw data. 
Note that we don't need to supply `data` or `mapping` diff --git a/R/position-.r b/R/position-.r index 7704a6b58b..d45b201486 100644 --- a/R/position-.r +++ b/R/position-.r @@ -54,7 +54,7 @@ Position <- ggproto("Position", }, compute_layer = function(self, data, params, layout) { - plyr::ddply(data, "PANEL", function(data) { + dapply(data, "PANEL", function(data) { if (empty(data)) return(new_data_frame()) scales <- layout$get_scales(data$PANEL[1]) diff --git a/R/position-collide.r b/R/position-collide.r index 682c3b87c2..cc4aa0e2df 100644 --- a/R/position-collide.r +++ b/R/position-collide.r @@ -27,7 +27,7 @@ collide_setup <- function(data, width = NULL, name, strategy, width <- widths[1] } - list(data = data, width = width) + list(data = data, width = width) } collide <- function(data, width = NULL, name, strategy, @@ -53,12 +53,12 @@ collide <- function(data, width = NULL, name, strategy, # This is where the algorithm from [L. Wilkinson. Dot plots. # The American Statistician, 1999.] should be used } - + if (!is.null(data$ymax)) { - plyr::ddply(data, "xmin", strategy, ..., width = width) + dapply(data, "xmin", strategy, ..., width = width) } else if (!is.null(data$y)) { data$ymax <- data$y - data <- plyr::ddply(data, "xmin", strategy, ..., width = width) + data <- dapply(data, "xmin", strategy, ..., width = width) data$y <- data$ymax data } else { diff --git a/R/scales-.r b/R/scales-.r index c185cf95df..b8800417ca 100644 --- a/R/scales-.r +++ b/R/scales-.r @@ -74,7 +74,7 @@ scales_map_df <- function(scales, df) { mapped <- unlist(lapply(scales$scales, function(scale) scale$map_df(df = df)), recursive = FALSE) - plyr::quickdf(c(mapped, df[setdiff(names(df), names(mapped))])) + new_data_frame(c(mapped, df[setdiff(names(df), names(mapped))])) } # Transform values to cardinal representation @@ -83,7 +83,7 @@ scales_transform_df <- function(scales, df) { transformed <- unlist(lapply(scales$scales, function(s) s$transform_df(df = df)), recursive = FALSE) - plyr::quickdf(c(transformed, df[setdiff(names(df), names(transformed))])) + new_data_frame(c(transformed, df[setdiff(names(df), names(transformed))])) } # @param aesthetics A list of aesthetic-variable mappings. 
The name of each diff --git a/R/stat-.r b/R/stat-.r index d6d02d0599..f1b1b77985 100644 --- a/R/stat-.r +++ b/R/stat-.r @@ -86,7 +86,7 @@ Stat <- ggproto("Stat", params <- params[intersect(names(params), self$parameters())] args <- c(list(data = quote(data), scales = quote(scales)), params) - plyr::ddply(data, "PANEL", function(data) { + dapply(data, "PANEL", function(data) { scales <- layout$get_scales(data$PANEL[1]) tryCatch(do.call(self$compute_panel, args), error = function(e) { warning("Computation failed in `", snake_class(self), "()`:\n", @@ -114,7 +114,7 @@ Stat <- ggproto("Stat", ) }, stats, groups, SIMPLIFY = FALSE) - do.call(plyr::rbind.fill, stats) + rbind_dfs(stats) }, compute_group = function(self, data, scales) { diff --git a/R/stat-bin2d.r b/R/stat-bin2d.r index 376a6b595c..8ace8f25e5 100644 --- a/R/stat-bin2d.r +++ b/R/stat-bin2d.r @@ -127,7 +127,7 @@ bin2d_breaks <- function(scale, breaks = NULL, origin = NULL, binwidth = NULL, stopifnot(is.numeric(binwidth), length(binwidth) == 1) if (is.null(origin) || identical(origin, NA)) { - origin <- plyr::round_any(range[1], binwidth, floor) + origin <- round_any(range[1], binwidth, floor) } stopifnot(is.numeric(origin), length(origin) == 1) diff --git a/R/stat-bindot.r b/R/stat-bindot.r index 23bfae1f37..1c5fb1c17e 100644 --- a/R/stat-bindot.r +++ b/R/stat-bindot.r @@ -36,15 +36,15 @@ StatBindot <- ggproto("StatBindot", Stat, newdata <- densitybin(x = data$x, weight = data$weight, binwidth = binwidth, method = method) - data <- plyr::arrange(data, x) - newdata <- plyr::arrange(newdata, x) + data <- data[order(data$x), ] + newdata <- newdata[order(newdata$x), ] } else if (binaxis == "y") { newdata <- densitybin(x = data$y, weight = data$weight, binwidth = binwidth, method = method) - data <- plyr::arrange(data, y) - newdata <- plyr::arrange(newdata, x) + data <- data[order(data$y), ] + newdata <- newdata[order(newdata$x), ] } data$bin <- newdata$bin @@ -109,7 +109,12 @@ StatBindot <- ggproto("StatBindot", Stat, method = method, range = range) # Collapse each bin and get a count - data <- plyr::ddply(data, "bincenter", plyr::summarise, binwidth = binwidth[1], count = sum(weight)) + data <- dapply(data, "bincenter", function(x) { + new_data_frame(list( + binwidth = .subset2(x, "binwidth")[1], + count = sum(.subset2(x, "weight")) + )) + }) if (sum(data$count, na.rm = TRUE) != 0) { data$count[is.na(data$count)] <- 0 @@ -168,7 +173,7 @@ densitybin <- function(x, weight = NULL, binwidth = NULL, method = method, range binwidth = binwidth, weight = weight ), n = length(x)) - results <- plyr::ddply(results, "bin", function(df) { + results <- dapply(results, "bin", function(df) { df$bincenter = (min(df$x) + max(df$x)) / 2 return(df) }) diff --git a/R/stat-quantile.r b/R/stat-quantile.r index 93389d5d5e..6580144bd3 100644 --- a/R/stat-quantile.r +++ b/R/stat-quantile.r @@ -75,8 +75,8 @@ StatQuantile <- ggproto("StatQuantile", Stat, method <- match.fun(method) - plyr::ldply(quantiles, quant_pred, data = data, method = method, - formula = formula, weight = weight, grid = grid, method.args = method.args) + rbind_dfs(lapply(quantiles, quant_pred, data = data, method = method, + formula = formula, weight = weight, grid = grid, method.args = method.args)) } ) diff --git a/R/stat-sum.r b/R/stat-sum.r index b03274a4a2..cbb4bfe80b 100644 --- a/R/stat-sum.r +++ b/R/stat-sum.r @@ -42,8 +42,8 @@ StatSum <- ggproto("StatSum", Stat, group_by <- setdiff(intersect(names(data), ggplot_global$all_aesthetics), "weight") - counts <- plyr::count(data, group_by, 
wt_var = "weight") - counts <- plyr::rename(counts, c(freq = "n"), warn_missing = FALSE) + counts <- count(data, group_by, wt_var = "weight") + counts <- rename(counts, c(freq = "n")) counts$prop <- stats::ave(counts$n, counts$group, FUN = prop.table) counts } diff --git a/R/stat-summary-bin.R b/R/stat-summary-bin.R index 77ef6a86f5..0aa8a2dcaa 100644 --- a/R/stat-summary-bin.R +++ b/R/stat-summary-bin.R @@ -55,7 +55,7 @@ StatSummaryBin <- ggproto("StatSummaryBin", Stat, breaks <- bin2d_breaks(scales$x, breaks, origin, binwidth, bins, right = right) data$bin <- cut(data$x, breaks, include.lowest = TRUE, labels = FALSE) - out <- plyr::ddply(data, "bin", fun) + out <- dapply(data, "bin", fun) locs <- bin_loc(breaks, out$bin) out$x <- locs$mid diff --git a/R/stat-summary.r b/R/stat-summary.r index 02f570fa0d..27ed095e3f 100644 --- a/R/stat-summary.r +++ b/R/stat-summary.r @@ -161,8 +161,8 @@ StatSummary <- ggproto("StatSummary", Stat, # @param other arguments passed on to summary function # @keyword internal summarise_by_x <- function(data, summary, ...) { - summary <- plyr::ddply(data, c("group", "x"), summary, ...) - unique <- plyr::ddply(data, c("group", "x"), uniquecols) + summary <- dapply(data, c("group", "x"), summary, ...) + unique <- dapply(data, c("group", "x"), uniquecols) unique$y <- NULL merge(summary, unique, by = c("x", "group"), sort = FALSE) @@ -201,10 +201,9 @@ wrap_hmisc <- function(fun) { fun <- getExportedValue("Hmisc", fun) result <- do.call(fun, list(x = quote(x), ...)) - plyr::rename( + rename( new_data_frame(as.list(result)), - c(Median = "y", Mean = "y", Lower = "ymin", Upper = "ymax"), - warn_missing = FALSE + c(Median = "y", Mean = "y", Lower = "ymin", Upper = "ymax") ) } } diff --git a/R/utilities-matrix.r b/R/utilities-matrix.r index 27796d3be5..d0e9ed0d5d 100644 --- a/R/utilities-matrix.r +++ b/R/utilities-matrix.r @@ -15,20 +15,20 @@ cunion <- function(a, b) { interleave <- function(...) UseMethod("interleave") #' @export interleave.unit <- function(...) { - do.call("unit.c", do.call("interleave.default", plyr::llply(list(...), as.list))) + do.call("unit.c", do.call("interleave.default", lapply(list(...), as.list))) } #' @export interleave.default <- function(...) { vectors <- list(...) 
# Check lengths - lengths <- unique(setdiff(plyr::laply(vectors, length), 1)) + lengths <- unique(setdiff(vapply(vectors, length, integer(1)), 1L)) if (length(lengths) == 0) lengths <- 1 stopifnot(length(lengths) <= 1) # Replicate elements of length one up to correct length - singletons <- plyr::laply(vectors, length) == 1 - vectors[singletons] <- plyr::llply(vectors[singletons], rep, lengths) + singletons <- vapply(vectors, length, integer(1)) == 1L + vectors[singletons] <- lapply(vectors[singletons], rep, lengths) # Interleave vectors n <- lengths diff --git a/R/utilities.r b/R/utilities.r index 0a9037574d..b99c949a75 100644 --- a/R/utilities.r +++ b/R/utilities.r @@ -337,7 +337,15 @@ firstUpper <- function(s) { snake_class <- function(x) { snakeize(class(x)[1]) } - +#' Is a data.frame empty +#' +#' An empty data.frame is defined as either `NULL` or a data.frame with zero +#' rows or columns +#' +#' @param df A data.frame or `NULL` +#' +#' @keywords internal +#' @export empty <- function(df) { is.null(df) || nrow(df) == 0 || ncol(df) == 0 } diff --git a/man/aes_group_order.Rd b/man/aes_group_order.Rd index ff9081ab9c..7da2cbeb8d 100644 --- a/man/aes_group_order.Rd +++ b/man/aes_group_order.Rd @@ -37,7 +37,7 @@ a + geom_bar(aes(fill = factor(vs))) rescale01 <- function(x) (x - min(x)) / diff(range(x)) ec_scaled <- data.frame( date = economics$date, - plyr::colwise(rescale01)(economics[, -(1:2)])) + lapply(economics[, -(1:2)], rescale01)) ecm <- reshape2::melt(ec_scaled, id.vars = "date") f <- ggplot(ecm, aes(date, value)) f + geom_line(aes(linetype = variable)) diff --git a/man/borders.Rd b/man/borders.Rd index 212d100f5a..dfed9832c8 100644 --- a/man/borders.Rd +++ b/man/borders.Rd @@ -66,7 +66,9 @@ if (require("maps")) { ia <- map_data("county", "iowa") mid_range <- function(x) mean(range(x)) -seats <- plyr::ddply(ia, "subregion", plyr::colwise(mid_range, c("lat", "long"))) +seats <- do.call(rbind, lapply(split(ia, ia$subregion), function(d) { + data.frame(lat = mid_range(d$lat), long = mid_range(d$long)) +})) ggplot(ia, aes(long, lat)) + geom_polygon(aes(group = group), fill = NA, colour = "grey60") + geom_text(aes(label = subregion), data = seats, size = 2, angle = 45) diff --git a/man/empty.Rd b/man/empty.Rd new file mode 100644 index 0000000000..c15c7df494 --- /dev/null +++ b/man/empty.Rd @@ -0,0 +1,16 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/utilities.r +\name{empty} +\alias{empty} +\title{Is a data.frame empty} +\usage{ +empty(df) +} +\arguments{ +\item{df}{A data.frame or \code{NULL}} +} +\description{ +An empty data.frame is defined as either \code{NULL} or a data.frame with zero +rows or columns +} +\keyword{internal} diff --git a/man/ggplot.Rd b/man/ggplot.Rd index 280267e665..b3afa2bac9 100644 --- a/man/ggplot.Rd +++ b/man/ggplot.Rd @@ -55,7 +55,9 @@ df <- data.frame( gp = factor(rep(letters[1:3], each = 10)), y = rnorm(30) ) -ds <- plyr::ddply(df, "gp", plyr::summarise, mean = mean(y), sd = sd(y)) +ds <- do.call(rbind, lapply(split(df, df$gp), function(d) { + data.frame(mean = mean(d$y), sd = sd(d$y), gp = d$gp) +})) # The summary data frame ds is used to plot larger red points on top # of the raw data. 
Note that we don't need to supply `data` or `mapping` diff --git a/man/labeller.Rd b/man/labeller.Rd index 354f2851cf..b817569b37 100644 --- a/man/labeller.Rd +++ b/man/labeller.Rd @@ -89,8 +89,7 @@ p2 + facet_grid(vore ~ conservation, labeller = labeller( # In the following example, we rename the levels to the long form, # then apply a wrap labeller to the columns to prevent cropped text -msleep$conservation2 <- plyr::revalue(msleep$conservation, - conservation_status) +msleep$conservation2 <- revalue(msleep$conservation, conservation_status) p3 <- ggplot(msleep, aes(x = sleep_total, y = awake)) + geom_point() p3 + facet_grid(vore ~ conservation2, diff --git a/tests/testthat/helper-plot-data.r b/tests/testthat/helper-plot-data.r index b79b851d14..bc1f81f2c9 100644 --- a/tests/testthat/helper-plot-data.r +++ b/tests/testthat/helper-plot-data.r @@ -3,7 +3,7 @@ cdata <- function(plot) { pieces <- ggplot_build(plot) lapply(pieces$data, function(d) { - plyr::ddply(d, "PANEL", function(panel_data) { + dapply(d, "PANEL", function(panel_data) { scales <- pieces$layout$get_scales(panel_data$PANEL[1]) panel_params <- plot$coordinates$setup_panel_params(scales$x, scales$y) plot$coordinates$transform(panel_data, panel_params) diff --git a/tests/testthat/test-fortify.r b/tests/testthat/test-fortify.r index b7370b284d..d6510493f8 100644 --- a/tests/testthat/test-fortify.r +++ b/tests/testthat/test-fortify.r @@ -31,8 +31,9 @@ test_that("spatial polygons have correct ordering", { polys2 <- rev(polys) polys2_sp <- sp::SpatialPolygons(polys2) fake_sp2 <- sp::SpatialPolygonsDataFrame(polys2_sp, fake_data) + fake_sp2_fortified <- fortify(fake_sp2) - expect_equivalent(fortify(fake_sp), plyr::arrange(fortify(fake_sp2), id, order)) + expect_equivalent(fortify(fake_sp), fake_sp2_fortified[order(fake_sp2_fortified$id, fake_sp2_fortified$order), ]) }) test_that("fortify.default proves a helpful error with class uneval", { diff --git a/tests/testthat/test-layer.r b/tests/testthat/test-layer.r index 352d41b88a..f07228adec 100644 --- a/tests/testthat/test-layer.r +++ b/tests/testthat/test-layer.r @@ -42,7 +42,7 @@ test_that("if an aes is mapped to a function that returns NULL, it is removed", df <- data_frame(x = 1:10) null <- function(...) NULL p <- cdata(ggplot(df, aes(x, null()))) - expect_identical(names(p[[1]]), c("x", "PANEL", "group")) + expect_identical(names(p[[1]]), c("PANEL", "x", "group")) }) # Data extraction --------------------------------------------------------- diff --git a/vignettes/extending-ggplot2.Rmd b/vignettes/extending-ggplot2.Rmd index 252db30a11..f9714583d9 100644 --- a/vignettes/extending-ggplot2.Rmd +++ b/vignettes/extending-ggplot2.Rmd @@ -593,7 +593,7 @@ In order for ggplot2 to know which data should go where it needs the data to be ```{r} mapping <- function(data, layout, params) { - if (plyr::empty(data)) { + if (empty(data)) { return(cbind(data, PANEL = integer(0))) } rbind( @@ -737,7 +737,7 @@ FacetTrans <- ggproto("FacetTrans", Facet, }, # Same as before map_data = function(data, layout, params) { - if (plyr::empty(data)) { + if (empty(data)) { return(cbind(data, PANEL = integer(0))) } rbind( @@ -749,7 +749,7 @@ FacetTrans <- ggproto("FacetTrans", Facet, init_scales = function(layout, x_scale = NULL, y_scale = NULL, params) { scales <- list() if (!is.null(x_scale)) { - scales$x <- plyr::rlply(max(layout$SCALE_X), x_scale$clone()) + scales$x <- lapply(seq_len(max(layout$SCALE_X)), function(i) x_scale$clone()) } if (!is.null(y_scale)) { y_scale_orig <- y_scale$clone()