diff --git a/DESCRIPTION b/DESCRIPTION index 68e928087..f0aa77b24 100644 --- a/DESCRIPTION +++ b/DESCRIPTION @@ -40,6 +40,7 @@ Collate: 'nested.R' 'nested_to_tree.R' 'parsed.R' + 'token.R' 'relevel.R' 'rules-line_break.R' 'rules-other.R' diff --git a/R/get_transformers.R b/R/get_transformers.R index 730a23c4c..adb2fb7f6 100644 --- a/R/get_transformers.R +++ b/R/get_transformers.R @@ -83,7 +83,6 @@ get_transformers_nested <- function( partial(indent_curly, indent_by = indent_by), partial(indent_op, indent_by = indent_by), partial(indent_without_paren, indent_by = indent_by), - partial(indent_assign, indent_by = indent_by), get_transformers_flat(strict, start_comments_with_one_space), remove_space_after_unary_pm_nested, set_space_before_comments, @@ -112,7 +111,6 @@ get_transformers_nested <- function( line_break = line_break_manipulators, space = space_manipulators, token = token_manipulators, - eol = strip_eol_spaces, NULL ) } diff --git a/R/modify_pd.R b/R/modify_pd.R index 7d0b7e295..b9d2e6040 100644 --- a/R/modify_pd.R +++ b/R/modify_pd.R @@ -23,7 +23,8 @@ indent_curly <- function(pd, indent_by) { #' @rdname update_indention indent_op <- function(pd, indent_by, token = c(math_token, - "SPECIAL-PIPE")) { + "SPECIAL-PIPE", + "LEFT_ASSIGN")) { indent_indices <- compute_indent_indices(pd, token, indent_last = TRUE) pd$indent[indent_indices] <- pd$indent[indent_indices] + indent_by pd @@ -31,8 +32,7 @@ indent_op <- function(pd, indent_by, token = c(math_token, #' @describeIn update_indention Same as indent_op, but only indents one token #' after `token`, not all remaining. -indent_assign <- function(pd, indent_by, token = c("LEFT_ASSIGN", " - EQ_ASSIGN")) { +indent_assign <- function(pd, indent_by, token = NULL) { indent_indices <- compute_indent_indices(pd, token, indent_last = TRUE) pd$indent[indent_indices] <- pd$indent[indent_indices] + indent_by pd @@ -43,7 +43,7 @@ indent_assign <- function(pd, indent_by, token = c("LEFT_ASSIGN", " indent_without_paren <- function(pd, indent_by = 2) { nrow <- nrow(pd) if (!(pd$token[1] %in% c("IF", "FOR", "WHILE"))) return(pd) - if (pd$child[[nrow]]$token[1] == "'{'") return(pd) + if (pd$lag_newlines[nrow] == 0) return(pd) pd$indent[nrow] <- indent_by pd } @@ -110,15 +110,3 @@ set_multi_line <- function(pd) { token_is_multi_line <- function(pd) { any(pd$multi_line, pd$lag_newlines > 0) } - - -#' Strip EOL spaces -#' -#' Remove end-of-line spaces. -#' @param pd_flat A flat parse table. -#' @return A nested parse table. -strip_eol_spaces <- function(pd_flat) { - idx <- lead(pd_flat$lag_newlines, default = 0) != 0 - pd_flat$spaces[idx] <- 0 - pd_flat -} diff --git a/R/nested.R b/R/nested.R index 2c8070ba0..2f05750dc 100644 --- a/R/nested.R +++ b/R/nested.R @@ -69,26 +69,6 @@ special_and <- function(text) { paste0("SPECIAL-", text) } - -#' lookup which new tokens were created from "SPECIAL" -#' -#' @param regex A regular expression pattern to search for. -#' @importFrom purrr map_chr -lookup_new_special <- function(regex = NA) { - new_special <- c("PIPE", "IN", "OTHER") - - potential_regex <- grep(regex, new_special, value = TRUE, ignore.case = TRUE) - if (is.na(regex)) { - mapping <- new_special - } else if (length(potential_regex) > 0) { - mapping <- potential_regex - } else { - return(NA) - } - map_chr(mapping, special_and) -} - - #' Add information about previous / next token to each terminal #' #' @param pd_flat A flat parse table. 
@@ -131,7 +111,6 @@ set_spaces <- function(spaces_after_prefix, force_one) { n_of_spaces <- rep(1, length(spaces_after_prefix)) } else { n_of_spaces <- pmax(spaces_after_prefix, 1L) - n_of_spaces[spaces_after_prefix == 0L] <- 0L } n_of_spaces } diff --git a/R/relevel.R b/R/relevel.R index a0876ca47..178714de4 100644 --- a/R/relevel.R +++ b/R/relevel.R @@ -12,19 +12,56 @@ flatten_operators <- function(pd_nested) { post_visit(c(flatten_operators_one)) } - +#' Flatten one level of nesting with its child +#' +#' Flattening is done in two ways. We can flatten a parse table by moving +#' the left-hand token of an operator one level up, or by doing the same with +#' the right-hand token. +#' @param pd_nested A nested parse table. +#' @include token.R flatten_operators_one <- function(pd_nested) { - token <- c("'+'", "'-'", special_token, "'/'", "'*'") - token_pos <- which(pd_nested$token %in% token) - if (length(token_pos) == 0) return(pd_nested) - stopifnot(length(token_pos) == 1) + pd_token_left <- c(special_token, math_token) + pd_token_right <- c(special_token, left_assignment_token, "'+'", "'-'") + bound <- pd_nested %>% + flatten_pd(pd_token_left, left = TRUE) %>% + flatten_pd(pd_token_right, left = FALSE) + bound +} + - lhs_pos <- token_pos - 1L - if (lhs_pos < 1) return(pd_nested) - if (!any(pd_nested$child[[lhs_pos]]$token %in% token)) return(pd_nested) +#' Flatten a parse table +#' +#' Flattens a parse table if certain tokens occur in this table or its child, +#' either flattening from left or from right. If one of `token` is present in +#' `pd_nested` and one of `child_token` is present in one of the children next +#' to `token` in `pd_nested`, the nested parse table is flattened. Otherwise, it +#' is returned unmodified. +#' @param pd_nested A nested parse table. +#' @param token A character vector with tokens of which at least one has to +#' occur in `pd_nested` in order to flatten it. +#' @param child_token A character vector of tokens of which at least one has to +#' occur in the child in order to flatten the parse table. +#' @param left Flag that indicates whether the parse table should be flattened +#' from left or from right. +flatten_pd <- function(pd_nested, token, child_token = token, left = TRUE) { + token_pos <- which(pd_nested$token[-1] %in% token) + 1 + if (length(token_pos) == 0) return(pd_nested) + pos <- token_pos[ifelse(left, 1, length(token_pos))] + ifelse(left, -1L, 1L) + if (pos < 1) return(pd_nested) + if (!any(pd_nested$child[[pos]]$token[-1] %in% child_token)) return(pd_nested) + bind_with_child(pd_nested, pos) +} +#' Bind a parse table with one of its children +#' +#' Bind a parse table with one of its children and return the **unordered** +#' parse table (that is, rows are not arranged according to line1 / line2). +#' @param pd_nested A nested parse table. +#' @param pos The position of the child to bind.
+bind_with_child <- function(pd_nested, pos) { pd_nested %>% - slice(-lhs_pos) %>% - bind_rows(pd_nested$child[[lhs_pos]]) %>% + slice(-pos) %>% + bind_rows(pd_nested$child[[pos]]) %>% arrange(line1, col1) } + diff --git a/R/rules-spacing.R b/R/rules-spacing.R index cfcd1e723..1e9f34a21 100644 --- a/R/rules-spacing.R +++ b/R/rules-spacing.R @@ -1,18 +1,4 @@ -math_token <- c("'+'", "'-'", "'*'", "'/'", "'^'") - -#' @include nested.R -special_token <- lookup_new_special() - -op_token <- c( - math_token, - special_token, - "AND", "AND2", "EQ", "EQ_ASSIGN", - "GE", "GT", "LE", "LEFT_ASSIGN", "LT", "NE", "OR", "OR2", "RIGHT_ASSIGN", - "EQ_SUB", "ELSE" -) - - - +#' @include token.R add_space_around_op <- function(pd_flat) { op_after <- pd_flat$token %in% op_token op_before <- lead(op_after, default = FALSE) @@ -23,6 +9,7 @@ add_space_around_op <- function(pd_flat) { pd_flat } +#' @include token.R set_space_around_op <- function(pd_flat) { op_after <- pd_flat$token %in% op_token if (!any(op_after)) return(pd_flat) @@ -33,6 +20,7 @@ set_space_around_op <- function(pd_flat) { } # depreciated! +#' @include token.R remove_space_after_unary_pm <- function(pd_flat) { op_pm <- c("'+'", "'-'") op_pm_unary_after <- c(op_pm, op_token, "'('", "','") diff --git a/R/serialize.R b/R/serialize.R index d8ebabe9a..6efe8d68f 100644 --- a/R/serialize.R +++ b/R/serialize.R @@ -41,7 +41,8 @@ serialize_parse_data_nested <- function(pd_nested) { unlist() %>% paste0(collapse = "") %>% strsplit("\n", fixed = TRUE) %>% - .[[1L]] + .[[1L]] %>% + trimws(which = "right") out } diff --git a/R/serialized_tests.R b/R/serialized_tests.R index 8cac75340..a175e1fca 100644 --- a/R/serialized_tests.R +++ b/R/serialized_tests.R @@ -139,7 +139,6 @@ style_indent_round <- function(text) { line_break = NULL, space = partial(indent_round, indent_by = 2), token = NULL, - eol = strip_eol_spaces, NULL ) transformed_text <- parse_transform_serialize(text, transformers) @@ -157,7 +156,6 @@ style_empty <- function(text) { line_break = NULL, space = NULL, token = NULL, - eol = strip_eol_spaces, NULL ) transformed_text <- parse_transform_serialize(text, transformers) @@ -173,7 +171,6 @@ style_indent_curly <- function(text) { line_break = NULL, space = partial(indent_curly, indent_by = 2), token = NULL, - eol = strip_eol_spaces, NULL ) transformed_text <- parse_transform_serialize(text, transformers) @@ -190,7 +187,6 @@ style_indent_curly_round <- function(text) { space = c(partial(indent_curly, indent_by = 2), partial(indent_round, indent_by = 2)), token = NULL, - eol = strip_eol_spaces, NULL ) @@ -209,7 +205,6 @@ style_op <- function(text) { line_break = NULL, space = partial(indent_op, indent_by = 2), token = NULL, - eol = strip_eol_spaces, NULL ) diff --git a/R/token.R b/R/token.R new file mode 100644 index 000000000..927ac9571 --- /dev/null +++ b/R/token.R @@ -0,0 +1,58 @@ +token <- tribble( + ~text, ~class, ~token, + "&" , "logical" , "AND", + "&&" , "logical" , "AND2", + "|" , "logical" , "OR", + "||" , "logical" , "OR2", + ">" , "logical" , "GT", + "<" , "logical" , "LT", + "<=" , "logical" , "LE", + ">=" , "logical" , "GE", + "!=" , "logical" , "NE", + "==" , "logical" , "EQ", + "=" , "assign_left" , "EQ_ASSIGN", + "<-" , "assign_left" , "LEFT_ASSIGN", + "->" , "assign_right", "RIGHT_ASSIGN", + "+" , "math" , "'+'", + "-" , "math" , "'-'", + "*" , "math" , "'*'", + "/" , "math" , "'/'", + "^" , "math" , "'^'" +) + +math_token <- token$token[token$class == "math"] +logical_token <- token$token[token$class == "logical"] 
+left_assignment_token <- token$token[token$class == "assign_left"] +right_assignment_token <- token$token[token$class == "assign_right"] + +#' lookup which new tokens were created from "SPECIAL" +#' +#' @param regex A regular expression pattern to search for. +#' @importFrom purrr map_chr +lookup_new_special <- function(regex = NA) { + new_special <- c("PIPE", "IN", "OTHER") + + potential_regex <- grep(regex, new_special, value = TRUE, ignore.case = TRUE) + if (is.na(regex)) { + mapping <- new_special + } else if (length(potential_regex) > 0) { + mapping <- potential_regex + } else { + return(NA) + } + map_chr(mapping, special_and) +} + +special_token <- lookup_new_special() + +op_token <- c( + math_token, + special_token, + logical_token, + left_assignment_token, + right_assignment_token, + "EQ_SUB", "ELSE" +) + + + diff --git a/R/transform.R b/R/transform.R index b442da40e..54e0de332 100644 --- a/R/transform.R +++ b/R/transform.R @@ -112,8 +112,6 @@ apply_transformers <- function(pd_nested, transformers) { c(set_multi_line)) transformed_all <- pre_visit(transformed_updated_multi_line, - c(transformers$space, - transformers$token, - transformers$eol)) + c(transformers$space, transformers$token)) transformed_all } diff --git a/man/bind_with_child.Rd b/man/bind_with_child.Rd new file mode 100644 index 000000000..a436f3d6a --- /dev/null +++ b/man/bind_with_child.Rd @@ -0,0 +1,17 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/relevel.R +\name{bind_with_child} +\alias{bind_with_child} +\title{Bind a parse table with one of its children} +\usage{ +bind_with_child(pd_nested, pos) +} +\arguments{ +\item{pd_nested}{A nested parse table.} + +\item{pos}{The position of the child to bind.} +} +\description{ +Bind a parse table with one of its children and return the \strong{unordered} +parse table (that is, rows are not arranged according to line1 / line2). +} diff --git a/man/flatten_operators_one.Rd b/man/flatten_operators_one.Rd new file mode 100644 index 000000000..619f17372 --- /dev/null +++ b/man/flatten_operators_one.Rd @@ -0,0 +1,16 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/relevel.R +\name{flatten_operators_one} +\alias{flatten_operators_one} +\title{Flatten one level of nesting with its child} +\usage{ +flatten_operators_one(pd_nested) +} +\arguments{ +\item{pd_nested}{A nested parse table.} +} +\description{ +Flattening is done in two ways. We can flatten a parse table by moving +the left-hand token of an operator one level up, or by doing the same with +the right-hand token. +} diff --git a/man/flatten_pd.Rd b/man/flatten_pd.Rd new file mode 100644 index 000000000..44d74771b --- /dev/null +++ b/man/flatten_pd.Rd @@ -0,0 +1,27 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/relevel.R +\name{flatten_pd} +\alias{flatten_pd} +\title{Flatten a parse table} +\usage{ +flatten_pd(pd_nested, token, child_token = token, left = TRUE) +} +\arguments{ +\item{pd_nested}{A nested parse table.} + +\item{token}{A character vector with tokens of which at least one has to +occur in \code{pd_nested} in order to flatten it.} + +\item{child_token}{A character vector of tokens of which at least one has to +occur in the child in order to flatten the parse table.} + +\item{left}{Flag that indicates whether the parse table should be flattened +from left or from right.} +} +\description{ +Flattens a parse table if certain tokens occur in this table or its child, +either flattening from left or from right.
If one of \code{token} is present in +\code{pd_nested} and one of \code{child_token} is present in one of the children next +to \code{token} in \code{pd_nested}, the nested parse table is flattened. Otherwise, it +is returned unmodified. +} diff --git a/man/lookup_new_special.Rd b/man/lookup_new_special.Rd index 8cab680d8..6746e4db1 100644 --- a/man/lookup_new_special.Rd +++ b/man/lookup_new_special.Rd @@ -1,5 +1,5 @@ % Generated by roxygen2: do not edit by hand -% Please edit documentation in R/nested.R +% Please edit documentation in R/token.R \name{lookup_new_special} \alias{lookup_new_special} \title{lookup which new tokens were created from "SPECIAL"} diff --git a/man/strip_eol_spaces.Rd b/man/strip_eol_spaces.Rd deleted file mode 100644 index 503d8cd6f..000000000 --- a/man/strip_eol_spaces.Rd +++ /dev/null @@ -1,17 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/modify_pd.R -\name{strip_eol_spaces} -\alias{strip_eol_spaces} -\title{Strip EOL spaces} -\usage{ -strip_eol_spaces(pd_flat) -} -\arguments{ -\item{pd_flat}{A flat parse table.} -} -\value{ -A nested parse table. -} -\description{ -Remove end-of-line spaces. -} diff --git a/man/update_indention.Rd b/man/update_indention.Rd index 10401fa8f..af9bb0f66 100644 --- a/man/update_indention.Rd +++ b/man/update_indention.Rd @@ -13,10 +13,9 @@ indent_round(pd, indent_by) indent_curly(pd, indent_by) -indent_op(pd, indent_by, token = c(math_token, "SPECIAL-PIPE")) +indent_op(pd, indent_by, token = c(math_token, "SPECIAL-PIPE", "LEFT_ASSIGN")) -indent_assign(pd, indent_by, token = c("LEFT_ASSIGN", - "\\n EQ_ASSIGN")) +indent_assign(pd, indent_by, token = NULL) indent_without_paren(pd, indent_by = 2) } diff --git a/tests/testthat/indention_multiple/overall-out.R b/tests/testthat/indention_multiple/overall-out.R index 2f7f3ff22..55f400060 100644 --- a/tests/testthat/indention_multiple/overall-out.R +++ b/tests/testthat/indention_multiple/overall-out.R @@ -1,5 +1,5 @@ #' this function does -#' +#' #' @param x a parameter.
#' indented comments a <- function(x) { diff --git a/tests/testthat/indention_operators/pipe_and_assignment-in.R b/tests/testthat/indention_operators/pipe_and_assignment-in.R new file mode 100644 index 000000000..e6cbe6e92 --- /dev/null +++ b/tests/testthat/indention_operators/pipe_and_assignment-in.R @@ -0,0 +1,8 @@ +a <- +b() %>% + q() %>% + g() + +a <- b() %>% + c()%>% +ggg() diff --git a/tests/testthat/indention_operators/pipe_and_assignment-in_tree b/tests/testthat/indention_operators/pipe_and_assignment-in_tree new file mode 100644 index 000000000..a79b0ee01 --- /dev/null +++ b/tests/testthat/indention_operators/pipe_and_assignment-in_tree @@ -0,0 +1,43 @@ +ROOT (token: short_text [lag_newlines/spaces] {id}) + ¦--expr: [0/0] {30} + ¦ ¦--expr: [0/4] {3} + ¦ ¦ °--SYMBOL: a [0/0] {1} + ¦ ¦--LEFT_ASSIGN: <- [0/0] {2} + ¦ ¦--expr: [1/1] {10} + ¦ ¦ ¦--expr: [0/0] {7} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: b [0/0] {5} + ¦ ¦ ¦--'(': ( [0/0] {6} + ¦ ¦ °--')': ) [0/0] {8} + ¦ ¦--SPECIAL-PIPE: %>% [0/2] {11} + ¦ ¦--expr: [1/1] {18} + ¦ ¦ ¦--expr: [0/0] {15} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: q [0/0] {13} + ¦ ¦ ¦--'(': ( [0/0] {14} + ¦ ¦ °--')': ) [0/0] {16} + ¦ ¦--SPECIAL-PIPE: %>% [0/5] {19} + ¦ °--expr: [1/0] {27} + ¦ ¦--expr: [0/0] {24} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: g [0/0] {22} + ¦ ¦--'(': ( [0/0] {23} + ¦ °--')': ) [0/0] {25} + °--expr: [2/0] {63} + ¦--expr: [0/1] {37} + ¦ °--SYMBOL: a [0/0] {35} + ¦--LEFT_ASSIGN: <- [0/4] {36} + ¦--expr: [0/1] {43} + ¦ ¦--expr: [0/0] {40} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: b [0/0] {38} + ¦ ¦--'(': ( [0/0] {39} + ¦ °--')': ) [0/0] {41} + ¦--SPECIAL-PIPE: %>% [0/2] {44} + ¦--expr: [1/0] {51} + ¦ ¦--expr: [0/0] {48} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: c [0/0] {46} + ¦ ¦--'(': ( [0/0] {47} + ¦ °--')': ) [0/0] {49} + ¦--SPECIAL-PIPE: %>% [0/0] {52} + °--expr: [1/0] {60} + ¦--expr: [0/0] {57} + ¦ °--SYMBOL_FUNCTION_CALL: ggg [0/0] {55} + ¦--'(': ( [0/0] {56} + °--')': ) [0/0] {58} diff --git a/tests/testthat/indention_operators/pipe_and_assignment-out.R b/tests/testthat/indention_operators/pipe_and_assignment-out.R new file mode 100644 index 000000000..de72fdec5 --- /dev/null +++ b/tests/testthat/indention_operators/pipe_and_assignment-out.R @@ -0,0 +1,8 @@ +a <- + b() %>% + q() %>% + g() + +a <- b() %>% + c() %>% + ggg() diff --git a/tests/testthat/indention_operators/pipe_and_assignment_and_math-in.R b/tests/testthat/indention_operators/pipe_and_assignment_and_math-in.R new file mode 100644 index 000000000..4005fce03 --- /dev/null +++ b/tests/testthat/indention_operators/pipe_and_assignment_and_math-in.R @@ -0,0 +1,5 @@ +q <- a+ + - 3 + +2+ +g()%>% + k() diff --git a/tests/testthat/indention_operators/pipe_and_assignment_and_math-in_tree b/tests/testthat/indention_operators/pipe_and_assignment_and_math-in_tree new file mode 100644 index 000000000..926bcaceb --- /dev/null +++ b/tests/testthat/indention_operators/pipe_and_assignment_and_math-in_tree @@ -0,0 +1,27 @@ +ROOT (token: short_text [lag_newlines/spaces] {id}) + °--expr: [0/0] {37} + ¦--expr: [0/1] {3} + ¦ °--SYMBOL: q [0/0] {1} + ¦--LEFT_ASSIGN: <- [0/2] {2} + ¦--expr: [0/0] {6} + ¦ °--SYMBOL: a [0/0] {4} + ¦--'+': + [0/2] {5} + ¦--expr: [1/1] {12} + ¦ ¦--'-': - [0/1] {8} + ¦ °--expr: [0/0] {10} + ¦ °--NUM_CONST: 3 [0/0] {9} + ¦--'+': + [0/0] {11} + ¦--expr: [1/0] {16} + ¦ °--NUM_CONST: 2 [0/0] {15} + ¦--'+': + [0/0] {17} + ¦--expr: [1/0] {25} + ¦ ¦--expr: [0/0] {22} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: g [0/0] {20} + ¦ ¦--'(': ( [0/0] {21} + ¦ °--')': ) [0/0] {23} + ¦--SPECIAL-PIPE: %>% [0/3] {26} + °--expr: [1/0] {33} + 
¦--expr: [0/0] {30} + ¦ °--SYMBOL_FUNCTION_CALL: k [0/0] {28} + ¦--'(': ( [0/0] {29} + °--')': ) [0/0] {31} diff --git a/tests/testthat/indention_operators/pipe_and_assignment_and_math-out.R b/tests/testthat/indention_operators/pipe_and_assignment_and_math-out.R new file mode 100644 index 000000000..172735d82 --- /dev/null +++ b/tests/testthat/indention_operators/pipe_and_assignment_and_math-out.R @@ -0,0 +1,5 @@ +q <- a + + -3 + + 2 + + g() %>% + k() diff --git a/tests/testthat/indention_operators/pipe_simple-out.R b/tests/testthat/indention_operators/pipe_simple-out.R index 981cd3d4d..cddf83aa0 100644 --- a/tests/testthat/indention_operators/pipe_simple-out.R +++ b/tests/testthat/indention_operators/pipe_simple-out.R @@ -1,7 +1,8 @@ a %>% b() %>% c() %>% - d(1 + e (sin(f))) %>% + d(1 + e(sin(f))) %>% g_out() -a <- function(jon_the_pipe) {} +a <- function(jon_the_pipe) { +} diff --git a/tests/testthat/test-indention_operators.R b/tests/testthat/test-indention_operators.R index c646b75b4..0b74ce199 100644 --- a/tests/testthat/test-indention_operators.R +++ b/tests/testthat/test-indention_operators.R @@ -3,7 +3,7 @@ context("indention operators") test_that("pipe is indended correctly", { expect_warning(test_collection("indention_operators", "pipe", - transformer = style_op, + transformer = style_text, write_back = TRUE), NA) }) diff --git a/tests/testthat/unary_spacing/unary_complex-in_tree b/tests/testthat/unary_spacing/unary_complex-in_tree index 00a1e4bad..6ca662b5d 100644 --- a/tests/testthat/unary_spacing/unary_complex-in_tree +++ b/tests/testthat/unary_spacing/unary_complex-in_tree @@ -16,9 +16,10 @@ ROOT (token: short_text [lag_newlines/spaces] {id}) ¦ ¦ ¦ ¦ ¦ °--expr: [0/0] {27} ¦ ¦ ¦ ¦ ¦ ¦--'(': ( [0/0] {10} ¦ ¦ ¦ ¦ ¦ ¦--expr: [0/0] {25} - ¦ ¦ ¦ ¦ ¦ ¦ ¦--'-': - [0/1] {11} - ¦ ¦ ¦ ¦ ¦ ¦ ¦--expr: [0/1] {13} - ¦ ¦ ¦ ¦ ¦ ¦ ¦ °--NUM_CONST: 3 [0/0] {12} + ¦ ¦ ¦ ¦ ¦ ¦ ¦--expr: [0/1] {15} + ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦--'-': - [0/1] {11} + ¦ ¦ ¦ ¦ ¦ ¦ ¦ °--expr: [0/0] {13} + ¦ ¦ ¦ ¦ ¦ ¦ ¦ °--NUM_CONST: 3 [0/0] {12} ¦ ¦ ¦ ¦ ¦ ¦ ¦--'+': + [0/2] {14} ¦ ¦ ¦ ¦ ¦ ¦ ¦--expr: [0/0] {17} ¦ ¦ ¦ ¦ ¦ ¦ ¦ °--NUM_CONST: 11 [0/0] {16} @@ -55,9 +56,10 @@ ROOT (token: short_text [lag_newlines/spaces] {id}) ¦ ¦ ¦ ¦ °--expr: [0/0] {76} ¦ ¦ ¦ ¦ ¦--'(': ( [0/0] {59} ¦ ¦ ¦ ¦ ¦--expr: [0/0] {74} - ¦ ¦ ¦ ¦ ¦ ¦--'-': - [0/0] {60} - ¦ ¦ ¦ ¦ ¦ ¦--expr: [0/0] {62} - ¦ ¦ ¦ ¦ ¦ ¦ °--NUM_CONST: 3 [0/0] {61} + ¦ ¦ ¦ ¦ ¦ ¦--expr: [0/0] {64} + ¦ ¦ ¦ ¦ ¦ ¦ ¦--'-': - [0/0] {60} + ¦ ¦ ¦ ¦ ¦ ¦ °--expr: [0/0] {62} + ¦ ¦ ¦ ¦ ¦ ¦ °--NUM_CONST: 3 [0/0] {61} ¦ ¦ ¦ ¦ ¦ ¦--'+': + [0/0] {63} ¦ ¦ ¦ ¦ ¦ ¦--expr: [0/0] {66} ¦ ¦ ¦ ¦ ¦ ¦ °--NUM_CONST: 11 [0/0] {65} diff --git a/tests/testthat/unary_spacing/unary_indention-in_tree b/tests/testthat/unary_spacing/unary_indention-in_tree index b2121442f..e351293d4 100644 --- a/tests/testthat/unary_spacing/unary_indention-in_tree +++ b/tests/testthat/unary_spacing/unary_indention-in_tree @@ -25,9 +25,10 @@ ROOT (token: short_text [lag_newlines/spaces] {id}) ¦ ¦ °--NUM_CONST: 1 [0/0] {35} ¦ ¦--'-': - [0/0] {37} ¦ °--expr: [1/0] {57} - ¦ ¦--'-': - [0/0] {41} - ¦ ¦--expr: [0/1] {43} - ¦ ¦ °--NUM_CONST: 1 [0/0] {42} + ¦ ¦--expr: [0/1] {45} + ¦ ¦ ¦--'-': - [0/0] {41} + ¦ ¦ °--expr: [0/0] {43} + ¦ ¦ °--NUM_CONST: 1 [0/0] {42} ¦ ¦--'/': / [0/2] {44} ¦ ¦--expr: [1/2] {48} ¦ ¦ °--NUM_CONST: 27 [0/0] {47} diff --git a/tests/testthat/unary_spacing/unary_simple-in_tree b/tests/testthat/unary_spacing/unary_simple-in_tree index b91f8ffcc..71e17f56f 100644 --- a/tests/testthat/unary_spacing/unary_simple-in_tree +++ 
b/tests/testthat/unary_spacing/unary_simple-in_tree @@ -4,9 +4,10 @@ ROOT (token: short_text [lag_newlines/spaces] {id}) ¦ °--NUM_CONST: 1 [0/0] {1} ¦--'+': + [0/0] {3} ¦--expr: [0/0] {12} - ¦ ¦--'-': - [0/0] {4} - ¦ ¦--expr: [0/0] {6} - ¦ ¦ °--NUM_CONST: 1 [0/0] {5} + ¦ ¦--expr: [0/0] {8} + ¦ ¦ ¦--'-': - [0/0] {4} + ¦ ¦ °--expr: [0/0] {6} + ¦ ¦ °--NUM_CONST: 1 [0/0] {5} ¦ ¦--'/': / [0/0] {7} ¦ °--expr: [0/0] {10} ¦ °--NUM_CONST: 2 [0/0] {9}
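
A minimal usage sketch of the indention behaviour the new pipe_and_assignment cases exercise, assuming a styler build that contains this change and that style_text() accepts a character vector of code lines (as used in the updated test-indention_operators.R):

library(styler)  # assumes the development version that includes this change

style_text(c(    # input in the spirit of pipe_and_assignment-in.R
  "a <-",
  "b() %>%",
  "  q() %>%",
  "    g()"
))

With LEFT_ASSIGN added to the indent_op() tokens, the lines following the assignment and each pipe are expected to be indented by two spaces, matching pipe_and_assignment-out.R:

a <-
  b() %>%
  q() %>%
  g()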