Op assignment indention #99

Merged 12 commits on Jul 28, 2017
1 change: 1 addition & 0 deletions DESCRIPTION
@@ -40,6 +40,7 @@ Collate:
'nested.R'
'nested_to_tree.R'
'parsed.R'
'token.R'
'relevel.R'
'rules-line_break.R'
'rules-other.R'
2 changes: 0 additions & 2 deletions R/get_transformers.R
@@ -83,7 +83,6 @@ get_transformers_nested <- function(
partial(indent_curly, indent_by = indent_by),
partial(indent_op, indent_by = indent_by),
partial(indent_without_paren, indent_by = indent_by),
partial(indent_assign, indent_by = indent_by),
get_transformers_flat(strict, start_comments_with_one_space),
remove_space_after_unary_pm_nested,
set_space_before_comments,
@@ -112,7 +111,6 @@ get_transformers_nested <- function(
line_break = line_break_manipulators,
space = space_manipulators,
token = token_manipulators,
eol = strip_eol_spaces,
NULL
)
}
20 changes: 4 additions & 16 deletions R/modify_pd.R
@@ -23,16 +23,16 @@ indent_curly <- function(pd, indent_by) {

#' @rdname update_indention
indent_op <- function(pd, indent_by, token = c(math_token,
"SPECIAL-PIPE")) {
"SPECIAL-PIPE",
"LEFT_ASSIGN")) {
indent_indices <- compute_indent_indices(pd, token, indent_last = TRUE)
pd$indent[indent_indices] <- pd$indent[indent_indices] + indent_by
pd
}

#' @describeIn update_indention Same as indent_op, but only indents one token
#' after `token`, not all remaining.
indent_assign <- function(pd, indent_by, token = c("LEFT_ASSIGN",
"EQ_ASSIGN")) {
indent_assign <- function(pd, indent_by, token = NULL) {
indent_indices <- compute_indent_indices(pd, token, indent_last = TRUE)
pd$indent[indent_indices] <- pd$indent[indent_indices] + indent_by
pd
@@ -43,7 +43,7 @@ indent_assign <- function(pd, indent_by, token = c("LEFT_ASSIGN", "
indent_without_paren <- function(pd, indent_by = 2) {
nrow <- nrow(pd)
if (!(pd$token[1] %in% c("IF", "FOR", "WHILE"))) return(pd)
if (pd$child[[nrow]]$token[1] == "'{'") return(pd)
if (pd$lag_newlines[nrow] == 0) return(pd)
pd$indent[nrow] <- indent_by
pd
}
@@ -110,15 +110,3 @@ set_multi_line <- function(pd) {
token_is_multi_line <- function(pd) {
any(pd$multi_line, pd$lag_newlines > 0)
}


#' Strip EOL spaces
#'
#' Remove end-of-line spaces.
#' @param pd_flat A flat parse table.
#' @return A nested parse table.
strip_eol_spaces <- function(pd_flat) {
idx <- lead(pd_flat$lag_newlines, default = 0) != 0
pd_flat$spaces[idx] <- 0
pd_flat
}
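
The two indention changes above (routing `LEFT_ASSIGN` through `indent_op` and keying `indent_without_paren` on `lag_newlines` instead of a `'{'` child) mean that the continuation of a multi-line assignment, and a braceless loop or `if` body on its own line, are both indented like any other operator continuation. A minimal before/after sketch in plain R, assuming the default `indent_by = 2`; it illustrates the intended output and is not a call into styler internals:

# before styling: continuation lines flush left
x <-
3
if (x > 0)
x + 1

# after styling: both continuations indented by two spaces
x <-
  3
if (x > 0)
  x + 1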
21 changes: 0 additions & 21 deletions R/nested.R
@@ -69,26 +69,6 @@ special_and <- function(text) {
paste0("SPECIAL-", text)
}


#' lookup which new tokens were created from "SPECIAL"
#'
#' @param regex A regular expression pattern to search for.
#' @importFrom purrr map_chr
lookup_new_special <- function(regex = NA) {
new_special <- c("PIPE", "IN", "OTHER")

potential_regex <- grep(regex, new_special, value = TRUE, ignore.case = TRUE)
if (is.na(regex)) {
mapping <- new_special
} else if (length(potential_regex) > 0) {
mapping <- potential_regex
} else {
return(NA)
}
map_chr(mapping, special_and)
}


#' Add information about previous / next token to each terminal
#'
#' @param pd_flat A flat parse table.
@@ -131,7 +111,6 @@ set_spaces <- function(spaces_after_prefix, force_one) {
n_of_spaces <- rep(1, length(spaces_after_prefix))
} else {
n_of_spaces <- pmax(spaces_after_prefix, 1L)
n_of_spaces[spaces_after_prefix == 0L] <- 0L
}
n_of_spaces
}
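
With the zero-preserving assignment removed, the non-strict branch of `set_spaces()` now widens zero-width gaps to a single space as well. A quick base-R sketch of what the remaining `else` branch computes:

spaces_after_prefix <- c(0L, 1L, 3L)
pmax(spaces_after_prefix, 1L)
# [1] 1 1 3   (before this change, the first element would have stayed 0)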
57 changes: 47 additions & 10 deletions R/relevel.R
@@ -12,19 +12,56 @@ flatten_operators <- function(pd_nested) {
post_visit(c(flatten_operators_one))
}


#' Flatten one level of nesting with its child
#'
#' Flattening is done in two ways. We can flatten a parse table by moving
#' the left hand token of an operator one level up. Or doing that with the
#' right hand token.
#' @param pd_nested A nested parse table.
#' @include token.R
flatten_operators_one <- function(pd_nested) {
token <- c("'+'", "'-'", special_token, "'/'", "'*'")
token_pos <- which(pd_nested$token %in% token)
if (length(token_pos) == 0) return(pd_nested)
stopifnot(length(token_pos) == 1)
pd_token_left <- c(special_token, math_token)
pd_token_right <- c(special_token, left_assignment_token, "'+'", "'-'")
bound <- pd_nested %>%
flatten_pd(pd_token_left, left = TRUE) %>%
flatten_pd(pd_token_right, left = FALSE)
bound
}


lhs_pos <- token_pos - 1L
if (lhs_pos < 1) return(pd_nested)
if (!any(pd_nested$child[[lhs_pos]]$token %in% token)) return(pd_nested)
#' Flatten a parse table
#'
#' Flattens a parse table if certain tokens occur in this table or its child,
#' either flattening from left or from right. If one of `token` is present in
#' `pd_nested` and one of `child_token` is present in one of the children next
#' to `token` in `pd_nested`, the nested parse table is flattened. Otherwise, it
#' is returned unmodified.
#' @param pd_nested A nested parse table.
#' @param token A character vector with tokens of which at least one has to
#' occur in `pd_nested` in order to flatten it.
#' @param child_token A character vector of tokens of which at least one has to
#' occur in the child in order to flatten the parse table.
#' @param left Flag that indicates whether the parse table should be flattened
#' from left or from right.
flatten_pd <- function(pd_nested, token, child_token = token, left = TRUE) {
token_pos <- which(pd_nested$token[-1] %in% token) + 1
if (length(token_pos) == 0) return(pd_nested)
pos <- token_pos[ifelse(left, 1, length(token_pos))] + ifelse(left, -1L, 1L)
if (pos < 1) return(pd_nested)
if (!any(pd_nested$child[[pos]]$token[-1] %in% child_token)) return(pd_nested)
bind_with_child(pd_nested, pos)
}

#' Bind a parse table with one of its children
#'
#' Bind a parse table with one of its children and return the **unordered**
#' parse table (that is, rows are not arranged according to line1 / line2).
#' @param pd_nested A nested parse table.
#' @param pos The position of the child to bind.
bind_with_child <- function(pd_nested, pos) {
pd_nested %>%
slice(-lhs_pos) %>%
bind_rows(pd_nested$child[[lhs_pos]]) %>%
slice(-pos) %>%
bind_rows(pd_nested$child[[pos]]) %>%
arrange(line1, col1)
}
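
A toy illustration of the `flatten_pd()` / `bind_with_child()` mechanic: drop the nested row, splice in its child rows, and rely on `arrange(line1, col1)` to restore source order. The tibbles below are hypothetical stand-ins for real parse tables (the `child` list-column nesting is omitted for brevity), and dplyr/tibble are assumed to be attached as in the package itself:

library(dplyr)
library(tibble)

# made-up parent: row 1 is a nested "expr" whose terminals live in child_pd,
# row 2 is the terminal on the following line
child_pd  <- tibble(line1 = c(1, 1), col1 = c(1, 3), token = c("SYMBOL", "LEFT_ASSIGN"))
parent_pd <- tibble(line1 = c(1, 2), col1 = c(1, 3), token = c("expr", "NUM_CONST"))

pos <- 1
parent_pd %>%
  slice(-pos) %>%          # drop the nested row
  bind_rows(child_pd) %>%  # splice its children in (unordered at this point)
  arrange(line1, col1)     # restore source order
# yields a flat three-row table: SYMBOL, LEFT_ASSIGN, NUM_CONST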

18 changes: 3 additions & 15 deletions R/rules-spacing.R
@@ -1,18 +1,4 @@
math_token <- c("'+'", "'-'", "'*'", "'/'", "'^'")

#' @include nested.R
special_token <- lookup_new_special()

op_token <- c(
math_token,
special_token,
"AND", "AND2", "EQ", "EQ_ASSIGN",
"GE", "GT", "LE", "LEFT_ASSIGN", "LT", "NE", "OR", "OR2", "RIGHT_ASSIGN",
"EQ_SUB", "ELSE"
)



#' @include token.R
add_space_around_op <- function(pd_flat) {
op_after <- pd_flat$token %in% op_token
op_before <- lead(op_after, default = FALSE)
@@ -23,6 +9,7 @@ add_space_around_op <- function(pd_flat) {
pd_flat
}

#' @include token.R
set_space_around_op <- function(pd_flat) {
op_after <- pd_flat$token %in% op_token
if (!any(op_after)) return(pd_flat)
@@ -33,6 +20,7 @@ set_space_around_op <- function(pd_flat) {
}

# deprecated!
#' @include token.R
remove_space_after_unary_pm <- function(pd_flat) {
op_pm <- c("'+'", "'-'")
op_pm_unary_after <- c(op_pm, op_token, "'('", "','")
3 changes: 2 additions & 1 deletion R/serialize.R
@@ -41,7 +41,8 @@ serialize_parse_data_nested <- function(pd_nested) {
unlist() %>%
paste0(collapse = "") %>%
strsplit("\n", fixed = TRUE) %>%
.[[1L]]
.[[1L]] %>%
trimws(which = "right")
out
}
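
Trailing blanks are now stripped at serialization time instead of by the removed `strip_eol_spaces()` transformer. `trimws(which = "right")` is base R (available since R 3.2.0); a quick sketch of what it does to the serialized lines:

lines <- c("a <- 3   ", "f(x)", "  indented   ")
trimws(lines, which = "right")
# [1] "a <- 3"     "f(x)"       "  indented"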

5 changes: 0 additions & 5 deletions R/serialized_tests.R
@@ -139,7 +139,6 @@ style_indent_round <- function(text) {
line_break = NULL,
space = partial(indent_round, indent_by = 2),
token = NULL,
eol = strip_eol_spaces,
NULL
)
transformed_text <- parse_transform_serialize(text, transformers)
@@ -157,7 +156,6 @@ style_empty <- function(text) {
line_break = NULL,
space = NULL,
token = NULL,
eol = strip_eol_spaces,
NULL
)
transformed_text <- parse_transform_serialize(text, transformers)
@@ -173,7 +171,6 @@ style_indent_curly <- function(text) {
line_break = NULL,
space = partial(indent_curly, indent_by = 2),
token = NULL,
eol = strip_eol_spaces,
NULL
)
transformed_text <- parse_transform_serialize(text, transformers)
@@ -190,7 +187,6 @@ style_indent_curly_round <- function(text) {
space = c(partial(indent_curly, indent_by = 2),
partial(indent_round, indent_by = 2)),
token = NULL,
eol = strip_eol_spaces,
NULL
)

@@ -209,7 +205,6 @@ style_op <- function(text) {
line_break = NULL,
space = partial(indent_op, indent_by = 2),
token = NULL,
eol = strip_eol_spaces,
NULL
)

58 changes: 58 additions & 0 deletions R/token.R
@@ -0,0 +1,58 @@
token <- tribble(
~text, ~class, ~token,
"&" , "logical" , "AND",
"&&" , "logical" , "AND2",
"|" , "logical" , "OR",
"||" , "logical" , "OR2",
">" , "logical" , "GT",
"<" , "logical" , "LT",
"<=" , "logical" , "LE",
">=" , "logical" , "GE",
"!=" , "logical" , "NE",
"==" , "logical" , "EQ",
"=" , "assign_left" , "EQ_ASSIGN",
"<-" , "assign_left" , "LEFT_ASSIGN",
"->" , "assign_right", "RIGHT_ASSIGN",
"+" , "math" , "'+'",
"-" , "math" , "'-'",
"*" , "math" , "'*'",
"/" , "math" , "'/'",
"^" , "math" , "'^'"
)

math_token <- token$token[token$class == "math"]
logical_token <- token$token[token$class == "logical"]
left_assignment_token <- token$token[token$class == "assign_left"]
right_assignment_token <- token$token[token$class == "assign_right"]

#' lookup which new tokens were created from "SPECIAL"
#'
#' @param regex A regular expression pattern to search for.
#' @importFrom purrr map_chr
lookup_new_special <- function(regex = NA) {
new_special <- c("PIPE", "IN", "OTHER")

potential_regex <- grep(regex, new_special, value = TRUE, ignore.case = TRUE)
if (is.na(regex)) {
mapping <- new_special
} else if (length(potential_regex) > 0) {
mapping <- potential_regex
} else {
return(NA)
}
map_chr(mapping, special_and)
}

special_token <- lookup_new_special()

op_token <- c(
math_token,
special_token,
logical_token,
left_assignment_token,
right_assignment_token,
"EQ_SUB", "ELSE"
)



4 changes: 1 addition & 3 deletions R/transform.R
@@ -112,8 +112,6 @@ apply_transformers <- function(pd_nested, transformers) {
c(set_multi_line))

transformed_all <- pre_visit(transformed_updated_multi_line,
c(transformers$space,
transformers$token,
transformers$eol))
c(transformers$space, transformers$token))
transformed_all
}
17 changes: 17 additions & 0 deletions man/bind_with_child.Rd

16 changes: 16 additions & 0 deletions man/flatten_operators_one.Rd

27 changes: 27 additions & 0 deletions man/flatten_pd.Rd

2 changes: 1 addition & 1 deletion man/lookup_new_special.Rd
