
Commit 82f58ef

Merge pull request #218 from lorenzwalthert/start_token2
- Remove outdated line and col information (#218).
2 parents 9997020 + ebf96dd commit 82f58ef

9 files changed: +24 −20 lines

R/initialize.R

Lines changed: 6 additions & 0 deletions
@@ -9,6 +9,7 @@ initialize_attributes <- function(pd_flat) {
   init_pd <-
     initialize_newlines(pd_flat) %>%
     initialize_spaces() %>%
+    remove_line_col() %>%
     initialize_multi_line() %>%
     initialize_indention_ref_id() %>%
     initialize_indent() %>%
@@ -37,6 +38,11 @@ initialize_spaces <- function(pd_flat) {
   pd_flat
 }
 
+remove_line_col <- function(pd_flat) {
+  pd_flat[c("line1", "line2", "col1", "col2")] <- NULL
+  pd_flat
+}
+
 #' @describeIn initialize_attributes Initializes `multi_line`.
 initialize_multi_line <- function(pd_flat) {
   nrow <- nrow(pd_flat)
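
A minimal sketch of what the new remove_line_col() does, run against a hypothetical flat parse table (the real pd_flat carries many more columns; the function definition is copied from the hunk above):

    remove_line_col <- function(pd_flat) {
      pd_flat[c("line1", "line2", "col1", "col2")] <- NULL
      pd_flat
    }

    pd_flat <- data.frame(
      token = c("SYMBOL", "EQ_ASSIGN", "NUM_CONST"),
      text  = c("x", "<-", "1"),
      line1 = 1L, line2 = 1L,
      col1  = c(1L, 3L, 6L), col2 = c(1L, 4L, 6L),
      stringsAsFactors = FALSE
    )
    names(remove_line_col(pd_flat))
    #> [1] "token" "text"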

R/nested.R

Lines changed: 3 additions & 3 deletions
@@ -176,8 +176,8 @@ combine_children <- function(child, internal_child) {
 #' Get the start right
 #'
 #' On what line does the first token occur?
-#' @param pd A parse table.
+#' @param pd_nested A nested parse table.
 #' @return The line number on which the first token occurs.
-start_on_line <- function(pd) {
-  pd$line1[1]
+find_start_line <- function(pd_nested) {
+  pd_nested$line1[1]
 }
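
A toy illustration of the renamed helper, using a hypothetical nested parse table that only carries the line1 column (the real one is a nested tibble; the function is copied from the hunk above):

    find_start_line <- function(pd_nested) {
      pd_nested$line1[1]
    }

    pd_nested <- data.frame(line1 = c(3L, 3L, 4L))
    find_start_line(pd_nested)
    #> [1] 3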

R/serialize.R

Lines changed: 3 additions & 3 deletions
@@ -2,9 +2,9 @@
 #'
 #' Collapses a flattened parse table into character vector representation.
 #' @param flattened_pd A flattened parse table.
-serialize_parse_data_flattened <- function(flattened_pd) {
-  flattened_pd$lag_newlines[1] <- flattened_pd$line1[1] - 1
-
+#' @param start_line The line number on which the code starts.
+serialize_parse_data_flattened <- function(flattened_pd, start_line = 1) {
+  flattened_pd$lag_newlines[1] <- start_line - 1
   res <- with(flattened_pd,
     paste0(collapse = "",
       map(lag_newlines, add_newlines), map(lag_spaces, add_spaces), text)
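
A self-contained sketch of the new start_line handling: the caller now supplies the line on which the code starts, and the serializer turns it into leading newlines before the first token. The data frame and the two helpers below are stand-ins for illustration only, not the package internals:

    library(purrr)
    add_newlines <- function(n) strrep("\n", n)  # stand-in for styler's helper
    add_spaces   <- function(n) strrep(" ", n)   # stand-in for styler's helper

    flattened_pd <- data.frame(
      lag_newlines = c(0L, 0L, 0L),
      lag_spaces   = c(0L, 1L, 1L),
      text         = c("x", "<-", "1"),
      stringsAsFactors = FALSE
    )
    start_line <- 3
    flattened_pd$lag_newlines[1] <- start_line - 1
    with(flattened_pd,
      paste0(collapse = "",
        map(lag_newlines, add_newlines), map(lag_spaces, add_spaces), text)
    )
    #> [1] "\n\nx <- 1"

This is the reason line1 and friends can be dropped from the parse table: the absolute start position is the only positional information the serializer still needs, and it is now passed in explicitly.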

R/token-create.R

Lines changed: 0 additions & 4 deletions
@@ -45,10 +45,6 @@ create_tokens <- function(tokens,
     spaces = spaces,
     multi_line = rep(FALSE, len_text),
     indention_ref_id = indention_ref_ids,
-    line1 = rep(NA, len_text),
-    line2 = rep(NA, len_text),
-    col1 = rep(NA, len_text),
-    col2 = rep(NA, len_text),
     indent = indents,
     child = rep(list(NULL), len_text)
   )

R/transform.R

Lines changed: 3 additions & 2 deletions
@@ -84,7 +84,7 @@ make_transformer <- function(transformers) {
 #' @inheritParams apply_transformers
 parse_transform_serialize <- function(text, transformers) {
   pd_nested <- compute_parse_data_nested(text)
-
+  start_line <- find_start_line(pd_nested)
   if (nrow(pd_nested) == 0) {
     warning(
       "Text to style did not contain any tokens. Returning empty string.",
@@ -98,7 +98,8 @@ parse_transform_serialize <- function(text, transformers) {
     enrich_terminals(transformers$use_raw_indention) %>%
     apply_ref_indention()
 
-  serialized_transformed_text <- serialize_parse_data_flattened(flattened_pd)
+  serialized_transformed_text <-
+    serialize_parse_data_flattened(flattened_pd, start_line = start_line)
   serialized_transformed_text
 }

R/visit.R

Lines changed: 1 addition & 1 deletion
@@ -138,7 +138,7 @@ enrich_terminals <- function(flattened_pd, use_raw_indention = FALSE) {
   flattened_pd$spaces <- NULL # depreciate spaces
   flattened_pd <- choose_indention(flattened_pd, use_raw_indention)
   flattened_pd$line1 <-
-    cumsum(flattened_pd$lag_newlines) + flattened_pd$line1[1]
+    cumsum(flattened_pd$lag_newlines)
 
   flattened_pd$newlines <- lead(flattened_pd$lag_newlines, default = 0L)
   flattened_pd$nchar <- nchar(flattened_pd$text, type = "width")
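
Since the serializer now adds the start offset itself, line1 in the flattened table becomes a relative offset: simply the cumulative sum of the newlines preceding each terminal. A toy example with hypothetical values:

    lag_newlines <- c(0L, 0L, 1L, 2L)
    cumsum(lag_newlines)
    #> [1] 0 0 1 3   # offsets relative to the first token, no absolute line added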

man/start_on_line.Rd renamed to man/find_start_line.Rd

Lines changed: 4 additions & 4 deletions
(Generated documentation file; diff not rendered by default.)

man/serialize_parse_data_flattened.Rd

Lines changed: 3 additions & 1 deletion
(Generated documentation file; diff not rendered by default.)

tests/testthat/test-create_token.R

Lines changed: 1 addition & 2 deletions
@@ -4,8 +4,7 @@ test_that("can create a token that has relevant columns", {
   pd_names <- c(
     "token", "text", "short", "lag_newlines", "newlines", "pos_id",
     "parent", "token_before", "token_after", "id", "terminal", "internal",
-    "spaces", "multi_line", "indention_ref_id", "line1", "line2",
-    "col1", "col2", "indent", "child"
+    "spaces", "multi_line", "indention_ref_id", "indent", "child"
   )
 
   expect_equal(
