
Commit 9eed596

Import TokenType members in Lexer.kt
No-op change that just cleans up the redundant `TokenType.` qualifiers in Lexer.kt
1 parent ad8fa8e commit 9eed596
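
The change relies on Kotlin's ability to wildcard-import the entries of an enum so they can be referenced without the enum's name as a qualifier. Below is a minimal, self-contained sketch of that pattern, illustrative only: the `TokenKind` enum and `sketch` package are stand-ins, not the real `org.kson.parser.TokenType`.

package sketch

import sketch.TokenKind.*

// Stand-in enum for illustration; not the real org.kson.parser.TokenType
enum class TokenKind { NULL, TRUE, FALSE }

// Without the wildcard import, every entry needs the TokenKind. qualifier:
val qualified = mapOf(
    "null" to TokenKind.NULL,
    "true" to TokenKind.TRUE,
    "false" to TokenKind.FALSE
)

// With `import sketch.TokenKind.*` in effect, the entries resolve bare,
// which is the shape of the edit applied throughout Lexer.kt below:
val unqualified = mapOf(
    "null" to NULL,
    "true" to TRUE,
    "false" to FALSE
)

fun main() {
    check(qualified == unqualified) // identical maps: the rewrite is behavior-preserving
    println(unqualified["true"])    // prints TRUE
}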

File tree
  • src/commonMain/kotlin/org/kson/parser

1 file changed (+46 -44 lines changed)

src/commonMain/kotlin/org/kson/parser/Lexer.kt

Lines changed: 46 additions & 44 deletions
@@ -3,6 +3,7 @@ package org.kson.parser
 import org.kson.collections.ImmutableList
 import org.kson.collections.toImmutableList
 import org.kson.collections.toImmutableMap
+import org.kson.parser.TokenType.*
 
 const val EMBED_DELIM_CHAR = '%'
 const val EMBED_DELIMITER = "$EMBED_DELIM_CHAR$EMBED_DELIM_CHAR"
@@ -13,9 +14,9 @@ val embedDelimChars = setOf(EMBED_DELIM_CHAR, EMBED_DELIM_ALT_CHAR)
 
 private val KEYWORDS =
     mapOf(
-        "null" to TokenType.NULL,
-        "true" to TokenType.TRUE,
-        "false" to TokenType.FALSE
+        "null" to NULL,
+        "true" to TRUE,
+        "false" to FALSE
     ).toImmutableMap()
 
 /**
@@ -233,7 +234,7 @@ class Lexer(source: String, gapFree: Boolean = false) {
         if (gapFree) {
             emptySet()
         } else {
-            setOf(TokenType.WHITESPACE, TokenType.COMMENT)
+            setOf(WHITESPACE, COMMENT)
         }
     )
 
@@ -248,7 +249,7 @@ class Lexer(source: String, gapFree: Boolean = false) {
             scan()
         }
 
-        addToken(TokenType.EOF, Lexeme("", sourceScanner.currentLocation()), "")
+        addToken(EOF, Lexeme("", sourceScanner.currentLocation()), "")
 
         return tokens.toList()
     }
@@ -261,13 +262,13 @@ class Lexer(source: String, gapFree: Boolean = false) {
             while (isWhitespace(sourceScanner.peek()) && !sourceScanner.eof()) {
                 sourceScanner.advance()
             }
-            addLiteralToken(TokenType.WHITESPACE)
+            addLiteralToken(WHITESPACE)
             return
         }
 
         // a "minus sign" followed by whitespace is actually a list dash
         if (char == '-' && (isWhitespace(sourceScanner.peek()) || sourceScanner.eof())) {
-            addLiteralToken(TokenType.LIST_DASH)
+            addLiteralToken(LIST_DASH)
             return
         }
 
@@ -276,25 +277,25 @@ class Lexer(source: String, gapFree: Boolean = false) {
                 val commentText = comment()
                 currentCommentLines.add(commentText)
             }
-            '{' -> addLiteralToken(TokenType.CURLY_BRACE_L)
-            '}' -> addLiteralToken(TokenType.CURLY_BRACE_R)
-            '[' -> addLiteralToken(TokenType.SQUARE_BRACKET_L)
-            ']' -> addLiteralToken(TokenType.SQUARE_BRACKET_R)
-            '<' -> addLiteralToken(TokenType.ANGLE_BRACKET_L)
-            '>' -> addLiteralToken(TokenType.ANGLE_BRACKET_R)
-            ':' -> addLiteralToken(TokenType.COLON)
-            ',' -> addLiteralToken(TokenType.COMMA)
+            '{' -> addLiteralToken(CURLY_BRACE_L)
+            '}' -> addLiteralToken(CURLY_BRACE_R)
+            '[' -> addLiteralToken(SQUARE_BRACKET_L)
+            ']' -> addLiteralToken(SQUARE_BRACKET_R)
+            '<' -> addLiteralToken(ANGLE_BRACKET_L)
+            '>' -> addLiteralToken(ANGLE_BRACKET_R)
+            ':' -> addLiteralToken(COLON)
+            ',' -> addLiteralToken(COMMA)
             '"', '\'' -> {
-                addLiteralToken(TokenType.STRING_OPEN_QUOTE)
+                addLiteralToken(STRING_OPEN_QUOTE)
                 string(char)
             }
             EMBED_DELIM_CHAR, EMBED_DELIM_ALT_CHAR -> {
                 // look for the required second embed delim char
                 if (sourceScanner.peek() == char) {
                     sourceScanner.advance()
-                    addLiteralToken(TokenType.EMBED_OPEN_DELIM)
+                    addLiteralToken(EMBED_OPEN_DELIM)
                 } else {
-                    addLiteralToken(TokenType.EMBED_DELIM_PARTIAL)
+                    addLiteralToken(EMBED_DELIM_PARTIAL)
                 }
                 embeddedBlock(char)
             }
@@ -309,7 +310,7 @@ class Lexer(source: String, gapFree: Boolean = false) {
                 identifier()
             }
             else -> {
-                addLiteralToken(TokenType.ILLEGAL_CHAR)
+                addLiteralToken(ILLEGAL_CHAR)
            }
         }
     }
@@ -330,7 +331,7 @@ class Lexer(source: String, gapFree: Boolean = false) {
         while (sourceScanner.peek() != '\n' && !sourceScanner.eof()) sourceScanner.advance()
 
         val commentLexeme = sourceScanner.extractLexeme()
-        return Token(TokenType.COMMENT, commentLexeme, commentLexeme.text, emptyList())
+        return Token(COMMENT, commentLexeme, commentLexeme.text, emptyList())
     }
 
     /**
@@ -351,7 +352,7 @@ class Lexer(source: String, gapFree: Boolean = false) {
         while (isAlphaNumeric(sourceScanner.peek())) sourceScanner.advance()
 
         val lexeme = sourceScanner.extractLexeme()
-        val type: TokenType = KEYWORDS[lexeme.text] ?: TokenType.IDENTIFIER
+        val type: TokenType = KEYWORDS[lexeme.text] ?: IDENTIFIER
         addToken(type, lexeme, lexeme.text)
     }
 
@@ -366,15 +367,15 @@ class Lexer(source: String, gapFree: Boolean = false) {
                 && !isWhitespace(nextStringChar)
             ) {
                 if (hasUntokenizedStringCharacters) {
-                    addLiteralToken(TokenType.STRING)
+                    addLiteralToken(STRING)
                     hasUntokenizedStringCharacters = false
                 }
                 // advance past the illegal char and tokenize it
                 sourceScanner.advance()
-                addLiteralToken(TokenType.STRING_ILLEGAL_CONTROL_CHARACTER)
+                addLiteralToken(STRING_ILLEGAL_CONTROL_CHARACTER)
             } else if (nextStringChar == '\\') {
                 if (hasUntokenizedStringCharacters) {
-                    addLiteralToken(TokenType.STRING)
+                    addLiteralToken(STRING)
                     hasUntokenizedStringCharacters = false
                 }
 
@@ -391,14 +392,14 @@ class Lexer(source: String, gapFree: Boolean = false) {
                         }
                         sourceScanner.advance()
                     }
-                    addLiteralToken(TokenType.STRING_UNICODE_ESCAPE)
+                    addLiteralToken(STRING_UNICODE_ESCAPE)
                 } else {
                     // otherwise, this must be a one-char string escape, provided we're
                     // not up against EOF
                     if (!sourceScanner.eof()) {
                         sourceScanner.advance()
                     }
-                    addLiteralToken(TokenType.STRING_ESCAPE)
+                    addLiteralToken(STRING_ESCAPE)
                 }
             } else {
                 sourceScanner.advance()
@@ -407,15 +408,15 @@ class Lexer(source: String, gapFree: Boolean = false) {
         }
 
         if (hasUntokenizedStringCharacters) {
-            addLiteralToken(TokenType.STRING)
+            addLiteralToken(STRING)
         }
 
         if (sourceScanner.eof()) {
             return
         } else {
             // not at EOF, so we must be looking at the quote that ends this string
             sourceScanner.advance()
-            addLiteralToken(TokenType.STRING_CLOSE_QUOTE)
+            addLiteralToken(STRING_CLOSE_QUOTE)
         }
     }
 
@@ -425,13 +426,13 @@ class Lexer(source: String, gapFree: Boolean = false) {
             while (isInlineWhitespace(sourceScanner.peek())) {
                 sourceScanner.advance()
             }
-            addLiteralToken(TokenType.WHITESPACE)
+            addLiteralToken(WHITESPACE)
         }
 
         if (sourceScanner.peek() == '\n') {
             // no embed tag on this block
             sourceScanner.advance()
-            addLiteralToken(TokenType.EMBED_PREAMBLE_NEWLINE)
+            addLiteralToken(EMBED_PREAMBLE_NEWLINE)
         } else if (sourceScanner.eof()) {
             return
         } else {
@@ -445,7 +446,7 @@ class Lexer(source: String, gapFree: Boolean = false) {
             // extract our embed tag (note: may be empty, that's supported)
             val embedTagLexeme = sourceScanner.extractLexeme()
             addToken(
-                TokenType.EMBED_TAG, embedTagLexeme,
+                EMBED_TAG, embedTagLexeme,
                 // trim any trailing whitespace from the embed tag's value
                 embedTagLexeme.text.trim()
             )
@@ -454,14 +455,14 @@ class Lexer(source: String, gapFree: Boolean = false) {
         if (sourceScanner.peek() == delimChar && sourceScanner.peekNext() == delimChar) {
             sourceScanner.advance()
             sourceScanner.advance()
-            addLiteralToken(TokenType.EMBED_CLOSE_DELIM)
+            addLiteralToken(EMBED_CLOSE_DELIM)
             return
         }
 
         // consume the newline from after this embed tag
         if (sourceScanner.peek() == '\n') {
             sourceScanner.advance()
-            addLiteralToken(TokenType.EMBED_PREAMBLE_NEWLINE)
+            addLiteralToken(EMBED_PREAMBLE_NEWLINE)
         } else if (sourceScanner.eof()) {
             return
         }
@@ -520,7 +521,7 @@ class Lexer(source: String, gapFree: Boolean = false) {
             trimmedEmbedBlockContent
         }
 
-        addToken(TokenType.EMBED_CONTENT, embedBlockLexeme, embedTokenValue)
+        addToken(EMBED_CONTENT, embedBlockLexeme, embedTokenValue)
 
         /**
         * We scanned everything that wasn't an [TokenType.EMBED_CLOSE_DELIM] into our embed content,
@@ -532,7 +533,7 @@ class Lexer(source: String, gapFree: Boolean = false) {
             // process our closing delimChar pair
             sourceScanner.advance()
             sourceScanner.advance()
-            addLiteralToken(TokenType.EMBED_CLOSE_DELIM)
+            addLiteralToken(EMBED_CLOSE_DELIM)
         }
     }
 
@@ -573,7 +574,7 @@ class Lexer(source: String, gapFree: Boolean = false) {
             || sourceScanner.peek() == '+'
             || sourceScanner.peek() == '-'
             || sourceScanner.peek() == '.') sourceScanner.advance()
-        addLiteralToken(TokenType.NUMBER)
+        addLiteralToken(NUMBER)
     }
 
     /**
@@ -613,13 +614,14 @@ class Lexer(source: String, gapFree: Boolean = false) {
     private data class CommentMetadata(val comments: List<String>, val lookaheadTokens: List<Token>)
     private fun commentMetadataForCurrentToken(currentTokenType: TokenType): CommentMetadata {
        // comments don't get associated with these types
-        if (currentTokenType == TokenType.COMMENT
-            || currentTokenType == TokenType.WHITESPACE
-            || currentTokenType == TokenType.EMBED_PREAMBLE_NEWLINE
-            || currentTokenType == TokenType.STRING
-            || currentTokenType == TokenType.STRING_ESCAPE
-            || currentTokenType == TokenType.STRING_UNICODE_ESCAPE
-            || currentTokenType == TokenType.STRING_ILLEGAL_CONTROL_CHARACTER) {
+        if (currentTokenType == COMMENT
+            || currentTokenType == WHITESPACE
+            || currentTokenType == EMBED_PREAMBLE_NEWLINE
+            || currentTokenType == STRING
+            || currentTokenType == STRING_ESCAPE
+            || currentTokenType == STRING_UNICODE_ESCAPE
+            || currentTokenType == STRING_ILLEGAL_CONTROL_CHARACTER
+        ) {
             return CommentMetadata(emptyList(), emptyList())
         }
 
@@ -635,7 +637,7 @@ class Lexer(source: String, gapFree: Boolean = false) {
                 sourceScanner.advance()
             }
             val whitespaceLexeme = sourceScanner.extractLexeme()
-            trailingCommentTokens.add(Token(TokenType.WHITESPACE, whitespaceLexeme, whitespaceLexeme.text, emptyList()))
+            trailingCommentTokens.add(Token(WHITESPACE, whitespaceLexeme, whitespaceLexeme.text, emptyList()))
         }
         val trailingComment = if (sourceScanner.peek() == '#') {
             val commentToken = extractCommentToken()
