@@ -921,12 +921,14 @@ impl<'a> StringReader<'a> {
921
921
if string == "_" {
922
922
token:: Underscore
923
923
} else {
924
- let is_mod_name = self . curr_is ( ':' ) && self . nextch_is ( ':' ) ;
925
-
926
924
// FIXME: perform NFKC normalization here. (Issue #2253)
927
- token:: Ident ( str_to_ident ( string) , is_mod_name)
925
+ if self . curr_is ( ':' ) && self . nextch_is ( ':' ) {
926
+ token:: Ident ( str_to_ident ( string) , token:: ModName )
927
+ } else {
928
+ token:: Ident ( str_to_ident ( string) , token:: Plain )
929
+ }
928
930
}
929
- } )
931
+ } ) ;
930
932
}
931
933
932
934
if is_dec_digit ( c) {
@@ -937,8 +939,11 @@ impl<'a> StringReader<'a> {
937
939
match ( c. unwrap ( ) , self . nextch ( ) , self . nextnextch ( ) ) {
938
940
( '\x00' , Some ( 'n' ) , Some ( 'a' ) ) => {
939
941
let ast_ident = self . scan_embedded_hygienic_ident ( ) ;
940
- let is_mod_name = self . curr_is ( ':' ) && self . nextch_is ( ':' ) ;
941
- return token:: Ident ( ast_ident, is_mod_name) ;
942
+ return if self . curr_is ( ':' ) && self . nextch_is ( ':' ) {
943
+ token:: Ident ( ast_ident, token:: ModName )
944
+ } else {
945
+ token:: Ident ( ast_ident, token:: Plain )
946
+ } ;
942
947
}
943
948
_ => { }
944
949
}
@@ -1056,7 +1061,7 @@ impl<'a> StringReader<'a> {
1056
1061
str_to_ident ( lifetime_name)
1057
1062
} ) ;
1058
1063
let keyword_checking_token =
1059
- & token:: Ident ( keyword_checking_ident, false ) ;
1064
+ & token:: Ident ( keyword_checking_ident, token :: Plain ) ;
1060
1065
let last_bpos = self . last_pos ;
1061
1066
if keyword_checking_token. is_keyword ( token:: keywords:: Self ) {
1062
1067
self . err_span_ ( start,
@@ -1434,7 +1439,7 @@ mod test {
1434
1439
assert_eq ! ( string_reader. next_token( ) . tok, token:: Whitespace ) ;
1435
1440
let tok1 = string_reader. next_token ( ) ;
1436
1441
let tok2 = TokenAndSpan {
1437
- tok : token:: Ident ( id, false ) ,
1442
+ tok : token:: Ident ( id, token :: Plain ) ,
1438
1443
sp : Span { lo : BytePos ( 21 ) , hi : BytePos ( 23 ) , expn_id : NO_EXPANSION } } ;
1439
1444
assert_eq ! ( tok1, tok2) ;
1440
1445
assert_eq ! ( string_reader. next_token( ) . tok, token:: Whitespace ) ;
@@ -1443,7 +1448,7 @@ mod test {
1443
1448
// read another token:
1444
1449
let tok3 = string_reader. next_token ( ) ;
1445
1450
let tok4 = TokenAndSpan {
1446
- tok : token:: Ident ( str_to_ident ( "main" ) , false ) ,
1451
+ tok : token:: Ident ( str_to_ident ( "main" ) , token :: Plain ) ,
1447
1452
sp : Span { lo : BytePos ( 24 ) , hi : BytePos ( 28 ) , expn_id : NO_EXPANSION } } ;
1448
1453
assert_eq ! ( tok3, tok4) ;
1449
1454
// the lparen is already read:
@@ -1458,39 +1463,45 @@ mod test {
1458
1463
}
1459
1464
}
1460
1465
1461
- // make the identifier by looking up the string in the interner
1466
+ # [ cfg ( stage0 ) ]
1462
1467
fn mk_ident ( id : & str , is_mod_name : bool ) -> token:: Token {
1463
- token:: Ident ( str_to_ident ( id) , is_mod_name)
1468
+ token:: Ident ( str_to_ident ( id) , is_mod_name)
1469
+ }
1470
+
1471
+ // make the identifier by looking up the string in the interner
1472
+ #[ cfg( not( stage0) ) ]
1473
+ fn mk_ident ( id : & str , style : token:: IdentStyle ) -> token:: Token {
1474
+ token:: Ident ( str_to_ident ( id) , style)
1464
1475
}
1465
1476
1466
1477
// Two plain identifiers separated by whitespace: neither is followed by
// `::`, so both tokenize with `token::Plain` style.
#[test]
fn doublecolonparsing() {
    let reader = setup(&mk_sh(), "a b".to_string());
    let expected = vec![mk_ident("a", token::Plain),
                        token::Whitespace,
                        mk_ident("b", token::Plain)];
    check_tokenization(reader, expected);
}
1472
1483
1473
1484
// `a::b`: the identifier immediately before `::` is lexed with
// `token::ModName` style; the one after it is `token::Plain`.
#[test]
fn dcparsing_2() {
    let reader = setup(&mk_sh(), "a::b".to_string());
    let expected = vec![mk_ident("a", token::ModName),
                        token::ModSep,
                        mk_ident("b", token::Plain)];
    check_tokenization(reader, expected);
}
1479
1490
1480
1491
// `a ::b`: whitespace between `a` and `::` means `a` is NOT in
// module-name position, so it stays `token::Plain`.
#[test]
fn dcparsing_3() {
    let reader = setup(&mk_sh(), "a ::b".to_string());
    let expected = vec![mk_ident("a", token::Plain),
                        token::Whitespace,
                        token::ModSep,
                        mk_ident("b", token::Plain)];
    check_tokenization(reader, expected);
}
1487
1498
1488
1499
// `a:: b`: `a` is directly followed by `::`, so it is `token::ModName`;
// the whitespace after `::` does not affect `b`, which is `token::Plain`.
#[test]
fn dcparsing_4() {
    let reader = setup(&mk_sh(), "a:: b".to_string());
    let expected = vec![mk_ident("a", token::ModName),
                        token::ModSep,
                        token::Whitespace,
                        mk_ident("b", token::Plain)];
    check_tokenization(reader, expected);
}
1495
1506
1496
1507
#[ test] fn character_a ( ) {
0 commit comments