@@ -548,15 +548,16 @@ function tokenizer($TEXT, filename, html5_comments, shebang) {
                 return after_e;
               case (after_e = false, 46): // .
                 return (!has_dot && !has_x && !has_e) ? (has_dot = true) : false;
-            }
-
-            if (ch === "n") {
+              case 110: // n
                 is_big_int = true;
-
                 return true;
             }
 
-            return RE_NUM_LITERAL.test(ch);
+            return (
+                code >= 48 && code <= 57 // 0-9
+                || code >= 97 && code <= 102 // a-f
+                || code >= 65 && code <= 70 // A-F
+            );
         });
         if (prefix) num = prefix + num;
 
@@ -573,7 +574,7 @@ function tokenizer($TEXT, filename, html5_comments, shebang) {
             }
             num = num.replace(/_/g, "");
         }
-        if (num.endsWith("n")) {
+        if (is_big_int) {
             const without_n = num.slice(0, -1);
             const allow_e = RE_HEX_NUMBER.test(without_n);
             const valid = parse_js_number(without_n, allow_e);
@@ -748,7 +749,24 @@ function tokenizer($TEXT, filename, html5_comments, shebang) {
         return next_token;
     });
 
-    var read_name = with_eof_error("Unterminated identifier name", function() {
+    var read_name = function() {
+        let start = S.pos, end = start - 1, ch = "c";
+
+        while (
+            (ch = S.text.charAt(++end))
+            && (ch >= "a" && ch <= "z" || ch >= "A" && ch <= "Z")
+        );
+
+        if (end > start + 1 && ch && ch !== "\\" && !is_identifier_char(ch)) {
+            S.pos += end - start;
+            S.col += end - start;
+            return S.text.slice(start, S.pos);
+        }
+
+        return read_name_hard();
+    };
+
+    var read_name_hard = with_eof_error("Unterminated identifier name", function() {
         var name = [], ch, escaped = false;
         var read_escaped_identifier_char = function() {
             escaped = true;
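
Note on the read_num hunk above: the new return expression accepts exactly the hex digits (0-9, a-f, A-F) via character-code range checks instead of a per-character RE_NUM_LITERAL.test() call. A minimal standalone sketch of that check follows; the helper name is illustrative and does not appear in the patch.

// Illustrative helper (assumed name): same acceptance set as the
// char-code test added to read_num above.
function is_num_literal_char(ch) {
    const code = ch.charCodeAt(0);
    return (
        code >= 48 && code <= 57      // 0-9
        || code >= 97 && code <= 102  // a-f
        || code >= 65 && code <= 70   // A-F
    );
}

console.assert(is_num_literal_char("7") === true);  // decimal digit
console.assert(is_num_literal_char("b") === true);  // lower-case hex digit
console.assert(is_num_literal_char("F") === true);  // upper-case hex digit
console.assert(is_num_literal_char("g") === false); // outside the accepted set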