@@ -56,7 +56,7 @@ static int
5656tok_readline_raw (struct tok_state * tok )
5757{
5858 do {
59- if (!tok_reserve_buf (tok , BUFSIZ )) {
59+ if (!_PyLexer_tok_reserve_buf (tok , BUFSIZ )) {
6060 return 0 ;
6161 }
6262 int n_chars = (int )(tok -> end - tok -> inp );
@@ -86,7 +86,7 @@ tok_readline_recode(struct tok_state *tok) {
8686 if (line == NULL ) {
8787 line = PyObject_CallNoArgs (tok -> decoding_readline );
8888 if (line == NULL ) {
89- error_ret (tok );
89+ _PyTokenizer_error_ret (tok );
9090 goto error ;
9191 }
9292 }
@@ -95,14 +95,14 @@ tok_readline_recode(struct tok_state *tok) {
9595 }
9696 buf = PyUnicode_AsUTF8AndSize (line , & buflen );
9797 if (buf == NULL ) {
98- error_ret (tok );
98+ _PyTokenizer_error_ret (tok );
9999 goto error ;
100100 }
101101 // Make room for the null terminator *and* potentially
102102 // an extra newline character that we may need to artificially
103103 // add.
104104 size_t buffer_size = buflen + 2 ;
105- if (!tok_reserve_buf (tok , buffer_size )) {
105+ if (!_PyLexer_tok_reserve_buf (tok , buffer_size )) {
106106 goto error ;
107107 }
108108 memcpy (tok -> inp , buf , buflen );
@@ -132,7 +132,7 @@ static void fp_ungetc(int c, struct tok_state *tok) {
132132/* Set the readline function for TOK to a StreamReader's
133133 readline function. The StreamReader is named ENC.
134134
135- This function is called from check_bom and check_coding_spec .
135+ This function is called from _PyTokenizer_check_bom and _PyTokenizer_check_coding_spec .
136136
137137 ENC is usually identical to the future value of tok->encoding,
138138 except for the (currently unsupported) case of UTF-16.
@@ -195,7 +195,7 @@ tok_underflow_interactive(struct tok_state *tok) {
195195 }
196196 char * newtok = PyOS_Readline (tok -> fp ? tok -> fp : stdin , stdout , tok -> prompt );
197197 if (newtok != NULL ) {
198- char * translated = translate_newlines (newtok , 0 , 0 , tok );
198+ char * translated = _PyTokenizer_translate_newlines (newtok , 0 , 0 , tok );
199199 PyMem_Free (newtok );
200200 if (translated == NULL ) {
201201 return 0 ;
@@ -206,7 +206,7 @@ tok_underflow_interactive(struct tok_state *tok) {
206206 /* Recode to UTF-8 */
207207 Py_ssize_t buflen ;
208208 const char * buf ;
209- PyObject * u = translate_into_utf8 (newtok , tok -> encoding );
209+ PyObject * u = _PyTokenizer_translate_into_utf8 (newtok , tok -> encoding );
210210 PyMem_Free (newtok );
211211 if (u == NULL ) {
212212 tok -> done = E_DECODE ;
@@ -240,10 +240,10 @@ tok_underflow_interactive(struct tok_state *tok) {
240240 }
241241 else if (tok -> start != NULL ) {
242242 Py_ssize_t cur_multi_line_start = tok -> multi_line_start - tok -> buf ;
243- remember_fstring_buffers (tok );
243+ _PyLexer_remember_fstring_buffers (tok );
244244 size_t size = strlen (newtok );
245245 ADVANCE_LINENO ();
246- if (!tok_reserve_buf (tok , size + 1 )) {
246+ if (!_PyLexer_tok_reserve_buf (tok , size + 1 )) {
247247 PyMem_Free (tok -> buf );
248248 tok -> buf = NULL ;
249249 PyMem_Free (newtok );
@@ -253,18 +253,18 @@ tok_underflow_interactive(struct tok_state *tok) {
253253 PyMem_Free (newtok );
254254 tok -> inp += size ;
255255 tok -> multi_line_start = tok -> buf + cur_multi_line_start ;
256- restore_fstring_buffers (tok );
256+ _PyLexer_restore_fstring_buffers (tok );
257257 }
258258 else {
259- remember_fstring_buffers (tok );
259+ _PyLexer_remember_fstring_buffers (tok );
260260 ADVANCE_LINENO ();
261261 PyMem_Free (tok -> buf );
262262 tok -> buf = newtok ;
263263 tok -> cur = tok -> buf ;
264264 tok -> line_start = tok -> buf ;
265265 tok -> inp = strchr (tok -> buf , '\0' );
266266 tok -> end = tok -> inp + 1 ;
267- restore_fstring_buffers (tok );
267+ _PyLexer_restore_fstring_buffers (tok );
268268 }
269269 if (tok -> done != E_OK ) {
270270 if (tok -> prompt != NULL ) {
@@ -273,7 +273,7 @@ tok_underflow_interactive(struct tok_state *tok) {
273273 return 0 ;
274274 }
275275
276- if (tok -> tok_mode_stack_index && !update_fstring_expr (tok , 0 )) {
276+ if (tok -> tok_mode_stack_index && !_PyLexer_update_fstring_expr (tok , 0 )) {
277277 return 0 ;
278278 }
279279 return 1 ;
@@ -288,8 +288,8 @@ tok_underflow_file(struct tok_state *tok) {
288288 /* We have not yet determined the encoding.
289289 If an encoding is found, use the file-pointer
290290 reader functions from now on. */
291- if (!check_bom (fp_getc , fp_ungetc , fp_setreadl , tok )) {
292- error_ret (tok );
291+ if (!_PyTokenizer_check_bom (fp_getc , fp_ungetc , fp_setreadl , tok )) {
292+ _PyTokenizer_error_ret (tok );
293293 return 0 ;
294294 }
295295 assert (tok -> decoding_state != STATE_INIT );
@@ -320,7 +320,7 @@ tok_underflow_file(struct tok_state *tok) {
320320 tok -> implicit_newline = 1 ;
321321 }
322322
323- if (tok -> tok_mode_stack_index && !update_fstring_expr (tok , 0 )) {
323+ if (tok -> tok_mode_stack_index && !_PyLexer_update_fstring_expr (tok , 0 )) {
324324 return 0 ;
325325 }
326326
@@ -329,16 +329,16 @@ tok_underflow_file(struct tok_state *tok) {
329329 if (tok -> lineno > 2 ) {
330330 tok -> decoding_state = STATE_NORMAL ;
331331 }
332- else if (!check_coding_spec (tok -> cur , strlen (tok -> cur ),
332+ else if (!_PyTokenizer_check_coding_spec (tok -> cur , strlen (tok -> cur ),
333333 tok , fp_setreadl ))
334334 {
335335 return 0 ;
336336 }
337337 }
338338 /* The default encoding is UTF-8, so make sure we don't have any
339339 non-UTF-8 sequences in it. */
340- if (!tok -> encoding && !ensure_utf8 (tok -> cur , tok )) {
341- error_ret (tok );
340+ if (!tok -> encoding && !_PyTokenizer_ensure_utf8 (tok -> cur , tok )) {
341+ _PyTokenizer_error_ret (tok );
342342 return 0 ;
343343 }
344344 assert (tok -> done == E_OK );
@@ -350,7 +350,7 @@ struct tok_state *
350350_PyTokenizer_FromFile (FILE * fp , const char * enc ,
351351 const char * ps1 , const char * ps2 )
352352{
353- struct tok_state * tok = tok_new ();
353+ struct tok_state * tok = _PyTokenizer_tok_new ();
354354 if (tok == NULL )
355355 return NULL ;
356356 if ((tok -> buf = (char * )PyMem_Malloc (BUFSIZ )) == NULL ) {
@@ -370,7 +370,7 @@ _PyTokenizer_FromFile(FILE *fp, const char* enc,
370370 if (enc != NULL ) {
371371 /* Must copy encoding declaration since it
372372 gets copied into the parse tree. */
373- tok -> encoding = new_string (enc , strlen (enc ), tok );
373+ tok -> encoding = _PyTokenizer_new_string (enc , strlen (enc ), tok );
374374 if (!tok -> encoding ) {
375375 _PyTokenizer_Free (tok );
376376 return NULL ;