@@ -268,6 +268,9 @@ struct compiler_unit {
268
268
int u_col_offset ; /* the offset of the current stmt */
269
269
int u_end_lineno ; /* the end line of the current stmt */
270
270
int u_end_col_offset ; /* the end offset of the current stmt */
271
+
272
+ /* true if we need to create an implicit basicblock before the next instr */
273
+ int u_need_new_implicit_block ;
271
274
};
272
275
273
276
/* This struct captures the global state of a compilation.
@@ -757,23 +760,13 @@ compiler_new_block(struct compiler *c)
757
760
return b ;
758
761
}
759
762
760
- static basicblock *
761
- compiler_next_block (struct compiler * c )
762
- {
763
- basicblock * block = compiler_new_block (c );
764
- if (block == NULL )
765
- return NULL ;
766
- c -> u -> u_curblock -> b_next = block ;
767
- c -> u -> u_curblock = block ;
768
- return block ;
769
- }
770
-
771
763
/* Append 'block' after the current block and make it the target of
 * subsequently emitted instructions.  Explicitly entering a block means
 * no implicit block has to be created before the next instruction, so
 * the pending flag is cleared.  Returns 'block'. */
static basicblock *
compiler_use_next_block(struct compiler *c, basicblock *block)
{
    assert(block != NULL);
    struct compiler_unit *u = c->u;
    u->u_curblock->b_next = block;
    u->u_curblock = block;
    u->u_need_new_implicit_block = 0;
    return block;
}
779
772
@@ -1141,6 +1134,37 @@ PyCompile_OpcodeStackEffect(int opcode, int oparg)
1141
1134
return stack_effect (opcode , oparg , -1 );
1142
1135
}
1143
1136
1137
+ static int is_end_of_basic_block (struct instr * instr )
1138
+ {
1139
+ int opcode = instr -> i_opcode ;
1140
+
1141
+ return is_jump (instr ) ||
1142
+ opcode == RETURN_VALUE ||
1143
+ opcode == RAISE_VARARGS ||
1144
+ opcode == RERAISE ;
1145
+ }
1146
+
1147
+ static int
1148
+ compiler_use_new_implicit_block_if_needed (struct compiler * c )
1149
+ {
1150
+ if (c -> u -> u_need_new_implicit_block ) {
1151
+ basicblock * b = compiler_new_block (c );
1152
+ if (b == NULL ) {
1153
+ return -1 ;
1154
+ }
1155
+ compiler_use_next_block (c , b );
1156
+ }
1157
+ return 0 ;
1158
+ }
1159
+
1160
+ static void
1161
+ compiler_check_if_end_of_block (struct compiler * c , struct instr * instr )
1162
+ {
1163
+ if (is_end_of_basic_block (instr )) {
1164
+ c -> u -> u_need_new_implicit_block = 1 ;
1165
+ }
1166
+ }
1167
+
1144
1168
/* Add an opcode with no argument.
1145
1169
Returns 0 on failure, 1 on success.
1146
1170
*/
@@ -1149,23 +1173,29 @@ static int
1149
1173
compiler_addop_line (struct compiler * c , int opcode , int line ,
1150
1174
int end_line , int col_offset , int end_col_offset )
1151
1175
{
1152
- basicblock * b ;
1153
- struct instr * i ;
1154
- int off ;
1155
1176
assert (!HAS_ARG (opcode ) || IS_ARTIFICIAL (opcode ));
1156
- off = compiler_next_instr (c -> u -> u_curblock );
1157
- if (off < 0 )
1177
+
1178
+ if (compiler_use_new_implicit_block_if_needed (c ) < 0 ) {
1179
+ return -1 ;
1180
+ }
1181
+
1182
+ basicblock * b = c -> u -> u_curblock ;
1183
+ int off = compiler_next_instr (b );
1184
+ if (off < 0 ) {
1158
1185
return 0 ;
1159
- b = c -> u -> u_curblock ;
1160
- i = & b -> b_instr [off ];
1186
+ }
1187
+ struct instr * i = & b -> b_instr [off ];
1161
1188
i -> i_opcode = opcode ;
1162
1189
i -> i_oparg = 0 ;
1163
- if (opcode == RETURN_VALUE )
1190
+ if (opcode == RETURN_VALUE ) {
1164
1191
b -> b_return = 1 ;
1192
+ }
1165
1193
i -> i_lineno = line ;
1166
1194
i -> i_end_lineno = end_line ;
1167
1195
i -> i_col_offset = col_offset ;
1168
1196
i -> i_end_col_offset = end_col_offset ;
1197
+
1198
+ compiler_check_if_end_of_block (c , i );
1169
1199
return 1 ;
1170
1200
}
1171
1201
@@ -1377,29 +1407,35 @@ compiler_addop_i_line(struct compiler *c, int opcode, Py_ssize_t oparg,
1377
1407
int lineno , int end_lineno ,
1378
1408
int col_offset , int end_col_offset )
1379
1409
{
1380
- struct instr * i ;
1381
- int off ;
1382
-
1383
1410
/* oparg value is unsigned, but a signed C int is usually used to store
1384
1411
it in the C code (like Python/ceval.c).
1385
1412
1386
1413
Limit to 32-bit signed C int (rather than INT_MAX) for portability.
1387
1414
1388
1415
The argument of a concrete bytecode instruction is limited to 8-bit.
1389
1416
EXTENDED_ARG is used for 16, 24, and 32-bit arguments. */
1417
+
1390
1418
assert (HAS_ARG (opcode ));
1391
1419
assert (0 <= oparg && oparg <= 2147483647 );
1392
1420
1393
- off = compiler_next_instr (c -> u -> u_curblock );
1394
- if (off < 0 )
1421
+ if (compiler_use_new_implicit_block_if_needed (c ) < 0 ) {
1422
+ return -1 ;
1423
+ }
1424
+
1425
+ basicblock * b = c -> u -> u_curblock ;
1426
+ int off = compiler_next_instr (b );
1427
+ if (off < 0 ) {
1395
1428
return 0 ;
1396
- i = & c -> u -> u_curblock -> b_instr [off ];
1429
+ }
1430
+ struct instr * i = & b -> b_instr [off ];
1397
1431
i -> i_opcode = opcode ;
1398
1432
i -> i_oparg = Py_SAFE_DOWNCAST (oparg , Py_ssize_t , int );
1399
1433
i -> i_lineno = lineno ;
1400
1434
i -> i_end_lineno = end_lineno ;
1401
1435
i -> i_col_offset = col_offset ;
1402
1436
i -> i_end_col_offset = end_col_offset ;
1437
+
1438
+ compiler_check_if_end_of_block (c , i );
1403
1439
return 1 ;
1404
1440
}
1405
1441
@@ -1417,15 +1453,19 @@ compiler_addop_i_noline(struct compiler *c, int opcode, Py_ssize_t oparg)
1417
1453
return compiler_addop_i_line (c , opcode , oparg , -1 , 0 , 0 , 0 );
1418
1454
}
1419
1455
1420
- static int add_jump_to_block (basicblock * b , int opcode ,
1456
+ static int add_jump_to_block (struct compiler * c , int opcode ,
1421
1457
int lineno , int end_lineno ,
1422
1458
int col_offset , int end_col_offset ,
1423
1459
basicblock * target )
1424
1460
{
1425
1461
assert (HAS_ARG (opcode ));
1426
- assert (b != NULL );
1427
1462
assert (target != NULL );
1428
1463
1464
+ if (compiler_use_new_implicit_block_if_needed (c ) < 0 ) {
1465
+ return -1 ;
1466
+ }
1467
+
1468
+ basicblock * b = c -> u -> u_curblock ;
1429
1469
int off = compiler_next_instr (b );
1430
1470
struct instr * i = & b -> b_instr [off ];
1431
1471
if (off < 0 ) {
@@ -1437,32 +1477,23 @@ static int add_jump_to_block(basicblock *b, int opcode,
1437
1477
i -> i_end_lineno = end_lineno ;
1438
1478
i -> i_col_offset = col_offset ;
1439
1479
i -> i_end_col_offset = end_col_offset ;
1480
+
1481
+ compiler_check_if_end_of_block (c , i );
1440
1482
return 1 ;
1441
1483
}
1442
1484
1443
1485
static int
1444
1486
compiler_addop_j (struct compiler * c , int opcode , basicblock * b )
1445
1487
{
1446
- return add_jump_to_block (c -> u -> u_curblock , opcode , c -> u -> u_lineno ,
1488
+ return add_jump_to_block (c , opcode , c -> u -> u_lineno ,
1447
1489
c -> u -> u_end_lineno , c -> u -> u_col_offset ,
1448
1490
c -> u -> u_end_col_offset , b );
1449
1491
}
1450
1492
1451
1493
static int
1452
1494
compiler_addop_j_noline (struct compiler * c , int opcode , basicblock * b )
1453
1495
{
1454
- return add_jump_to_block (c -> u -> u_curblock , opcode , -1 , 0 , 0 , 0 , b );
1455
- }
1456
-
1457
- /* NEXT_BLOCK() creates an implicit jump from the current block
1458
- to the new block.
1459
-
1460
- The returns inside this macro make it impossible to decref objects
1461
- created in the local function. Local objects should use the arena.
1462
- */
1463
- #define NEXT_BLOCK (C ) { \
1464
- if (compiler_next_block((C)) == NULL) \
1465
- return 0; \
1496
+ return add_jump_to_block (c , opcode , -1 , 0 , 0 , 0 , b );
1466
1497
}
1467
1498
1468
1499
#define ADDOP (C , OP ) { \
@@ -2823,12 +2854,10 @@ compiler_jump_if(struct compiler *c, expr_ty e, basicblock *next, int cond)
2823
2854
ADDOP_I (c , COPY , 2 );
2824
2855
ADDOP_COMPARE (c , asdl_seq_GET (e -> v .Compare .ops , i ));
2825
2856
ADDOP_JUMP (c , POP_JUMP_IF_FALSE , cleanup );
2826
- NEXT_BLOCK (c );
2827
2857
}
2828
2858
VISIT (c , expr , (expr_ty )asdl_seq_GET (e -> v .Compare .comparators , n ));
2829
2859
ADDOP_COMPARE (c , asdl_seq_GET (e -> v .Compare .ops , n ));
2830
2860
ADDOP_JUMP (c , cond ? POP_JUMP_IF_TRUE : POP_JUMP_IF_FALSE , next );
2831
- NEXT_BLOCK (c );
2832
2861
basicblock * end = compiler_new_block (c );
2833
2862
if (end == NULL )
2834
2863
return 0 ;
@@ -2852,7 +2881,6 @@ compiler_jump_if(struct compiler *c, expr_ty e, basicblock *next, int cond)
2852
2881
/* general implementation */
2853
2882
VISIT (c , expr , e );
2854
2883
ADDOP_JUMP (c , cond ? POP_JUMP_IF_TRUE : POP_JUMP_IF_FALSE , next );
2855
- NEXT_BLOCK (c );
2856
2884
return 1 ;
2857
2885
}
2858
2886
@@ -3128,7 +3156,6 @@ compiler_return(struct compiler *c, stmt_ty s)
3128
3156
ADDOP_LOAD_CONST (c , s -> v .Return .value -> v .Constant .value );
3129
3157
}
3130
3158
ADDOP (c , RETURN_VALUE );
3131
- NEXT_BLOCK (c );
3132
3159
3133
3160
return 1 ;
3134
3161
}
@@ -3149,7 +3176,6 @@ compiler_break(struct compiler *c)
3149
3176
return 0 ;
3150
3177
}
3151
3178
ADDOP_JUMP (c , JUMP_ABSOLUTE , loop -> fb_exit );
3152
- NEXT_BLOCK (c );
3153
3179
return 1 ;
3154
3180
}
3155
3181
@@ -3166,7 +3192,6 @@ compiler_continue(struct compiler *c)
3166
3192
return compiler_error (c , "'continue' not properly in loop" );
3167
3193
}
3168
3194
ADDOP_JUMP (c , JUMP_ABSOLUTE , loop -> fb_block );
3169
- NEXT_BLOCK (c )
3170
3195
return 1 ;
3171
3196
}
3172
3197
@@ -3348,7 +3373,6 @@ compiler_try_except(struct compiler *c, stmt_ty s)
3348
3373
compiler_pop_fblock (c , TRY_EXCEPT , body );
3349
3374
ADDOP_NOLINE (c , POP_BLOCK );
3350
3375
if (s -> v .Try .orelse && asdl_seq_LEN (s -> v .Try .orelse )) {
3351
- NEXT_BLOCK (c );
3352
3376
VISIT_SEQ (c , stmt , s -> v .Try .orelse );
3353
3377
}
3354
3378
ADDOP_JUMP_NOLINE (c , JUMP_FORWARD , end );
@@ -3374,7 +3398,6 @@ compiler_try_except(struct compiler *c, stmt_ty s)
3374
3398
if (handler -> v .ExceptHandler .type ) {
3375
3399
VISIT (c , expr , handler -> v .ExceptHandler .type );
3376
3400
ADDOP_JUMP (c , JUMP_IF_NOT_EXC_MATCH , except );
3377
- NEXT_BLOCK (c );
3378
3401
}
3379
3402
if (handler -> v .ExceptHandler .name ) {
3380
3403
basicblock * cleanup_end , * cleanup_body ;
@@ -3580,7 +3603,6 @@ compiler_try_star_except(struct compiler *c, stmt_ty s)
3580
3603
if (handler -> v .ExceptHandler .type ) {
3581
3604
VISIT (c , expr , handler -> v .ExceptHandler .type );
3582
3605
ADDOP_JUMP (c , JUMP_IF_NOT_EG_MATCH , except );
3583
- NEXT_BLOCK (c );
3584
3606
}
3585
3607
3586
3608
basicblock * cleanup_end = compiler_new_block (c );
@@ -3665,7 +3687,6 @@ compiler_try_star_except(struct compiler *c, stmt_ty s)
3665
3687
ADDOP (c , PREP_RERAISE_STAR );
3666
3688
ADDOP_I (c , COPY , 1 );
3667
3689
ADDOP_JUMP (c , POP_JUMP_IF_NOT_NONE , reraise );
3668
- NEXT_BLOCK (c );
3669
3690
3670
3691
/* Nothing to reraise */
3671
3692
ADDOP (c , POP_TOP );
@@ -3957,7 +3978,6 @@ compiler_visit_stmt(struct compiler *c, stmt_ty s)
3957
3978
}
3958
3979
}
3959
3980
ADDOP_I (c , RAISE_VARARGS , (int )n );
3960
- NEXT_BLOCK (c );
3961
3981
break ;
3962
3982
case Try_kind :
3963
3983
return compiler_try (c , s );
@@ -4503,7 +4523,6 @@ compiler_compare(struct compiler *c, expr_ty e)
4503
4523
ADDOP_I (c , COPY , 2 );
4504
4524
ADDOP_COMPARE (c , asdl_seq_GET (e -> v .Compare .ops , i ));
4505
4525
ADDOP_JUMP (c , JUMP_IF_FALSE_OR_POP , cleanup );
4506
- NEXT_BLOCK (c );
4507
4526
}
4508
4527
VISIT (c , expr , (expr_ty )asdl_seq_GET (e -> v .Compare .comparators , n ));
4509
4528
ADDOP_COMPARE (c , asdl_seq_GET (e -> v .Compare .ops , n ));
@@ -5093,7 +5112,6 @@ compiler_sync_comprehension_generator(struct compiler *c,
5093
5112
depth ++ ;
5094
5113
compiler_use_next_block (c , start );
5095
5114
ADDOP_JUMP (c , FOR_ITER , anchor );
5096
- NEXT_BLOCK (c );
5097
5115
}
5098
5116
VISIT (c , expr , gen -> target );
5099
5117
@@ -5103,7 +5121,6 @@ compiler_sync_comprehension_generator(struct compiler *c,
5103
5121
expr_ty e = (expr_ty )asdl_seq_GET (gen -> ifs , i );
5104
5122
if (!compiler_jump_if (c , e , if_cleanup , 0 ))
5105
5123
return 0 ;
5106
- NEXT_BLOCK (c );
5107
5124
}
5108
5125
5109
5126
if (++ gen_index < asdl_seq_LEN (generators ))
@@ -5198,7 +5215,6 @@ compiler_async_comprehension_generator(struct compiler *c,
5198
5215
expr_ty e = (expr_ty )asdl_seq_GET (gen -> ifs , i );
5199
5216
if (!compiler_jump_if (c , e , if_cleanup , 0 ))
5200
5217
return 0 ;
5201
- NEXT_BLOCK (c );
5202
5218
}
5203
5219
5204
5220
depth ++ ;
@@ -5410,7 +5426,6 @@ compiler_with_except_finish(struct compiler *c, basicblock * cleanup) {
5410
5426
if (exit == NULL )
5411
5427
return 0 ;
5412
5428
ADDOP_JUMP (c , POP_JUMP_IF_TRUE , exit );
5413
- NEXT_BLOCK (c );
5414
5429
ADDOP_I (c , RERAISE , 2 );
5415
5430
compiler_use_next_block (c , cleanup );
5416
5431
POP_EXCEPT_AND_RERAISE (c );
@@ -6149,7 +6164,6 @@ jump_to_fail_pop(struct compiler *c, pattern_context *pc, int op)
6149
6164
Py_ssize_t pops = pc -> on_top + PyList_GET_SIZE (pc -> stores );
6150
6165
RETURN_IF_FALSE (ensure_fail_pop (c , pc , pops ));
6151
6166
ADDOP_JUMP (c , op , pc -> fail_pop [pops ]);
6152
- NEXT_BLOCK (c );
6153
6167
return 1 ;
6154
6168
}
6155
6169
@@ -6159,7 +6173,6 @@ emit_and_reset_fail_pop(struct compiler *c, pattern_context *pc)
6159
6173
{
6160
6174
if (!pc -> fail_pop_size ) {
6161
6175
assert (pc -> fail_pop == NULL );
6162
- NEXT_BLOCK (c );
6163
6176
return 1 ;
6164
6177
}
6165
6178
while (-- pc -> fail_pop_size ) {
@@ -6662,7 +6675,6 @@ compiler_pattern_or(struct compiler *c, pattern_ty p, pattern_context *pc)
6662
6675
}
6663
6676
assert (control );
6664
6677
if (!compiler_addop_j (c , JUMP_FORWARD , end ) ||
6665
- !compiler_next_block (c ) ||
6666
6678
!emit_and_reset_fail_pop (c , pc ))
6667
6679
{
6668
6680
goto error ;
@@ -8136,10 +8148,7 @@ assemble(struct compiler *c, int addNone)
8136
8148
PyCodeObject * co = NULL ;
8137
8149
PyObject * consts = NULL ;
8138
8150
8139
- /* Make sure every block that falls off the end returns None.
8140
- XXX NEXT_BLOCK() isn't quite right, because if the last
8141
- block ends with a jump or return b_next shouldn't set.
8142
- */
8151
+ /* Make sure every block that falls off the end returns None. */
8143
8152
if (!c -> u -> u_curblock -> b_return ) {
8144
8153
UNSET_LOC (c );
8145
8154
if (addNone )
0 commit comments