@@ -1108,9 +1108,22 @@ _PyEval_EvalFrameDefault(PyThreadState *tstate, _PyInterpreterFrame *frame, int
     _PyFrame_SetStackPointer(frame, stack_pointer);
     // Increment side exit counter for this uop
     int pc = next_uop - 1 - current_executor->trace;
+    _PyExecutorObject **pexecutor = current_executor->executors + pc;
+    if (*pexecutor != NULL) {
+        PyCodeObject *code = _PyFrame_GetCode(frame);
+        DPRINTF(2, "Jumping to new executor for %s (%s:%d) at byte offset %d\n",
+                PyUnicode_AsUTF8(code->co_qualname),
+                PyUnicode_AsUTF8(code->co_filename),
+                code->co_firstlineno,
+                2 * (int)(frame->instr_ptr - _PyCode_CODE(_PyFrame_GetCode(frame))));
+        Py_DECREF(current_executor);
+        current_executor = (_PyUOpExecutorObject *)*pexecutor;
+        Py_INCREF(current_executor);
+        goto enter_tier_two;
+    }
     uint16_t *pcounter = current_executor->counters + pc;
     *pcounter += 1;
-    if (*pcounter == 16 &&  // TODO: use resume_threshold
+    if (*pcounter == 32 &&  // TODO: use resume_threshold
         tstate->interp->optimizer != &_PyOptimizer_Default &&
         (opcode == POP_JUMP_IF_FALSE ||
          opcode == POP_JUMP_IF_TRUE ||
@@ -1121,14 +1134,32 @@ _PyEval_EvalFrameDefault(PyThreadState *tstate, _PyInterpreterFrame *frame, int
                 _PyUOpName(uopcode), pc, current_executor, (int)(*pcounter));
         DPRINTF(2, " T1: %s\n", _PyOpcode_OpName[opcode]);
         // The counter will cycle around once the 16 bits overflow
-        int optimized = _PyOptimizer_Anywhere(frame, src, dest, stack_pointer);
+        int optimized = _PyOptimizer_Unanchored(frame, dest, pexecutor, stack_pointer);
         if (optimized < 0) {
             goto error_tier_two;
         }
         if (optimized) {
             DPRINTF(1, "--> Optimized %s @ %d in %p\n",
                     _PyUOpName(uopcode), pc, current_executor);
             DPRINTF(1, " T1: %s\n", _PyOpcode_OpName[src->op.code]);
+            PyCodeObject *code = _PyFrame_GetCode(frame);
+            DPRINTF(2, "Jumping to fresh executor for %s (%s:%d) at byte offset %d\n",
+                    PyUnicode_AsUTF8(code->co_qualname),
+                    PyUnicode_AsUTF8(code->co_filename),
+                    code->co_firstlineno,
+                    2 * (int)(frame->instr_ptr - _PyCode_CODE(_PyFrame_GetCode(frame))));
+            Py_DECREF(current_executor);
+            current_executor = (_PyUOpExecutorObject *)*pexecutor;
+            if (current_executor->trace[0].opcode != uopcode) {
+                Py_INCREF(current_executor);
+                goto enter_tier_two;
+            }
+            // This is guaranteed to deopt again; forget about it
+            DPRINTF(2, "It's not an improvement -- discarding trace\n");
+            *pexecutor = NULL;
+            Py_DECREF(current_executor);
+            next_instr = frame->instr_ptr;
+            goto resume_frame;
         }
         else {
             DPRINTF(2, "--> Failed to optimize %s @ %d in %p\n",
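Below is a minimal, self-contained sketch of the control flow these two hunks implement on a deoptimizing side exit: if an executor is already attached to this exit, jump straight to it; otherwise bump the exit's counter and, once it reaches the threshold, ask the optimizer for a fresh trace, discarding the trace if it would just deopt again immediately. Every name in the sketch (Executor, SideExit, EXIT_THRESHOLD, try_optimize, handle_side_exit) is an invented stand-in, not CPython API; the real code works with _PyUOpExecutorObject fields, calls _PyOptimizer_Unanchored, and manages Py_INCREF/Py_DECREF reference counts, all of which the sketch omits.

/* Hedged sketch of the side-exit handling added in the diff above.
   All types and functions here are invented stand-ins, not CPython API. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define EXIT_THRESHOLD 32          /* mirrors the 16 -> 32 change in the hunk */

typedef struct Executor {
    int first_uop;                 /* stand-in for trace[0].opcode */
} Executor;

typedef struct SideExit {
    Executor *executor;            /* stand-in for current_executor->executors[pc] */
    uint16_t counter;              /* stand-in for current_executor->counters[pc] */
} SideExit;

/* Hypothetical optimizer hook; the real code calls _PyOptimizer_Unanchored. */
static Executor *
try_optimize(void)
{
    Executor *e = malloc(sizeof(*e));
    if (e != NULL) {
        e->first_uop = 42;         /* pretend the fresh trace starts elsewhere */
    }
    return e;
}

/* Returns the executor to enter next, or NULL to resume in tier 1. */
static Executor *
handle_side_exit(SideExit *site, int deopt_uop)
{
    if (site->executor != NULL) {
        /* An executor is already attached to this exit: jump straight to it. */
        return site->executor;
    }
    site->counter += 1;
    if (site->counter != EXIT_THRESHOLD) {
        return NULL;               /* this exit is not hot enough yet */
    }
    Executor *fresh = try_optimize();
    if (fresh == NULL) {
        return NULL;               /* optimization failed; stay in tier 1 */
    }
    if (fresh->first_uop == deopt_uop) {
        /* The new trace would deopt again immediately; discard it. */
        free(fresh);
        return NULL;
    }
    site->executor = fresh;        /* cache it for future exits at this uop */
    return fresh;
}

int
main(void)
{
    SideExit site = { NULL, 0 };
    for (int i = 0; i < 40; i++) {
        Executor *next = handle_side_exit(&site, /*deopt_uop=*/7);
        if (next != NULL) {
            printf("exit %d: entering executor whose trace starts at uop %d\n",
                   i, next->first_uop);
        }
    }
    free(site.executor);
    return 0;
}

One design detail worth noting: because the threshold test uses `==` rather than `>=`, a 16-bit counter that keeps missing simply wraps around and gets another optimization attempt after it overflows, which appears to be what the "counter will cycle around once the 16 bits overflow" comment in the hunk refers to.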