@@ -1303,18 +1303,26 @@ static void
1303
1303
// Process the interpreter's shared delayed-free queue, invoking `cb` (or the
// default free path when `cb` is NULL) for each entry whose QSBR goal has been
// reached, then refresh the `has_work` flag so other threads can skip the
// mutex when the queue is empty.
//
// The caller MUST hold `queue->mutex`; this is asserted below.  `qsbr` is the
// calling thread's QSBR state used to decide which entries are safe to free.
static void
process_interp_queue(struct _Py_mem_interp_free_queue *queue,
                     struct _qsbr_thread_state *qsbr, delayed_dealloc_cb cb,
                     void *state)
{
    assert(PyMutex_IsLocked(&queue->mutex));
    process_queue(&queue->head, qsbr, false, cb, state);

    // Entries whose QSBR goal has not yet been reached remain on the list;
    // record whether a later pass is still needed.  Relaxed ordering is
    // sufficient: `has_work` is only a hint checked before taking the mutex.
    int more_work = !llist_empty(&queue->head);
    _Py_atomic_store_int_relaxed(&queue->has_work, more_work);
}
1313
+
1314
+ static void
1315
+ maybe_process_interp_queue (struct _Py_mem_interp_free_queue * queue ,
1316
+ struct _qsbr_thread_state * qsbr , delayed_dealloc_cb cb ,
1317
+ void * state )
1306
1318
{
1307
1319
if (!_Py_atomic_load_int_relaxed (& queue -> has_work )) {
1308
1320
return ;
1309
1321
}
1310
1322
1311
1323
// Try to acquire the lock, but don't block if it's already held.
1312
1324
if (_PyMutex_LockTimed (& queue -> mutex , 0 , 0 ) == PY_LOCK_ACQUIRED ) {
1313
- process_queue (& queue -> head , qsbr , false, cb , state );
1314
-
1315
- int more_work = !llist_empty (& queue -> head );
1316
- _Py_atomic_store_int_relaxed (& queue -> has_work , more_work );
1317
-
1325
+ process_interp_queue (queue , qsbr , cb , state );
1318
1326
PyMutex_Unlock (& queue -> mutex );
1319
1327
}
1320
1328
}
@@ -1329,7 +1337,7 @@ _PyMem_ProcessDelayed(PyThreadState *tstate)
1329
1337
process_queue (& tstate_impl -> mem_free_queue , tstate_impl -> qsbr , true, NULL , NULL );
1330
1338
1331
1339
// Process shared interpreter work
1332
- process_interp_queue (& interp -> mem_free_queue , tstate_impl -> qsbr , NULL , NULL );
1340
+ maybe_process_interp_queue (& interp -> mem_free_queue , tstate_impl -> qsbr , NULL , NULL );
1333
1341
}
1334
1342
1335
1343
void
@@ -1342,7 +1350,7 @@ _PyMem_ProcessDelayedNoDealloc(PyThreadState *tstate, delayed_dealloc_cb cb, voi
1342
1350
process_queue (& tstate_impl -> mem_free_queue , tstate_impl -> qsbr , true, cb , state );
1343
1351
1344
1352
// Process shared interpreter work
1345
- process_interp_queue (& interp -> mem_free_queue , tstate_impl -> qsbr , cb , state );
1353
+ maybe_process_interp_queue (& interp -> mem_free_queue , tstate_impl -> qsbr , cb , state );
1346
1354
}
1347
1355
1348
1356
void
@@ -1364,10 +1372,15 @@ _PyMem_AbandonDelayed(PyThreadState *tstate)
1364
1372
return ;
1365
1373
}
1366
1374
1367
- // Merge the thread's work queue into the interpreter's work queue.
1368
1375
PyMutex_Lock (& interp -> mem_free_queue .mutex );
1376
+
1377
+ // Merge the thread's work queue into the interpreter's work queue.
1369
1378
llist_concat (& interp -> mem_free_queue .head , queue );
1370
- _Py_atomic_store_int_relaxed (& interp -> mem_free_queue .has_work , 1 );
1379
+
1380
+ // Process the merged queue now (see gh-130794).
1381
+ _PyThreadStateImpl * this_tstate = (_PyThreadStateImpl * )_PyThreadState_GET ();
1382
+ process_interp_queue (& interp -> mem_free_queue , this_tstate -> qsbr , NULL , NULL );
1383
+
1371
1384
PyMutex_Unlock (& interp -> mem_free_queue .mutex );
1372
1385
1373
1386
assert (llist_empty (queue )); // the thread's queue is now empty
0 commit comments