Skip to content

Commit 2555192

Browse files
feat(freertos-smp): Add support for TCB locks
1 parent e850728 commit 2555192

File tree

3 files changed

+230
-15
lines changed

3 files changed

+230
-15
lines changed

include/FreeRTOS.h

Lines changed: 22 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -373,6 +373,25 @@
373373
#define portUSING_GRANULAR_LOCKS 0
374374
#endif
375375

376+
/* configUSE_TCB_DATA_GROUP_LOCK enables per-TCB spinlocks to protect TCB-specific
377+
* data such as uxPreemptionDisable. This reduces lock contention compared to using
378+
* the global kernel lock. When enabled:
379+
* - Each TCB has its own spinlock (xTCBSpinlock)
380+
* - vTaskPreemptionDisable/Enable use the TCB lock instead of kernel lock
381+
* - prvYieldCore acquires the target TCB's lock before checking uxPreemptionDisable
382+
* This feature requires portUSING_GRANULAR_LOCKS and multi-core. */
383+
#ifndef configUSE_TCB_DATA_GROUP_LOCK
384+
#define configUSE_TCB_DATA_GROUP_LOCK 0
385+
#endif
386+
387+
#if ( ( configUSE_TCB_DATA_GROUP_LOCK == 1 ) && ( portUSING_GRANULAR_LOCKS != 1 ) )
388+
#error configUSE_TCB_DATA_GROUP_LOCK requires portUSING_GRANULAR_LOCKS to be enabled
389+
#endif
390+
391+
#if ( ( configUSE_TCB_DATA_GROUP_LOCK == 1 ) && ( configNUMBER_OF_CORES == 1 ) )
392+
#error configUSE_TCB_DATA_GROUP_LOCK is not supported in single core FreeRTOS
393+
#endif
394+
376395
#ifndef configMAX_TASK_NAME_LEN
377396
#define configMAX_TASK_NAME_LEN 16
378397
#endif
@@ -3296,6 +3315,9 @@ typedef struct xSTATIC_TCB
32963315
void * pvDummyDirectTransferBuffer;
32973316
BaseType_t xDummyDirectTransferPosition;
32983317
#endif
3318+
#if ( configUSE_TCB_DATA_GROUP_LOCK == 1 )
3319+
portSPINLOCK_TYPE xTCBDummySpinlock; /**< Spinlock protecting TCB-specific data (uxPreemptionDisable, uxDeferredStateChange). */
3320+
#endif
32993321
} StaticTask_t;
33003322

33013323
/*

include/task.h

Lines changed: 8 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -360,7 +360,14 @@ typedef enum
360360
{ \
361361
mtCOVERAGE_TEST_MARKER(); \
362362
} \
363-
/* Re-enable preemption */ \
363+
/* IMPORTANT: Do NOT enable interrupts here! uxPreemptionDisable is still \
364+
* > 0 at this point. prvTaskPreemptionEnable() will decrement it and \
365+
* enable interrupts safely via vTaskTCBExitCritical() when BOTH conditions \
366+
* are met: critical nesting count == 0 AND uxPreemptionDisable == 0. \
367+
* Enabling interrupts here would create a race where an interrupt could \
368+
* trigger a context switch while uxPreemptionDisable > 0, causing an \
369+
* assertion failure in vTaskSwitchContext. */ \
370+
/* Re-enable preemption - this handles interrupt enabling safely */ \
364371
prvTaskPreemptionEnable( NULL ); \
365372
} while( 0 )
366373
#endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */

tasks.c

Lines changed: 200 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -350,7 +350,43 @@
350350
/* Yields the given core. This must be called from a critical section and xCoreID
351351
* must be valid. This macro is not required in single core since there is only
352352
* one core to yield. */
353-
#if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
353+
#if ( configUSE_TCB_DATA_GROUP_LOCK == 1 )
354+
/* When TCB data group lock is enabled, we need to acquire the target core's
355+
* TCB spinlock before checking uxPreemptionDisable to prevent a race condition
356+
* where the target core could disable preemption between our check and the
357+
* cross-core interrupt arriving. */
358+
#define prvYieldCore( xCoreID ) \
359+
do { \
360+
BaseType_t xCurrentCoreID = ( BaseType_t ) portGET_CORE_ID(); \
361+
BaseType_t xCoreToYield = ( xCoreID ); \
362+
if( xCoreToYield == xCurrentCoreID ) \
363+
{ \
364+
/* Pending a yield for this core since it is in the critical section. */ \
365+
xYieldPendings[ xCoreToYield ] = pdTRUE; \
366+
} \
367+
else \
368+
{ \
369+
/* Acquire the target core's TCB spinlock to prevent race with vTaskPreemptionDisable. */ \
370+
portGET_SPINLOCK( xCurrentCoreID, &( pxCurrentTCBs[ xCoreToYield ]->xTCBSpinlock ) ); \
371+
{ \
372+
if( pxCurrentTCBs[ xCoreToYield ]->uxPreemptionDisable == 0U ) \
373+
{ \
374+
/* Request other core to yield if it is not requested before. */ \
375+
if( pxCurrentTCBs[ xCoreToYield ]->xTaskRunState != taskTASK_SCHEDULED_TO_YIELD ) \
376+
{ \
377+
portYIELD_CORE( xCoreToYield ); \
378+
pxCurrentTCBs[ xCoreToYield ]->xTaskRunState = taskTASK_SCHEDULED_TO_YIELD; \
379+
} \
380+
} \
381+
else \
382+
{ \
383+
xYieldPendings[ xCoreToYield ] = pdTRUE; \
384+
} \
385+
} \
386+
portRELEASE_SPINLOCK( xCurrentCoreID, &( pxCurrentTCBs[ xCoreToYield ]->xTCBSpinlock ) ); \
387+
} \
388+
} while( 0 )
389+
#elif ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
354390
#define prvYieldCore( xCoreID ) \
355391
do { \
356392
if( ( xCoreID ) == ( BaseType_t ) portGET_CORE_ID() ) \
@@ -375,7 +411,7 @@
375411
} \
376412
} \
377413
} while( 0 )
378-
#else /* if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) */
414+
#else /* if ( configUSE_TCB_DATA_GROUP_LOCK == 1 ) */
379415
#define prvYieldCore( xCoreID ) \
380416
do { \
381417
if( ( xCoreID ) == ( BaseType_t ) portGET_CORE_ID() ) \
@@ -393,7 +429,7 @@
393429
} \
394430
} \
395431
} while( 0 )
396-
#endif /* #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) */
432+
#endif /* #if ( configUSE_TCB_DATA_GROUP_LOCK == 1 ) */
397433
#endif /* #if ( configNUMBER_OF_CORES > 1 ) */
398434
/*-----------------------------------------------------------*/
399435

@@ -524,6 +560,10 @@ typedef struct tskTaskControlBlock /* The old naming convention is used to
524560
* NULL when not using direct transfer */
525561
BaseType_t xDirectTransferPosition; /**< Position for direct transfer (queueSEND_TO_BACK, queueSEND_TO_FRONT, queueOVERWRITE) */
526562
#endif
563+
564+
#if ( ( configUSE_TCB_DATA_GROUP_LOCK == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
565+
portSPINLOCK_TYPE xTCBSpinlock; /**< Spinlock protecting TCB-specific data (uxPreemptionDisable, uxDeferredStateChange). */
566+
#endif
527567
} tskTCB;
528568

529569
/* The old tskTCB name is maintained above then typedefed to the new TCB_t name
@@ -2173,6 +2213,12 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
21732213
}
21742214
#endif /* #if ( configNUMBER_OF_CORES > 1 ) */
21752215

2216+
#if ( ( configUSE_TCB_DATA_GROUP_LOCK == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
2217+
{
2218+
portINIT_SPINLOCK( &( pxNewTCB->xTCBSpinlock ) );
2219+
}
2220+
#endif
2221+
21762222
if( pxCreatedTask != NULL )
21772223
{
21782224
/* Pass the handle out in an anonymous way. The handle can be used to
@@ -3311,6 +3357,123 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
33113357

33123358
/*-----------------------------------------------------------*/
33133359

3360+
#if ( ( configUSE_TCB_DATA_GROUP_LOCK == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
3361+
3362+
static void prvTaskTCBLockCheckForRunStateChange( void )
3363+
{
3364+
const TCB_t * pxThisTCB;
3365+
BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID();
3366+
3367+
/* This must only be called from within a task. */
3368+
portASSERT_IF_IN_ISR();
3369+
3370+
/* This function is always called with interrupts disabled
3371+
* so this is safe. */
3372+
pxThisTCB = pxCurrentTCBs[ xCoreID ];
3373+
3374+
while( pxThisTCB->xTaskRunState == taskTASK_SCHEDULED_TO_YIELD )
3375+
{
3376+
UBaseType_t uxPrevCriticalNesting;
3377+
3378+
/* We are only here if we just entered a critical section
3379+
* or if we just suspended the scheduler, and another task
3380+
* has requested that we yield.
3381+
*
3382+
* This is slightly complicated since we need to save and restore
3383+
* the suspension and critical nesting counts, as well as release
3384+
* and reacquire the correct locks. And then, do it all over again
3385+
* if our state changed again during the reacquisition. */
3386+
uxPrevCriticalNesting = portGET_CRITICAL_NESTING_COUNT( xCoreID );
3387+
3388+
if( uxPrevCriticalNesting > 0U )
3389+
{
3390+
portSET_CRITICAL_NESTING_COUNT( xCoreID, 0U );
3391+
portRELEASE_SPINLOCK( xCoreID, &pxCurrentTCBs[ xCoreID ]->xTCBSpinlock );
3392+
}
3393+
else
3394+
{
3395+
/* The scheduler is suspended. uxSchedulerSuspended is updated
3396+
* only when the task is not requested to yield. */
3397+
mtCOVERAGE_TEST_MARKER();
3398+
}
3399+
3400+
portMEMORY_BARRIER();
3401+
3402+
portENABLE_INTERRUPTS();
3403+
3404+
/* Enabling interrupts should cause this core to immediately service
3405+
* the pending interrupt and yield. After servicing the pending interrupt,
3406+
* the task needs to re-evaluate its run state within this loop, as
3407+
* other cores may have requested this task to yield, potentially altering
3408+
* its run state. */
3409+
3410+
portDISABLE_INTERRUPTS();
3411+
3412+
xCoreID = ( BaseType_t ) portGET_CORE_ID();
3413+
portGET_SPINLOCK( xCoreID, &pxCurrentTCBs[ xCoreID ]->xTCBSpinlock );
3414+
3415+
portSET_CRITICAL_NESTING_COUNT( xCoreID, uxPrevCriticalNesting );
3416+
}
3417+
}
3418+
3419+
void vTaskTCBEnterCritical( void )
3420+
{
3421+
if( xSchedulerRunning != pdFALSE )
3422+
{
3423+
portDISABLE_INTERRUPTS();
3424+
{
3425+
const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID();
3426+
3427+
portGET_SPINLOCK( xCoreID, &pxCurrentTCBs[ xCoreID ]->xTCBSpinlock );
3428+
3429+
portINCREMENT_CRITICAL_NESTING_COUNT( xCoreID );
3430+
3431+
if( ( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 1U ) &&
3432+
( pxCurrentTCBs[ xCoreID ]->uxPreemptionDisable == 0U ) )
3433+
{
3434+
prvTaskTCBLockCheckForRunStateChange();
3435+
}
3436+
}
3437+
}
3438+
}
3439+
3440+
void vTaskTCBExitCritical( void )
3441+
{
3442+
const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID();
3443+
3444+
if( xSchedulerRunning != pdFALSE )
3445+
{
3446+
if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) > 0U )
3447+
{
3448+
BaseType_t xYieldCurrentTask = pdFALSE;
3449+
3450+
/* Get the xYieldPending stats inside the critical section. */
3451+
if( pxCurrentTCBs[ xCoreID ]->uxPreemptionDisable == 0U )
3452+
{
3453+
xYieldCurrentTask = xYieldPendings[ xCoreID ];
3454+
}
3455+
3456+
portRELEASE_SPINLOCK( xCoreID, &pxCurrentTCBs[ xCoreID ]->xTCBSpinlock );
3457+
3458+
portDECREMENT_CRITICAL_NESTING_COUNT( xCoreID );
3459+
3460+
/* If the critical nesting count is 0, enable interrupts */
3461+
if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 0U )
3462+
{
3463+
portENABLE_INTERRUPTS();
3464+
3465+
if( xYieldCurrentTask != pdFALSE )
3466+
{
3467+
portYIELD();
3468+
}
3469+
}
3470+
}
3471+
}
3472+
}
3473+
3474+
#endif /* #if ( ( configUSE_TCB_DATA_GROUP_LOCK == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
3475+
/*-----------------------------------------------------------*/
3476+
33143477
#if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
33153478

33163479
void vTaskPreemptionDisable( const TaskHandle_t xTask )
@@ -3319,8 +3482,8 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
33193482

33203483
traceENTER_vTaskPreemptionDisable( xTask );
33213484

3322-
#if ( configLIGHTWEIGHT_CRITICAL_SECTION == 1 )
3323-
vKernelLightWeightEnterCritical();
3485+
#if ( configUSE_TCB_DATA_GROUP_LOCK == 1 )
3486+
vTaskTCBEnterCritical();
33243487
#else
33253488
kernelENTER_CRITICAL();
33263489
#endif
@@ -3337,8 +3500,8 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
33373500
mtCOVERAGE_TEST_MARKER();
33383501
}
33393502
}
3340-
#if ( configLIGHTWEIGHT_CRITICAL_SECTION == 1 )
3341-
vKernelLightWeightExitCritical();
3503+
#if ( configUSE_TCB_DATA_GROUP_LOCK == 1 )
3504+
vTaskTCBExitCritical();
33423505
#else
33433506
kernelEXIT_CRITICAL();
33443507
#endif
@@ -3356,15 +3519,18 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
33563519
TCB_t * pxTCB;
33573520
UBaseType_t uxDeferredAction = 0U;
33583521
BaseType_t xAlreadyYielded = pdFALSE;
3522+
BaseType_t xTaskRequestedToYield = pdFALSE;
33593523

3360-
#if ( configLIGHTWEIGHT_CRITICAL_SECTION == 1 )
3361-
vKernelLightWeightEnterCritical();
3524+
#if ( configUSE_TCB_DATA_GROUP_LOCK == 1 )
3525+
vTaskTCBEnterCritical();
33623526
#else
33633527
kernelENTER_CRITICAL();
33643528
#endif
33653529
{
33663530
if( xSchedulerRunning != pdFALSE )
33673531
{
3532+
/* Current task running on the core can not be changed by other core.
3533+
* Get TCB from handle is safe to call within TCB critical section. */
33683534
pxTCB = prvGetTCBFromHandle( xTask );
33693535
configASSERT( pxTCB != NULL );
33703536
configASSERT( pxTCB->uxPreemptionDisable > 0U );
@@ -3381,8 +3547,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
33813547
{
33823548
if( ( xYieldPendings[ pxTCB->xTaskRunState ] != pdFALSE ) && ( taskTASK_IS_RUNNING( pxTCB ) != pdFALSE ) )
33833549
{
3384-
prvYieldCore( pxTCB->xTaskRunState );
3385-
xAlreadyYielded = pdTRUE;
3550+
xTaskRequestedToYield = pdTRUE;
33863551
}
33873552
else
33883553
{
@@ -3400,8 +3565,8 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
34003565
mtCOVERAGE_TEST_MARKER();
34013566
}
34023567
}
3403-
#if ( configLIGHTWEIGHT_CRITICAL_SECTION == 1 )
3404-
vKernelLightWeightExitCritical();
3568+
#if ( configUSE_TCB_DATA_GROUP_LOCK == 1 )
3569+
vTaskTCBExitCritical();
34053570
#else
34063571
kernelEXIT_CRITICAL();
34073572
#endif
@@ -3424,6 +3589,26 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
34243589
/* Any deferred action on the task would result in a context switch. */
34253590
xAlreadyYielded = pdTRUE;
34263591
}
3592+
else
3593+
{
3594+
if( xTaskRequestedToYield != pdFALSE )
3595+
{
3596+
/* prvYieldCore must be called in critical section. */
3597+
kernelENTER_CRITICAL();
3598+
{
3599+
pxTCB = prvGetTCBFromHandle( xTask );
3600+
/* There is gap between TCB critical section and kernel critical section.
3601+
* Checking the yield pending again to prevent that the current task
3602+
* already handle the yield request. */
3603+
if( ( xYieldPendings[ pxTCB->xTaskRunState ] != pdFALSE ) && ( taskTASK_IS_RUNNING( pxTCB ) != pdFALSE ) )
3604+
{
3605+
prvYieldCore( pxTCB->xTaskRunState );
3606+
}
3607+
}
3608+
kernelEXIT_CRITICAL();
3609+
xAlreadyYielded = pdTRUE;
3610+
}
3611+
}
34273612

34283613
return xAlreadyYielded;
34293614
}
@@ -7576,7 +7761,8 @@ static void prvResetNextTaskUnblockTime( void )
75767761
* interrupt. Only assert if the critical nesting count is 1 to
75777762
* protect against recursive calls if the assert function also uses a
75787763
* critical section. */
7579-
if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 1U )
7764+
if( ( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 1U ) &&
7765+
( pxCurrentTCBs[ xCoreID ]->uxPreemptionDisable == 0U ) )
75807766
{
75817767
portASSERT_IF_IN_ISR();
75827768

0 commit comments

Comments
 (0)