 /* Yields the given core. This must be called from a critical section and xCoreID
  * must be valid. This macro is not required in single core since there is only
  * one core to yield. */
-    #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
+    #if ( configUSE_TCB_DATA_GROUP_LOCK == 1 )
+        /* When the TCB data group lock is enabled, the target core's TCB spinlock
+         * must be acquired before checking uxPreemptionDisable. This prevents a
+         * race in which the target core disables preemption between the check and
+         * the arrival of the cross-core interrupt. */
+        #define prvYieldCore( xCoreID ) \
+            do { \
+                BaseType_t xCurrentCoreID = ( BaseType_t ) portGET_CORE_ID(); \
+                BaseType_t xCoreToYield = ( xCoreID ); \
+                if( xCoreToYield == xCurrentCoreID ) \
+                { \
+                    /* Pend a yield for this core since it is inside a critical section. */ \
+                    xYieldPendings[ xCoreToYield ] = pdTRUE; \
+                } \
+                else \
+                { \
+                    /* Acquire the target core's TCB spinlock to prevent a race with vTaskPreemptionDisable. */ \
+                    portGET_SPINLOCK( xCurrentCoreID, &( pxCurrentTCBs[ xCoreToYield ]->xTCBSpinlock ) ); \
+                    { \
+                        if( pxCurrentTCBs[ xCoreToYield ]->uxPreemptionDisable == 0U ) \
+                        { \
+                            /* Request the other core to yield if it has not been requested already. */ \
+                            if( pxCurrentTCBs[ xCoreToYield ]->xTaskRunState != taskTASK_SCHEDULED_TO_YIELD ) \
+                            { \
+                                portYIELD_CORE( xCoreToYield ); \
+                                pxCurrentTCBs[ xCoreToYield ]->xTaskRunState = taskTASK_SCHEDULED_TO_YIELD; \
+                            } \
+                        } \
+                        else \
+                        { \
+                            xYieldPendings[ xCoreToYield ] = pdTRUE; \
+                        } \
+                    } \
+                    portRELEASE_SPINLOCK( xCurrentCoreID, &( pxCurrentTCBs[ xCoreToYield ]->xTCBSpinlock ) ); \
+                } \
+            } while( 0 )
+    #elif ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
         #define prvYieldCore( xCoreID ) \
             do { \
                 if( ( xCoreID ) == ( BaseType_t ) portGET_CORE_ID() ) \
                 } \
             } \
             } while( 0 )
-    #else /* if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) */
+    #else /* if ( configUSE_TCB_DATA_GROUP_LOCK == 1 ) */
         #define prvYieldCore( xCoreID ) \
             do { \
                 if( ( xCoreID ) == ( BaseType_t ) portGET_CORE_ID() ) \
                 } \
             } \
             } while( 0 )
-    #endif /* #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) */
+    #endif /* #if ( configUSE_TCB_DATA_GROUP_LOCK == 1 ) */
 #endif /* #if ( configNUMBER_OF_CORES > 1 ) */
 /*-----------------------------------------------------------*/
 
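The new branch relies on port-supplied spinlock primitives (`portSPINLOCK_TYPE`, `portINIT_SPINLOCK`, `portGET_SPINLOCK`, `portRELEASE_SPINLOCK`). As a minimal sketch only, assuming GCC/Clang atomic builtins, a port might back them along these lines; this is illustrative, not the implementation any real port is required to use:

```c
#include <stdint.h>

/* Illustrative port-layer sketch: a naive test-and-set spinlock built on
 * GCC/Clang atomic builtins. Real ports define these in portmacro.h and may
 * use the xCoreID argument (unused here) for tracing or fairness. */
typedef struct
{
    volatile uint32_t ulLock;
} portSPINLOCK_TYPE;

#define portINIT_SPINLOCK( pxLock )    ( ( pxLock )->ulLock = 0U )

#define portGET_SPINLOCK( xCoreID, pxLock )                                                 \
    do {                                                                                    \
        /* Spin until the exchange observes the lock previously unowned (0). */            \
        while( __atomic_exchange_n( &( ( pxLock )->ulLock ), 1U, __ATOMIC_ACQUIRE ) != 0U ) \
        {                                                                                   \
        }                                                                                   \
    } while( 0 )

#define portRELEASE_SPINLOCK( xCoreID, pxLock ) \
    __atomic_store_n( &( ( pxLock )->ulLock ), 0U, __ATOMIC_RELEASE )
```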
@@ -524,6 +560,10 @@ typedef struct tskTaskControlBlock /* The old naming convention is used to
                                              * NULL when not using direct transfer */
         BaseType_t xDirectTransferPosition; /**< Position for direct transfer (queueSEND_TO_BACK, queueSEND_TO_FRONT, queueOVERWRITE) */
     #endif
+
+    #if ( ( configUSE_TCB_DATA_GROUP_LOCK == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
+        portSPINLOCK_TYPE xTCBSpinlock; /**< Spinlock protecting TCB-specific data (uxPreemptionDisable, uxDeferredStateChange). */
+    #endif
 } tskTCB;
 
 /* The old tskTCB name is maintained above then typedefed to the new TCB_t name
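The new member is compiled in only when both options are set. A hypothetical `FreeRTOSConfig.h` fragment for an SMP build (the values shown are examples, not part of this patch):

```c
/* Hypothetical FreeRTOSConfig.h fragment; the core count is an example. */
#define configNUMBER_OF_CORES            2    /* SMP build. */
#define configUSE_TCB_DATA_GROUP_LOCK    1    /* Enable the per-TCB spinlock added above. */
```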
@@ -2173,6 +2213,12 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
     }
     #endif /* #if ( configNUMBER_OF_CORES > 1 ) */
 
+    #if ( ( configUSE_TCB_DATA_GROUP_LOCK == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
+    {
+        portINIT_SPINLOCK( &( pxNewTCB->xTCBSpinlock ) );
+    }
+    #endif
+
     if( pxCreatedTask != NULL )
     {
         /* Pass the handle out in an anonymous way. The handle can be used to
@@ -3311,6 +3357,123 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
 
 /*-----------------------------------------------------------*/
 
+#if ( ( configUSE_TCB_DATA_GROUP_LOCK == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
+
+    static void prvTaskTCBLockCheckForRunStateChange( void )
+    {
+        const TCB_t * pxThisTCB;
+        BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID();
+
+        /* This must only be called from within a task. */
+        portASSERT_IF_IN_ISR();
+
+        /* This function is always called with interrupts disabled,
+         * so this is safe. */
+        pxThisTCB = pxCurrentTCBs[ xCoreID ];
+
+        while( pxThisTCB->xTaskRunState == taskTASK_SCHEDULED_TO_YIELD )
+        {
+            UBaseType_t uxPrevCriticalNesting;
+
+            /* We are only here if we just entered a critical section
+             * or just suspended the scheduler, and another task has
+             * requested that we yield.
+             *
+             * This is slightly complicated since we need to save and restore
+             * the suspension and critical nesting counts, as well as release
+             * and reacquire the correct locks, and then do it all over again
+             * if our state changed during the reacquisition. */
+            uxPrevCriticalNesting = portGET_CRITICAL_NESTING_COUNT( xCoreID );
+
+            if( uxPrevCriticalNesting > 0U )
+            {
+                portSET_CRITICAL_NESTING_COUNT( xCoreID, 0U );
+                portRELEASE_SPINLOCK( xCoreID, &( pxCurrentTCBs[ xCoreID ]->xTCBSpinlock ) );
+            }
+            else
+            {
+                /* The scheduler is suspended. uxSchedulerSuspended is updated
+                 * only when the task is not requested to yield. */
+                mtCOVERAGE_TEST_MARKER();
+            }
+
+            portMEMORY_BARRIER();
+
+            portENABLE_INTERRUPTS();
+
+            /* Enabling interrupts should cause this core to immediately service
+             * the pending interrupt and yield. After servicing the pending interrupt,
+             * the task needs to re-evaluate its run state within this loop, as
+             * other cores may have requested this task to yield, potentially altering
+             * its run state. */
+
+            portDISABLE_INTERRUPTS();
+
+            xCoreID = ( BaseType_t ) portGET_CORE_ID();
+            portGET_SPINLOCK( xCoreID, &( pxCurrentTCBs[ xCoreID ]->xTCBSpinlock ) );
+
+            portSET_CRITICAL_NESTING_COUNT( xCoreID, uxPrevCriticalNesting );
+        }
+    }
+
+    void vTaskTCBEnterCritical( void )
+    {
+        if( xSchedulerRunning != pdFALSE )
+        {
+            portDISABLE_INTERRUPTS();
+            {
+                const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID();
+
+                portGET_SPINLOCK( xCoreID, &( pxCurrentTCBs[ xCoreID ]->xTCBSpinlock ) );
+
+                portINCREMENT_CRITICAL_NESTING_COUNT( xCoreID );
+
+                if( ( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 1U ) &&
+                    ( pxCurrentTCBs[ xCoreID ]->uxPreemptionDisable == 0U ) )
+                {
+                    prvTaskTCBLockCheckForRunStateChange();
+                }
+            }
+        }
+    }
+
+    void vTaskTCBExitCritical( void )
+    {
+        const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID();
+
+        if( xSchedulerRunning != pdFALSE )
+        {
+            if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) > 0U )
+            {
+                BaseType_t xYieldCurrentTask = pdFALSE;
+
+                /* Read the yield pending status inside the critical section. */
+                if( pxCurrentTCBs[ xCoreID ]->uxPreemptionDisable == 0U )
+                {
+                    xYieldCurrentTask = xYieldPendings[ xCoreID ];
+                }
+
+                portRELEASE_SPINLOCK( xCoreID, &( pxCurrentTCBs[ xCoreID ]->xTCBSpinlock ) );
+
+                portDECREMENT_CRITICAL_NESTING_COUNT( xCoreID );
+
+                /* If the critical nesting count is now 0, enable interrupts. */
+                if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 0U )
+                {
+                    portENABLE_INTERRUPTS();
+
+                    if( xYieldCurrentTask != pdFALSE )
+                    {
+                        portYIELD();
+                    }
+                }
+            }
+        }
+    }
+
+#endif /* #if ( ( configUSE_TCB_DATA_GROUP_LOCK == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
+/*-----------------------------------------------------------*/
+
 #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
 
     void vTaskPreemptionDisable( const TaskHandle_t xTask )
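`vTaskTCBEnterCritical()`/`vTaskTCBExitCritical()` behave as a critical section scoped to the running task's TCB data group: interrupts are disabled, the core's current TCB spinlock is taken, and any pending cross-core yield is honoured on exit. A minimal caller-side sketch, assuming an SMP build with `configUSE_TCB_DATA_GROUP_LOCK == 1` (the function name is illustrative):

```c
/* Illustrative pairing of the new TCB critical section, mirroring how
 * vTaskPreemptionDisable() below uses it. */
static void prvExampleTouchTcbDataGroup( void )
{
    vTaskTCBEnterCritical();
    {
        /* TCB data-group members (e.g. uxPreemptionDisable) of the running
         * task can be read or written here; other cores contend on this
         * task's TCB spinlock instead of the global kernel lock. */
    }
    vTaskTCBExitCritical();
}
```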
@@ -3319,8 +3482,8 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
 
         traceENTER_vTaskPreemptionDisable( xTask );
 
-        #if ( configLIGHTWEIGHT_CRITICAL_SECTION == 1 )
-            vKernelLightWeightEnterCritical();
+        #if ( configUSE_TCB_DATA_GROUP_LOCK == 1 )
+            vTaskTCBEnterCritical();
         #else
             kernelENTER_CRITICAL();
         #endif
@@ -3337,8 +3500,8 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
                 mtCOVERAGE_TEST_MARKER();
             }
         }
-        #if ( configLIGHTWEIGHT_CRITICAL_SECTION == 1 )
-            vKernelLightWeightExitCritical();
+        #if ( configUSE_TCB_DATA_GROUP_LOCK == 1 )
+            vTaskTCBExitCritical();
         #else
             kernelEXIT_CRITICAL();
         #endif
@@ -3356,15 +3519,18 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
         TCB_t * pxTCB;
         UBaseType_t uxDeferredAction = 0U;
         BaseType_t xAlreadyYielded = pdFALSE;
+        BaseType_t xTaskRequestedToYield = pdFALSE;
 
-        #if ( configLIGHTWEIGHT_CRITICAL_SECTION == 1 )
-            vKernelLightWeightEnterCritical();
+        #if ( configUSE_TCB_DATA_GROUP_LOCK == 1 )
+            vTaskTCBEnterCritical();
         #else
             kernelENTER_CRITICAL();
         #endif
         {
             if( xSchedulerRunning != pdFALSE )
             {
+                /* The task running on a core cannot be changed by another core,
+                 * so getting the TCB from its handle is safe to do from within
+                 * the TCB critical section. */
                 pxTCB = prvGetTCBFromHandle( xTask );
                 configASSERT( pxTCB != NULL );
                 configASSERT( pxTCB->uxPreemptionDisable > 0U );
@@ -3381,8 +3547,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
                 {
                     if( ( xYieldPendings[ pxTCB->xTaskRunState ] != pdFALSE ) && ( taskTASK_IS_RUNNING( pxTCB ) != pdFALSE ) )
                     {
-                        prvYieldCore( pxTCB->xTaskRunState );
-                        xAlreadyYielded = pdTRUE;
+                        xTaskRequestedToYield = pdTRUE;
                     }
                     else
                     {
@@ -3400,8 +3565,8 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
                 mtCOVERAGE_TEST_MARKER();
             }
         }
-        #if ( configLIGHTWEIGHT_CRITICAL_SECTION == 1 )
-            vKernelLightWeightExitCritical();
+        #if ( configUSE_TCB_DATA_GROUP_LOCK == 1 )
+            vTaskTCBExitCritical();
         #else
             kernelEXIT_CRITICAL();
         #endif
@@ -3424,6 +3589,26 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
             /* Any deferred action on the task would result in a context switch. */
             xAlreadyYielded = pdTRUE;
         }
+        else
+        {
+            if( xTaskRequestedToYield != pdFALSE )
+            {
+                /* prvYieldCore must be called within a critical section. */
+                kernelENTER_CRITICAL();
+                {
+                    pxTCB = prvGetTCBFromHandle( xTask );
+
+                    /* There is a gap between the TCB critical section and the
+                     * kernel critical section. Check the yield pending status
+                     * again in case the target core already handled the yield
+                     * request in that window. */
+                    if( ( xYieldPendings[ pxTCB->xTaskRunState ] != pdFALSE ) && ( taskTASK_IS_RUNNING( pxTCB ) != pdFALSE ) )
+                    {
+                        prvYieldCore( pxTCB->xTaskRunState );
+                    }
+                }
+                kernelEXIT_CRITICAL();
+
+                xAlreadyYielded = pdTRUE;
+            }
+        }
 
         return xAlreadyYielded;
     }
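From the application side the modified pair is used as before; the difference is that a yield requested by a remote core while preemption was disabled is now issued from a kernel critical section after the TCB lock has been dropped. A caller-side sketch (passing `NULL` targets the calling task):

```c
/* Illustrative use of the APIs changed above: shield a short, bounded,
 * non-blocking region from preemption. A yield requested by another core
 * in the meantime is deferred and serviced inside vTaskPreemptionEnable(). */
static void prvExampleNonPreemptibleWork( void )
{
    vTaskPreemptionDisable( NULL );
    {
        /* Short work that must not be preempted. Do not block here. */
    }
    vTaskPreemptionEnable( NULL );
}
```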
@@ -7576,7 +7761,8 @@ static void prvResetNextTaskUnblockTime( void )
              * interrupt. Only assert if the critical nesting count is 1 to
              * protect against recursive calls if the assert function also uses a
              * critical section. */
-            if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 1U )
+            if( ( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 1U ) &&
+                ( pxCurrentTCBs[ xCoreID ]->uxPreemptionDisable == 0U ) )
             {
                 portASSERT_IF_IN_ISR();
 