154 changes: 137 additions & 17 deletions event_groups.c
@@ -63,10 +63,48 @@
#if ( ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )
uint8_t ucStaticallyAllocated; /**< Set to pdTRUE if the event group is statically allocated to ensure no attempt is made to free the memory. */
#endif

#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
portSPINLOCK_TYPE xTaskSpinlock;
portSPINLOCK_TYPE xISRSpinlock;
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
} EventGroup_t;

/*-----------------------------------------------------------*/

/*
* Macros to mark the start and end of a critical code region.
*/
#if ( portUSING_GRANULAR_LOCKS == 1 )
#define event_groupsENTER_CRITICAL( pxEventBits ) taskDATA_GROUP_ENTER_CRITICAL( &pxEventBits->xTaskSpinlock, &pxEventBits->xISRSpinlock )
#define event_groupsENTER_CRITICAL_FROM_ISR( pxEventBits, puxSavedInterruptStatus ) taskDATA_GROUP_ENTER_CRITICAL_FROM_ISR( &pxEventBits->xISRSpinlock, puxSavedInterruptStatus )
#define event_groupsEXIT_CRITICAL( pxEventBits ) taskDATA_GROUP_EXIT_CRITICAL( &pxEventBits->xTaskSpinlock, &pxEventBits->xISRSpinlock )
#define event_groupsEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, pxEventBits ) taskDATA_GROUP_EXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, &pxEventBits->xISRSpinlock )
#else /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */
#define event_groupsENTER_CRITICAL( pxEventBits ) taskENTER_CRITICAL();

Code Style Issue: Inconsistent macro definition style.

The non-granular-lock versions of these macros end in semicolons, which is inconsistent with FreeRTOS style and breaks uses that require a single statement or an expression (for example, an unbraced if/else).

Fix:

#else /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */
    #define event_groupsENTER_CRITICAL( pxEventBits )                                      taskENTER_CRITICAL()
    #define event_groupsENTER_CRITICAL_FROM_ISR( pxEventBits, puxSavedInterruptStatus )    do { *( puxSavedInterruptStatus ) = taskENTER_CRITICAL_FROM_ISR(); } while( 0 )
    #define event_groupsEXIT_CRITICAL( pxEventBits )                                       taskEXIT_CRITICAL()
    #define event_groupsEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, pxEventBits )      taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus )
#endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */

Remove the trailing semicolons to maintain consistency with FreeRTOS coding standards.
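
A minimal sketch of the failure mode (hypothetical caller, not from this PR; xUseCriticalSection is an assumed flag): with the trailing semicolon, the macro expands to two statements, so an unbraced if/else no longer parses.

    if( xUseCriticalSection != pdFALSE )
        event_groupsENTER_CRITICAL( pxEventBits ); /* Expands to taskENTER_CRITICAL();; */
    else                                           /* Compile error: 'else' without a preceding 'if'. */
        mtCOVERAGE_TEST_MARKER();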

#define event_groupsENTER_CRITICAL_FROM_ISR( pxEventBits, puxSavedInterruptStatus ) do { *( puxSavedInterruptStatus ) = taskENTER_CRITICAL_FROM_ISR(); } while( 0 )
#define event_groupsEXIT_CRITICAL( pxEventBits ) taskEXIT_CRITICAL();
#define event_groupsEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, pxEventBits ) taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus );
#endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */

/*
* Locks an event group for tasks. Prevents other tasks from accessing the event group but allows
* ISRs to pend access to the event group. The caller cannot be preempted by other tasks
* after locking the event group, which allows it to execute non-deterministic
* operations.
*/
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
static void prvLockEventGroupForTasks( EventGroup_t * pxEventBits ) PRIVILEGED_FUNCTION;
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */

/*
* Unlocks an event group for tasks. Handles all pended access from ISRs, then re-enables
* preemption for the caller.
*/
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
static BaseType_t prvUnlockEventGroupForTasks( EventGroup_t * pxEventBits ) PRIVILEGED_FUNCTION;
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */

/*
* Test the bits set in uxCurrentEventBits to see if the wait condition is met.
* The wait condition is defined by xWaitForAllBits. If xWaitForAllBits is
@@ -79,6 +117,25 @@
const EventBits_t uxBitsToWaitFor,
const BaseType_t xWaitForAllBits ) PRIVILEGED_FUNCTION;

/*-----------------------------------------------------------*/

/*
* Macros used to lock and unlock an event group. When a task locks an
* event group, the task has thread-safe, non-deterministic access to
* the event group.
* - Concurrent access from other tasks will be blocked by the xTaskSpinlock
* - Concurrent access from ISRs will be pended
*
* When the task unlocks the event group, all pended access attempts are handled.
*/
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
#define event_groupsLOCK( pxEventBits ) prvLockEventGroupForTasks( pxEventBits )
#define event_groupsUNLOCK( pxEventBits ) prvUnlockEventGroupForTasks( pxEventBits );
#else /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */

Macro Issue: Missing return value in unlock macro.

The granular-lock version of event_groupsUNLOCK ends in a semicolon, so it expands to a statement and cannot yield a value consistent with the xTaskResumeAll() call it replaces in the non-granular build.

Fix:

#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
    #define event_groupsLOCK( pxEventBits )      prvLockEventGroupForTasks( pxEventBits )
    #define event_groupsUNLOCK( pxEventBits )    prvUnlockEventGroupForTasks( pxEventBits )
#else /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
    #define event_groupsLOCK( pxEventBits )      vTaskSuspendAll()
    #define event_groupsUNLOCK( pxEventBits )    xTaskResumeAll()
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */

Note the removal of the semicolon in the granular lock version to match the non-granular version's return behavior.
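
As an illustration, the call sites later in this diff assign the macro's result, which only compiles if the expansion is an expression:

    BaseType_t xAlreadyYielded;

    /* Fails to compile if event_groupsUNLOCK() expands to a
     * semicolon-terminated statement instead of an expression. */
    xAlreadyYielded = event_groupsUNLOCK( pxEventBits );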

#define event_groupsLOCK( pxEventBits ) vTaskSuspendAll()
#define event_groupsUNLOCK( pxEventBits ) xTaskResumeAll()
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */

/*-----------------------------------------------------------*/

#if ( configSUPPORT_STATIC_ALLOCATION == 1 )
@@ -122,6 +179,13 @@
}
#endif /* configSUPPORT_DYNAMIC_ALLOCATION */

#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
{
portINIT_SPINLOCK( &( pxEventBits->xTaskSpinlock ) );
portINIT_SPINLOCK( &( pxEventBits->xISRSpinlock ) );
}
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */

traceEVENT_GROUP_CREATE( pxEventBits );
}
else
@@ -167,6 +231,13 @@
}
#endif /* configSUPPORT_STATIC_ALLOCATION */

#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
{
portINIT_SPINLOCK( &( pxEventBits->xTaskSpinlock ) );
portINIT_SPINLOCK( &( pxEventBits->xISRSpinlock ) );
}
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */

traceEVENT_GROUP_CREATE( pxEventBits );
}
else
@@ -202,7 +273,7 @@
}
#endif

vTaskSuspendAll();
event_groupsLOCK( pxEventBits );
{
uxOriginalBitValue = pxEventBits->uxEventBits;

@@ -245,7 +316,7 @@
}
}
}
xAlreadyYielded = xTaskResumeAll();
xAlreadyYielded = event_groupsUNLOCK( pxEventBits );

if( xTicksToWait != ( TickType_t ) 0 )
{
@@ -267,7 +338,7 @@
if( ( uxReturn & eventUNBLOCKED_DUE_TO_BIT_SET ) == ( EventBits_t ) 0 )
{
/* The task timed out, just return the current event bit value. */
taskENTER_CRITICAL();
event_groupsENTER_CRITICAL( pxEventBits );
{
uxReturn = pxEventBits->uxEventBits;

@@ -284,7 +355,7 @@
mtCOVERAGE_TEST_MARKER();
}
}
taskEXIT_CRITICAL();
event_groupsEXIT_CRITICAL( pxEventBits );

xTimeoutOccurred = pdTRUE;
}
@@ -333,7 +404,7 @@
}
#endif

vTaskSuspendAll();
event_groupsLOCK( pxEventBits );
{
const EventBits_t uxCurrentEventBits = pxEventBits->uxEventBits;

@@ -401,7 +472,7 @@
traceEVENT_GROUP_WAIT_BITS_BLOCK( xEventGroup, uxBitsToWaitFor );
}
}
xAlreadyYielded = xTaskResumeAll();
xAlreadyYielded = event_groupsUNLOCK( pxEventBits );

if( xTicksToWait != ( TickType_t ) 0 )
{
@@ -422,7 +493,7 @@

if( ( uxReturn & eventUNBLOCKED_DUE_TO_BIT_SET ) == ( EventBits_t ) 0 )
{
taskENTER_CRITICAL();
event_groupsENTER_CRITICAL( pxEventBits );
{
/* The task timed out, just return the current event bit value. */
uxReturn = pxEventBits->uxEventBits;
@@ -447,7 +518,7 @@

xTimeoutOccurred = pdTRUE;
}
taskEXIT_CRITICAL();
event_groupsEXIT_CRITICAL( pxEventBits );
}
else
{
@@ -482,7 +553,7 @@
configASSERT( xEventGroup );
configASSERT( ( uxBitsToClear & eventEVENT_BITS_CONTROL_BYTES ) == 0 );

taskENTER_CRITICAL();
event_groupsENTER_CRITICAL( pxEventBits );
{
traceEVENT_GROUP_CLEAR_BITS( xEventGroup, uxBitsToClear );

@@ -493,7 +564,7 @@
/* Clear the bits. */
pxEventBits->uxEventBits &= ~uxBitsToClear;
}
taskEXIT_CRITICAL();
event_groupsEXIT_CRITICAL( pxEventBits );

traceRETURN_xEventGroupClearBits( uxReturn );

@@ -524,19 +595,19 @@
EventBits_t xEventGroupGetBitsFromISR( EventGroupHandle_t xEventGroup )
{
UBaseType_t uxSavedInterruptStatus;
EventGroup_t const * const pxEventBits = xEventGroup;
EventGroup_t * const pxEventBits = xEventGroup;
EventBits_t uxReturn;

traceENTER_xEventGroupGetBitsFromISR( xEventGroup );

/* MISRA Ref 4.7.1 [Return value shall be checked] */
/* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#dir-47 */
/* coverity[misra_c_2012_directive_4_7_violation] */
uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR();
event_groupsENTER_CRITICAL_FROM_ISR( pxEventBits, &uxSavedInterruptStatus );
{
uxReturn = pxEventBits->uxEventBits;
}
taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus );
event_groupsEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, pxEventBits );

traceRETURN_xEventGroupGetBitsFromISR( uxReturn );

@@ -564,10 +635,17 @@

pxList = &( pxEventBits->xTasksWaitingForBits );
pxListEnd = listGET_END_MARKER( pxList );
vTaskSuspendAll();
event_groupsLOCK( pxEventBits );
{
traceEVENT_GROUP_SET_BITS( xEventGroup, uxBitsToSet );

#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )

/* We are about to access the kernel data group non-deterministically,
* thus we suspend the kernel data group. */
vTaskSuspendAll();
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */

pxListItem = listGET_HEAD_ENTRY( pxList );

/* Set the bits. */
@@ -638,8 +716,12 @@

/* Snapshot resulting bits. */
uxReturnBits = pxEventBits->uxEventBits;

#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
( void ) xTaskResumeAll();
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
}
( void ) xTaskResumeAll();
( void ) event_groupsUNLOCK( pxEventBits );

traceRETURN_xEventGroupSetBits( uxReturnBits );

@@ -658,19 +740,30 @@

pxTasksWaitingForBits = &( pxEventBits->xTasksWaitingForBits );

vTaskSuspendAll();
event_groupsLOCK( pxEventBits );
{
traceEVENT_GROUP_DELETE( xEventGroup );

#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )

/* We are about to access the kernel data group non-deterministically,
* thus we suspend the kernel data group. */
vTaskSuspendAll();
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */

while( listCURRENT_LIST_LENGTH( pxTasksWaitingForBits ) > ( UBaseType_t ) 0 )
{
/* Unblock the task, returning 0 as the event list is being deleted
* and cannot therefore have any bits set. */
configASSERT( pxTasksWaitingForBits->xListEnd.pxNext != ( const ListItem_t * ) &( pxTasksWaitingForBits->xListEnd ) );
vTaskRemoveFromUnorderedEventList( pxTasksWaitingForBits->xListEnd.pxNext, eventUNBLOCKED_DUE_TO_BIT_SET );
}

#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
( void ) xTaskResumeAll();
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
}
( void ) xTaskResumeAll();
( void ) event_groupsUNLOCK( pxEventBits );

#if ( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 0 ) )
{
@@ -774,6 +867,33 @@
traceRETURN_vEventGroupClearBitsCallback();
}
/*-----------------------------------------------------------*/
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
static void prvLockEventGroupForTasks( EventGroup_t * pxEventBits )
{
/* Disable preemption so that the current task cannot be preempted by another task */
vTaskPreemptionDisable( NULL );

/* Keep holding xTaskSpinlock to prevent tasks on other cores from accessing
* the event group while it is suspended. */
portGET_SPINLOCK( portGET_CORE_ID(), &( pxEventBits->xTaskSpinlock ) );
}
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
/*-----------------------------------------------------------*/

#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
static BaseType_t prvUnlockEventGroupForTasks( EventGroup_t * pxEventBits )
{
/* Release the previously held task spinlock */
portRELEASE_SPINLOCK( portGET_CORE_ID(), &( pxEventBits->xTaskSpinlock ) );

/* Re-enable preemption */
vTaskPreemptionEnable( NULL );

/* We assume that the task was preempted when preemption was enabled */
return pdTRUE;
}
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
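
For context, a sketch of how this pdTRUE return is consumed (mirroring the wait paths earlier in this diff): the caller records whether a yield already happened inside vTaskPreemptionEnable() and skips its own portYIELD_WITHIN_API() if so.

    xAlreadyYielded = event_groupsUNLOCK( pxEventBits );

    if( xTicksToWait != ( TickType_t ) 0 )
    {
        if( xAlreadyYielded == pdFALSE )
        {
            /* Only force a yield here if unlocking did not already yield. */
            portYIELD_WITHIN_API();
        }
    }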
/*-----------------------------------------------------------*/

static BaseType_t prvTestWaitCondition( const EventBits_t uxCurrentEventBits,
const EventBits_t uxBitsToWaitFor,