diff --git a/event_groups.c b/event_groups.c index 6a2ba07617..2f7d63dc04 100644 --- a/event_groups.c +++ b/event_groups.c @@ -63,10 +63,48 @@ #if ( ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) uint8_t ucStaticallyAllocated; /**< Set to pdTRUE if the event group is statically allocated to ensure no attempt is made to free the memory. */ #endif + + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + portSPINLOCK_TYPE xTaskSpinlock; + portSPINLOCK_TYPE xISRSpinlock; + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ } EventGroup_t; /*-----------------------------------------------------------*/ +/* + * Macros to mark the start and end of a critical code region. + */ + #if ( portUSING_GRANULAR_LOCKS == 1 ) + #define event_groupsENTER_CRITICAL( pxEventBits ) portENTER_CRITICAL_DATA_GROUP( ( portSPINLOCK_TYPE * ) &( pxEventBits->xTaskSpinlock ), &( pxEventBits->xISRSpinlock ) ) + #define event_groupsENTER_CRITICAL_FROM_ISR( pxEventBits ) portENTER_CRITICAL_DATA_GROUP_FROM_ISR( ( portSPINLOCK_TYPE * ) &( pxEventBits->xISRSpinlock ) ) + #define event_groupsEXIT_CRITICAL( pxEventBits ) portEXIT_CRITICAL_DATA_GROUP( ( portSPINLOCK_TYPE * ) &( pxEventBits->xTaskSpinlock ), &( pxEventBits->xISRSpinlock ) ) + #define event_groupsEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, pxEventBits ) portEXIT_CRITICAL_DATA_GROUP_FROM_ISR( uxSavedInterruptStatus, ( portSPINLOCK_TYPE * ) &( pxEventBits->xISRSpinlock ) ) + #else /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */ + #define event_groupsENTER_CRITICAL( pxEventBits ) do { ( void ) pxEventBits; taskENTER_CRITICAL(); } while( 0 ) + #define event_groupsENTER_CRITICAL_FROM_ISR( pxEventBits ) do { ( void ) pxEventBits; taskENTER_CRITICAL_FROM_ISR(); } while( 0 ) + #define event_groupsEXIT_CRITICAL( pxEventBits ) do { ( void ) pxEventBits; taskEXIT_CRITICAL(); } while( 0 ) + #define event_groupsEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, pxEventBits ) do { ( void ) pxEventBits; taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus ); } while( 0 ) + #endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */ + +/* + * Locks an event group for tasks. Prevents other tasks from accessing the event group but allows + * ISRs to pend access to the event group. Caller cannot be preempted by other tasks + * after locking the event group, thus allowing the caller to execute non-deterministic + * operations. + */ + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + static void prvLockEventGroupForTasks( EventGroup_t * pxEventBits ) PRIVILEGED_FUNCTION; + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ + +/* + * Unlocks an event group for tasks. Handles all pended access from ISRs, then reenables + * preemption for the caller. + */ + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + static BaseType_t prvUnlockEventGroupForTasks( EventGroup_t * pxEventBits ) PRIVILEGED_FUNCTION; + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ + /* * Test the bits set in uxCurrentEventBits to see if the wait condition is met. * The wait condition is defined by xWaitForAllBits. If xWaitForAllBits is @@ -79,6 +117,25 @@ const EventBits_t uxBitsToWaitFor, const BaseType_t xWaitForAllBits ) PRIVILEGED_FUNCTION; +/*-----------------------------------------------------------*/ + +/* + * Macros used to lock and unlock an event group. 
When a task lockss an, + * event group, the task will have thread safe non-deterministic access to + * the event group. + * - Concurrent access from other tasks will be blocked by the xTaskSpinlock + * - Concurrent access from ISRs will be pended + * + * When the task unlocks the event group, all pended access attempts are handled. + */ + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + #define event_groupsLOCK( pxEventBits ) prvLockEventGroupForTasks( pxEventBits ) + #define event_groupsUNLOCK( pxEventBits ) prvUnlockEventGroupForTasks( pxEventBits ); + #else /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ + #define event_groupsLOCK( pxEventBits ) vTaskSuspendAll() + #define event_groupsUNLOCK( pxEventBits ) xTaskResumeAll() + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ + /*-----------------------------------------------------------*/ #if ( configSUPPORT_STATIC_ALLOCATION == 1 ) @@ -122,6 +179,13 @@ } #endif /* configSUPPORT_DYNAMIC_ALLOCATION */ + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + { + portINIT_SPINLOCK( &( pxEventBits->xTaskSpinlock ) ); + portINIT_SPINLOCK( &( pxEventBits->xISRSpinlock ) ); + } + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ + traceEVENT_GROUP_CREATE( pxEventBits ); } else @@ -167,6 +231,13 @@ } #endif /* configSUPPORT_STATIC_ALLOCATION */ + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + { + portINIT_SPINLOCK( &( pxEventBits->xTaskSpinlock ) ); + portINIT_SPINLOCK( &( pxEventBits->xISRSpinlock ) ); + } + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ + traceEVENT_GROUP_CREATE( pxEventBits ); } else @@ -202,7 +273,7 @@ } #endif - vTaskSuspendAll(); + event_groupsLOCK( pxEventBits ); { uxOriginalBitValue = pxEventBits->uxEventBits; @@ -245,7 +316,7 @@ } } } - xAlreadyYielded = xTaskResumeAll(); + xAlreadyYielded = event_groupsUNLOCK( pxEventBits ); if( xTicksToWait != ( TickType_t ) 0 ) { @@ -267,7 +338,7 @@ if( ( uxReturn & eventUNBLOCKED_DUE_TO_BIT_SET ) == ( EventBits_t ) 0 ) { /* The task timed out, just return the current event bit value. */ - taskENTER_CRITICAL(); + event_groupsENTER_CRITICAL( pxEventBits ); { uxReturn = pxEventBits->uxEventBits; @@ -284,7 +355,7 @@ mtCOVERAGE_TEST_MARKER(); } } - taskEXIT_CRITICAL(); + event_groupsEXIT_CRITICAL( pxEventBits ); xTimeoutOccurred = pdTRUE; } @@ -333,7 +404,7 @@ } #endif - vTaskSuspendAll(); + event_groupsLOCK( pxEventBits ); { const EventBits_t uxCurrentEventBits = pxEventBits->uxEventBits; @@ -401,7 +472,7 @@ traceEVENT_GROUP_WAIT_BITS_BLOCK( xEventGroup, uxBitsToWaitFor ); } } - xAlreadyYielded = xTaskResumeAll(); + xAlreadyYielded = event_groupsUNLOCK( pxEventBits ); if( xTicksToWait != ( TickType_t ) 0 ) { @@ -422,7 +493,7 @@ if( ( uxReturn & eventUNBLOCKED_DUE_TO_BIT_SET ) == ( EventBits_t ) 0 ) { - taskENTER_CRITICAL(); + event_groupsENTER_CRITICAL( pxEventBits ); { /* The task timed out, just return the current event bit value. 
*/ uxReturn = pxEventBits->uxEventBits; @@ -447,7 +518,7 @@ xTimeoutOccurred = pdTRUE; } - taskEXIT_CRITICAL(); + event_groupsEXIT_CRITICAL( pxEventBits ); } else { @@ -482,7 +553,7 @@ configASSERT( xEventGroup ); configASSERT( ( uxBitsToClear & eventEVENT_BITS_CONTROL_BYTES ) == 0 ); - taskENTER_CRITICAL(); + event_groupsENTER_CRITICAL( pxEventBits ); { traceEVENT_GROUP_CLEAR_BITS( xEventGroup, uxBitsToClear ); @@ -493,7 +564,7 @@ /* Clear the bits. */ pxEventBits->uxEventBits &= ~uxBitsToClear; } - taskEXIT_CRITICAL(); + event_groupsEXIT_CRITICAL( pxEventBits ); traceRETURN_xEventGroupClearBits( uxReturn ); @@ -524,7 +595,7 @@ EventBits_t xEventGroupGetBitsFromISR( EventGroupHandle_t xEventGroup ) { UBaseType_t uxSavedInterruptStatus; - EventGroup_t const * const pxEventBits = xEventGroup; + EventGroup_t * const pxEventBits = xEventGroup; EventBits_t uxReturn; traceENTER_xEventGroupGetBitsFromISR( xEventGroup ); @@ -532,11 +603,11 @@ /* MISRA Ref 4.7.1 [Return value shall be checked] */ /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#dir-47 */ /* coverity[misra_c_2012_directive_4_7_violation] */ - uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR(); + uxSavedInterruptStatus = event_groupsENTER_CRITICAL_FROM_ISR( pxEventBits ); { uxReturn = pxEventBits->uxEventBits; } - taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus ); + event_groupsEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, pxEventBits ); traceRETURN_xEventGroupGetBitsFromISR( uxReturn ); @@ -564,10 +635,17 @@ pxList = &( pxEventBits->xTasksWaitingForBits ); pxListEnd = listGET_END_MARKER( pxList ); - vTaskSuspendAll(); + event_groupsLOCK( pxEventBits ); { traceEVENT_GROUP_SET_BITS( xEventGroup, uxBitsToSet ); + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + + /* We are about to access the kernel data group non-deterministically, + * thus we suspend the kernel data group.*/ + vTaskSuspendAll(); + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ + pxListItem = listGET_HEAD_ENTRY( pxList ); /* Set the bits. */ @@ -638,8 +716,12 @@ /* Snapshot resulting bits. 
*/ uxReturnBits = pxEventBits->uxEventBits; + + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + ( void ) xTaskResumeAll(); + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ } - ( void ) xTaskResumeAll(); + ( void ) event_groupsUNLOCK( pxEventBits ); traceRETURN_xEventGroupSetBits( uxReturnBits ); @@ -658,10 +740,17 @@ pxTasksWaitingForBits = &( pxEventBits->xTasksWaitingForBits ); - vTaskSuspendAll(); + event_groupsLOCK( pxEventBits ); { traceEVENT_GROUP_DELETE( xEventGroup ); + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + + /* We are about to access the kernel data group non-deterministically, + * thus we suspend the kernel data group.*/ + vTaskSuspendAll(); + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ + while( listCURRENT_LIST_LENGTH( pxTasksWaitingForBits ) > ( UBaseType_t ) 0 ) { /* Unblock the task, returning 0 as the event list is being deleted @@ -669,8 +758,12 @@ configASSERT( pxTasksWaitingForBits->xListEnd.pxNext != ( const ListItem_t * ) &( pxTasksWaitingForBits->xListEnd ) ); vTaskRemoveFromUnorderedEventList( pxTasksWaitingForBits->xListEnd.pxNext, eventUNBLOCKED_DUE_TO_BIT_SET ); } + + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + ( void ) xTaskResumeAll(); + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ } - ( void ) xTaskResumeAll(); + ( void ) event_groupsUNLOCK( pxEventBits ); #if ( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 0 ) ) { @@ -775,6 +868,42 @@ } /*-----------------------------------------------------------*/ + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + static void prvLockEventGroupForTasks( EventGroup_t * pxEventBits ) + { + /* Disable preemption so that the current task cannot be preempted by another task */ + vTaskPreemptionDisable( NULL ); + + portDISABLE_INTERRUPTS(); + + /* Keep holding xTaskSpinlock to prevent tasks on other cores from accessing + * the event group while it is suspended. 
*/ + portGET_SPINLOCK( portGET_CORE_ID(), &( pxEventBits->xTaskSpinlock ) ); + + portENABLE_INTERRUPTS(); + } + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ +/*-----------------------------------------------------------*/ + + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + static BaseType_t prvUnlockEventGroupForTasks( EventGroup_t * pxEventBits ) + { + portDISABLE_INTERRUPTS(); + + /* Release the previously held task spinlock */ + portRELEASE_SPINLOCK( portGET_CORE_ID(), &( pxEventBits->xTaskSpinlock ) ); + + portENABLE_INTERRUPTS(); + + /* Re-enable preemption */ + vTaskPreemptionEnable( NULL ); + + /* We assume that the task was preempted when preemption was enabled */ + return pdTRUE; + } + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ +/*-----------------------------------------------------------*/ + static BaseType_t prvTestWaitCondition( const EventBits_t uxCurrentEventBits, const EventBits_t uxBitsToWaitFor, const BaseType_t xWaitForAllBits ) diff --git a/granular_locks_v4.md b/granular_locks_v4.md new file mode 100644 index 0000000000..52f90250ba --- /dev/null +++ b/granular_locks_v4.md @@ -0,0 +1,411 @@ +# Introduction + +Currently, the SMP FreeRTOS kernel implements critical section using a "Global locking" approach, where all data is protected by a pair of spinlocks (namely the Task Lock and ISR Lock). This means that every critical section contests for this pair of spinlocks, even if those critical sections access unrelated/orthogonal data. + +The goal of this proposal is to use granular or localized spinlocks so that concurrent access to different data groups do no contest for the same spinlocks. This will reduce lock contention and hopefully increase performance of the SMP FreeRTOS kernel. + +This proposal describes a **"Dual Spinlock With Data Group Locking"** approach to granular locking. + +Source code changes are based off release V11.1.0 of the FreeRTOS kernel. + +# Data Groups + +To make the spinlocks granular, FreeRTOS data will be organized into the following data groups, where each data group is protected by their own set of spinlocks. + +- Kernel Data Group + - All data in `tasks.c` and all event lists (e.g., `xTasksWaitingToSend` and `xTasksWaitingToReceive` in the queue objects) +- Queue Data Group + - Each queue object (`Queue_t`) is its own data group (excluding the task lists) +- Event Group Data Group + - Each event group object (`EventGroup_t`) is its own data group (excluding the task lists) +- Stream Buffer Data Group + - Each stream buffer object (`StreamBuffer_t`) is its own data group (excluding the task lists) +- Timers + - All data in `timers.c` and timer objects, belong to the same Timer Data Group +- User/Port Data Groups + - The user and ports are free to organize their own data in to data groups of their choosing + +# Dual Spinlock With Data Group Locking + +The **"Dual Spinlock With Data Group Locking"** uses a pair of spinlocks to protect each data group (namely the `xTaskSpinlock` and `xISRSpinlock` spinlocks). 
+ +```c +typedef struct +{ + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + portSPINLOCK_TYPE xTaskSpinlock; + portSPINLOCK_TYPE xISRSpinlock; + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ +} xSomeDataGroup_t; +``` + +However, each data group must also allow for non-deterministic access with interrupts **enabled** by providing a pair of lock/unlock functions. These functions must block access to other tasks trying to access members of a data group and must pend access to ISRs trying to access members of the data group. + +```c +static void prvLockSomeDataGroupForTasks( xSomeDataGroup_t * const pxSomDataGroup ); +static void prvUnlockSomeDataGroupForTasks( xSomeDataGroup_t * const pxSomDataGroup ); +``` + +In simple terms, the "Dual Spinlock With Data Group Locking" is an extension is the existing dual spinlock (i.e., Task and ISR spinlock) approach used in the SMP FreeRTOS kernel, but replicated across different data groups. + +## Data Group Critical Sections (Granular Locks) + +A critical section for a data group can be achieved as follows: + +- When entering a data group critical section from a task + 1. Disable interrupts + 2. Take `xTaskSpinlock` of data group + 3. Take `xISRSpinlock` of data group + 4. Increment nesting count +- When entering data group critical section from an ISR + 1. Disable interrupts + 2. Take `xISRSpinlock` of data group + 3. Increment nesting count + +When exiting a data group critical section, the procedure is reversed. Furthermore, since yield requests are pended when inside a critical section, exiting a task critical section will also need to handle any pended yields. + +- When exiting a data group critical section from a task + 1. Release `xISRSpinlock` of data group + 2. Release `xTaskSpinlock` of data group + 3. Decrement nesting count + 4. Reenable interrupts if nesting count is 0 + 5. Trigger yield if there is a yield pending +- When exiting data group critical section form an ISR + 1. Release `xISRSpinlock` of data group + 2. Decrement nesting count + 3. Reenable interrupts if nesting count is 0 + +Entering multiple data group critical sections in a nested manner is permitted. This means, if a code path has already entered a critical section in data group A, it can then enter a critical section in data group B. This is analogous to nested critical sections. However, care must be taken to avoid deadlocks. This can be achieved by organizing data groups into a hierarchy, where a higher layer data group cannot nest into a lower one. + +``` + +-------------------+ + | Kernel Data Group | + +-------------------+ + + +-------------------+ +---------------------------+ +--------------------------+ +------------------+ + | Queue Data Groups | | Stream Buffer Data Groups | | Event Groups Data Groups | | Timer Data Group | + +-------------------+ +---------------------------+ +--------------------------+ +------------------+ + ++------------------------------------------------------------------------------------------------------+ +| User Data Groups | ++------------------------------------------------------------------------------------------------------+ +``` + +If nested locking only occurs from bottom up (e.g., User data group can nested into a Queue data group which in turn can nested into Kernel data group), then deadlocking will never occur. + +## Data Group Locking + +FreeRTOS does not permit walking linked lists while interrupts are disabled to ensure deterministic ISR latency. 
Therefore, each data group must provide a method of locking so that non-deterministic operations can be executed on the data group. While a data group is locked:
+
+- Preemption is disabled for the current task
+- Interrupts remain enabled
+- The data group's `xTaskSpinlock` is taken to prevent tasks running on other cores from accessing the data group
+- Any ISR that attempts to update the data group will have its access pended. These pended accesses are handled on resumption
+
+The logic of suspending a data group is analogous to the logic of `vTaskSuspendAll()`/`xTaskResumeAll()` and `prvLockQueue()`/`prvUnlockQueue()` in the existing SMP kernel.
+
+The details of how ISR accesses are pended during suspension are specific to each data group type, thus the implementation of the suspension/resumption functions is also specific to each data group type. However, the procedure for data group suspension and resumption will typically be as follows:
+
+- Suspension
+  1. Disable preemption
+  2. Lock the data group
+  3. Set a suspension flag that indicates the data group is suspended
+  4. Unlock the data group, but keep holding `xTaskSpinlock`
+- Resumption
+  1. Lock the data group
+  2. Clear the suspension flag
+  3. Handle all pended accesses from ISRs
+  4. Unlock the data group, thus releasing `xTaskSpinlock`
+
+Locking multiple data groups in a nested manner is permitted, meaning that if a code path has already locked data group A, it can then lock data group B. This is analogous to nested `vTaskSuspendAll()` calls. As with nested data group critical sections, deadlocks can be avoided by organizing data groups into a hierarchy.
+
+## Thread Safety Check
+
+Under SMP, there are four sources of concurrent access for a particular data group:
+
+- Preempting task on the same core
+- Preempting ISR on the same core
+- Concurrent task on another core
+- Concurrent ISR on another core
+
+This section checks that the data group critical section and locking mechanisms described above ensure thread safety against each source of concurrent access.
+
+- Data Group Critical Section from tasks: Interrupts are disabled, `xTaskSpinlock` and `xISRSpinlock` are taken
+  - Task (same core): Context switch cannot occur because interrupts are disabled
+  - Task (other cores): The task will spin on `xTaskSpinlock`
+  - ISR (same core): Interrupts on the current core are disabled
+  - ISR (other cores): ISR will spin on `xISRSpinlock`
+
+- Data Group Critical Section from ISRs: Interrupts are disabled, `xISRSpinlock` is taken
+  - Task (same core): Context switch cannot occur because we are in an ISR
+  - Task (other cores): The task will spin on `xISRSpinlock`
+  - ISR (same core): Interrupts on the current core are disabled
+  - ISR (other cores): ISR will spin on `xISRSpinlock`
+
+- Data Group Locking from tasks: Preemption is disabled, `xTaskSpinlock` is taken
+  - Task (same core): Context switch cannot occur because preemption is disabled
+  - Task (other cores): The task will spin on `xTaskSpinlock`
+  - ISR (same core): Critical section is entered because `xISRSpinlock` is not held, but access is pended
+  - ISR (other cores): Critical section is entered because `xISRSpinlock` is not held, but access is pended
+
+# Public API Changes
+
+To support **"Dual Spinlock With Data Group Locking"**, the following changes have been made to the public facing API.
These changes are non-breaking, meaning that applications that build against the existing SMP FreeRTOS kernel will still build with granular locking enabled (albeit less performant).
+
+The following APIs have been added to enter/exit a critical section in a data group. These are called by the FreeRTOS source code to mark critical sections in data groups. However, users can also create their own data groups and enter/exit critical sections in the same manner.
+
+If granular locking is disabled, these macros simply revert to the standard task enter/exit critical macros.
+
+```c
+#if ( portUSING_GRANULAR_LOCKS == 1 )
+    #define data_groupENTER_CRITICAL()                                  portENTER_CRITICAL_DATA_GROUP( ( portSPINLOCK_TYPE * ) pxTaskSpinlock, ( portSPINLOCK_TYPE * ) pxISRSpinlock )
+    #define data_groupENTER_CRITICAL_FROM_ISR()                         portENTER_CRITICAL_DATA_GROUP_FROM_ISR( ( portSPINLOCK_TYPE * ) pxISRSpinlock )
+    #define data_groupEXIT_CRITICAL()                                   portEXIT_CRITICAL_DATA_GROUP( ( portSPINLOCK_TYPE * ) pxTaskSpinlock, ( portSPINLOCK_TYPE * ) pxISRSpinlock )
+    #define data_groupEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus )  portEXIT_CRITICAL_DATA_GROUP_FROM_ISR( uxSavedInterruptStatus, ( portSPINLOCK_TYPE * ) pxISRSpinlock )
+#else /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */
+    #define data_groupENTER_CRITICAL()                                  taskENTER_CRITICAL()
+    #define data_groupENTER_CRITICAL_FROM_ISR()                         taskENTER_CRITICAL_FROM_ISR()
+    #define data_groupEXIT_CRITICAL()                                   taskEXIT_CRITICAL()
+    #define data_groupEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus )  taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus )
+#endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */
+```
+
+In the case of the kernel data group (tasks.c), the granular locking macros make use of the existing `vTaskEnter/ExitCritical()` functions to establish critical sections.
+
+```c
+#if ( portUSING_GRANULAR_LOCKS == 1 )
+    #define kernelENTER_CRITICAL()                                  vTaskEnterCritical()
+    #define kernelENTER_CRITICAL_FROM_ISR()                         vTaskEnterCriticalFromISR()
+    #define kernelEXIT_CRITICAL()                                   vTaskExitCritical()
+    #define kernelEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus )  vTaskExitCriticalFromISR( uxSavedInterruptStatus )
+#else /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */
+    #define kernelENTER_CRITICAL()                                  taskENTER_CRITICAL()
+    #define kernelENTER_CRITICAL_FROM_ISR()                         taskENTER_CRITICAL_FROM_ISR()
+    #define kernelEXIT_CRITICAL()                                   taskEXIT_CRITICAL()
+    #define kernelEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus )  taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus )
+#endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */
+```
+
+The previous critical section macros, i.e., `taskENTER/EXIT_CRITICAL()`, are still provided and can be called by users, but the FreeRTOS source code no longer calls them. If they are called by users, the port should implement a "user" data group for them. As a result, an application that previously relied on `taskENTER/EXIT_CRITICAL()` for thread safe access to some user data remains thread safe with granular locking enabled.
+ +```c +#define taskENTER_CRITICAL() portENTER_CRITICAL() +#define taskEXIT_CRITICAL() portEXIT_CRITICAL() +#define taskENTER_CRITICAL_FROM_ISR() portENTER_CRITICAL_FROM_ISR() +#define taskEXIT_CRITICAL_FROM_ISR( x ) portEXIT_CRITICAL_FROM_ISR( x ) +``` + +# Porting Interface + +To support **"Dual Spinlock With Data Group Locking"**, ports will need to provide the following macro definitions + +## Port Config + +The ports will need to provide the following port configuration macros + +```c +#define portUSING_GRANULAR_LOCKS 1 // Enables usage of granular locks +#define portCRITICAL_NESTING_IN_TCB 0 // Disable critical nesting in TCB. Ports will need to track their own critical nesting +``` + +## Spinlocks + +Ports will need to provide the following spinlock related macros macros + +```c +/* +Data type for the port's implementation of a spinlock +*/ +#define portSPINLOCK_TYPE port_spinlock_t +``` + +Macros are provided for the spinlocks for initializing them either statically or dynamically. This is reflected in the macros API pattern. + +```c +#define portINIT_SPINLOCK( pxSpinlock ) _port_spinlock_init( pxSpinlock ) +#define portINIT__SPINLOCK_STATIC PORT_SPINLOCK_STATIC_INIT +``` + +## Critical Section Macros + +The port will need to provide implementations for macros to enter/exit a data group critical section according the procedures described above. Typical implementations of each macro is demonstrated below: + +```c +#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + #define portENTER_CRITICAL_DATA_GROUP( pxTaskSpinlock, pxISRSpinlock ) vPortEnterCriticalDataGroup( pxTaskSpinlock, pxISRSpinlock ) + #define portENTER_CRITICAL_DATA_GROUP_FROM_ISR( pxISRSpinlock ) uxPortEnterCriticalDataGroupFromISR( pxISRSpinlock ) + #define portEXIT_CRITICAL_DATA_GROUP( pxTaskSpinlock, pxISRSpinlock ) vPortExitCriticalDataGroup( pxTaskSpinlock, pxISRSpinlock ) + #define portEXIT_CRITICAL_DATA_GROUP_FROM_ISR( x, pxISRSpinlock ) vPortExitCriticalDataGroupFromISR( x, pxISRSpinlock ) +#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ +``` + +Example implementation of `portENTER_CRITICAL_DATA_GROUP( pxTaskSpinlock, pxISRSpinlock )` and `portEXIT_CRITICAL_DATA_GROUP( pxTaskSpinlock, pxISRSpinlock )`. Note that: + +- `pxTaskSpinlock` is made optional in case users want to create their own data groups that are protected only be a single lock. +- The kernel implements the `xTaskUnlockCanYield()` function to indicate whether an yield should occur when a critical section exits. This function takes into account whether there are any pending yields and whether preemption is currently disabled. + +```c +void vPortEnterCriticalDataGroup( port_spinlock_t *pxTaskSpinlock, port_spinlock_t *pxISRSpinlock ) +{ + portDISABLE_INTERRUPTS(); + + BaseType_t xCoreID = xPortGetCoreID(); + + /* Task spinlock is optional and is always taken first */ + if( pxTaskSpinlock != NULL ) + { + vPortSpinlockTake( pxTaskSpinlock, portMUX_NO_TIMEOUT ); + uxCriticalNesting[ xCoreID ]++; + } + + /* ISR spinlock must always be provided */ + vPortSpinlockTake( pxISRSpinlock, portMUX_NO_TIMEOUT ); + uxCriticalNesting[ xCoreID ]++; +} + +void vPortExitCriticalDataGroup( port_spinlock_t *pxTaskSpinlock, port_spinlock_t *pxISRSpinlock ) +{ + BaseType_t xCoreID = xPortGetCoreID(); + BaseType_t xYieldCurrentTask; + + configASSERT( uxCriticalNesting[ xCoreID ] > 0U ); + + /* Get the xYieldPending stats inside the critical section. 
*/
+    xYieldCurrentTask = xTaskUnlockCanYield();
+
+    /* ISR spinlock must always be provided */
+    vPortSpinlockRelease( pxISRSpinlock );
+    uxCriticalNesting[ xCoreID ]--;
+
+    /* Task spinlock is optional and is always released last */
+    if( pxTaskSpinlock != NULL )
+    {
+        vPortSpinlockRelease( pxTaskSpinlock );
+        uxCriticalNesting[ xCoreID ]--;
+    }
+
+    assert(uxCriticalNesting[ xCoreID ] >= 0);
+
+    if( uxCriticalNesting[ xCoreID ] == 0 )
+    {
+        portENABLE_INTERRUPTS();
+
+        /* When a task yields in a critical section it just sets xYieldPending to
+         * true. So now that we have exited the critical section check if xYieldPending
+         * is true, and if so yield. */
+        if( xYieldCurrentTask != pdFALSE )
+        {
+            portYIELD();
+        }
+        else
+        {
+            mtCOVERAGE_TEST_MARKER();
+        }
+    }
+}
+```
+
+Example implementation of `portENTER_CRITICAL_DATA_GROUP_FROM_ISR( pxISRSpinlock )` and `portEXIT_CRITICAL_DATA_GROUP_FROM_ISR( x, pxISRSpinlock )`. Note that only `pxISRSpinlock` needs to be provided since ISR critical sections take a single lock.
+
+```c
+UBaseType_t uxPortEnterCriticalDataGroupFromISR( port_spinlock_t *pxISRSpinlock )
+{
+    UBaseType_t uxSavedInterruptStatus = 0;
+
+    uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
+
+    vPortSpinlockTake( pxISRSpinlock, portMUX_NO_TIMEOUT );
+    uxCriticalNesting[ xPortGetCoreID() ]++;
+
+    return uxSavedInterruptStatus;
+}
+```
+
+```c
+void vPortExitCriticalDataGroupFromISR( UBaseType_t uxSavedInterruptStatus, port_spinlock_t *pxISRSpinlock )
+{
+    BaseType_t xCoreID = xPortGetCoreID();
+
+    vPortSpinlockRelease( pxISRSpinlock );
+    uxCriticalNesting[ xCoreID ]--;
+
+    assert(uxCriticalNesting[ xCoreID ] >= 0);
+
+    if( uxCriticalNesting[ xCoreID ] == 0 )
+    {
+        portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
+    }
+}
+```
+
+# Source Specific Changes
+
+- Added `xTaskSpinlock` and `xISRSpinlock` spinlocks to the data structures of each data group
+- All calls to `taskENTER/EXIT_CRITICAL[_FROM_ISR]()` have been replaced with `data_groupENTER/EXIT_CRITICAL[_FROM_ISR]()`.
+- Added `xTaskUnlockCanYield()` which indicates whether a yield should occur when exiting a critical section (i.e., unlocking a data group). Yields should not occur if preemption is disabled (such as when exiting a critical section inside a suspension block).
+
+## Tasks (Kernel Data Group)
+
+- Some functions are called from nested critical sections of other data groups, thus an extra critical section call needs to be added to lock/unlock the kernel data group (see the sketch at the end of this section):
+  - `vTaskInternalSetTimeOutState()`
+  - `xTaskIncrementTick()`
+  - `vTaskSwitchContext()`
+  - `xTaskRemoveFromEventList()`
+  - `eTaskConfirmSleepModeStatus()`
+  - `xTaskPriorityDisinherit()`
+  - `pvTaskIncrementMutexHeldCount()`
+- Some functions are called from nested suspension blocks of other data groups, thus an extra suspend/resume call needs to be added:
+  - `vTaskPlaceOnEventList()`
+  - `vTaskPlaceOnUnorderedEventList()`
+  - `vTaskPlaceOnEventListRestricted()`
+- `prvCheckForRunStateChange()` has been removed
+- Updated `vTaskSuspendAll()` and `xTaskResumeAll()`
+  - Now holds the `xTaskSpinlock` during kernel suspension
+  - Also increments/decrements `xPreemptionDisable` to prevent a yield from occurring when exiting a critical section from inside a kernel suspension block.
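+
+As a minimal illustrative sketch (not the verbatim tasks.c implementation), the extra kernel data group critical section described in the first bullet above looks roughly as follows, assuming the `kernelENTER_CRITICAL()`/`kernelEXIT_CRITICAL()` macros introduced earlier:
+
+```c
+/* Sketch only: the function body is elided. The point is that a kernel data
+ * group function which may be called from inside another data group's
+ * critical section (e.g. while a queue's spinlocks are held) now takes the
+ * kernel data group's own locks as well. */
+BaseType_t xTaskRemoveFromEventList( const List_t * const pxEventList )
+{
+    BaseType_t xReturn;
+
+    kernelENTER_CRITICAL();
+    {
+        /* ... existing logic that moves the unblocked task from the event
+         * list to the ready (or pending ready) list goes here ... */
+        xReturn = pdFALSE; /* Placeholder for the real "should yield" result. */
+    }
+    kernelEXIT_CRITICAL();
+
+    return xReturn;
+}
+```
+
+The same pattern applies to the other functions in the list above.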
+
+## Queue
+
+- Added `queueLOCK()` and `queueUNLOCK()`
+  - If granular locks are disabled, these revert to the previous `prvLockQueue()` and `prvUnlockQueue()`
+  - If granular locks are enabled, these lock/unlock the queue data group for tasks
+
+## Event Groups
+
+- Added `event_groupsLOCK()` and `event_groupsUNLOCK()`
+  - If granular locks are disabled, these revert to the previous `vTaskSuspendAll()` and `xTaskResumeAll()` calls
+  - If granular locks are enabled, these lock/unlock the event group data group for tasks
+- `xEventGroupSetBits()` and `vEventGroupDelete()` manually walk the task lists (which belong to the kernel data group). Thus, an extra `vTaskSuspendAll()`/`xTaskResumeAll()` is added to ensure that the kernel data group is suspended while walking those task lists.
+
+## Stream Buffer
+
+- Added `sbLOCK()` and `sbUNLOCK()`
+  - If granular locks are disabled, these revert to the previous `vTaskSuspendAll()` and `xTaskResumeAll()` calls
+  - If granular locks are enabled, these lock/unlock the stream buffer data group for tasks
+
+## Timers
+
+- Timers don't have a lock/unlock function. The existing `vTaskSuspendAll()`/`xTaskResumeAll()` calls remain valid as they rely on freezing the tick count, which is part of the kernel data group.
+
+# Prerequisite Refactoring
+
+A number of refactoring commits have been added to make the granular locking changes simpler:
+
+1. Move critical sections inside `xTaskPriorityInherit()`
+
+Currently, `xTaskPriorityInherit()` is called with wrapping critical sections. The critical sections have now been moved inside the function so that they have access to the kernel data group's spinlocks.
+
+2. Move critical section into `vTaskPriorityDisinheritAfterTimeout()`
+
+Currently, `vTaskPriorityDisinheritAfterTimeout()` is called with wrapping critical sections, where the highest priority waiting task is separately obtained via `prvGetDisinheritPriorityAfterTimeout()`. The critical section and the check of the highest waiting priority have all been moved into `vTaskPriorityDisinheritAfterTimeout()` as all of these operations access the kernel data group.
+
+3. Allow `vTaskPreemptionEnable()` to be nested
+
+Currently, nested calls of `vTaskPreemptionEnable()` are not supported. However, nested calls are required for granular locking due to the occurrence of nested suspension across multiple data groups.
+
+Thus, `vTaskPreemptionEnable()` has been updated to support nested calls. This is done by changing `xPreemptionDisable` to a count, where a non-zero count means that the current task cannot be preempted (see the sketch below).
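+
+A minimal sketch of this counting scheme is shown below. It is illustrative rather than the verbatim tasks.c change, and it assumes the `kernelENTER_CRITICAL()`/`kernelEXIT_CRITICAL()` macros described earlier:
+
+```c
+void vTaskPreemptionDisable( const TaskHandle_t xTask )
+{
+    TCB_t * pxTCB = prvGetTCBFromHandle( xTask );
+
+    kernelENTER_CRITICAL();
+    {
+        /* Non-zero means the task must not be preempted. Incrementing a count
+         * (rather than setting a flag) allows disable/enable calls to nest. */
+        pxTCB->xPreemptionDisable++;
+    }
+    kernelEXIT_CRITICAL();
+}
+
+void vTaskPreemptionEnable( const TaskHandle_t xTask )
+{
+    TCB_t * pxTCB = prvGetTCBFromHandle( xTask );
+
+    kernelENTER_CRITICAL();
+    {
+        configASSERT( pxTCB->xPreemptionDisable > 0U );
+        pxTCB->xPreemptionDisable--;
+
+        if( pxTCB->xPreemptionDisable == 0U )
+        {
+            /* Only the outermost enable call re-allows preemption; any yield
+             * that was pended while preemption was disabled would be
+             * requested here. */
+        }
+    }
+    kernelEXIT_CRITICAL();
+}
+```
+
+Because the count is held per task, a code path that locks one data group and then another (e.g. a queue data group followed by the kernel data group) can unwind the two locks independently without prematurely re-enabling preemption.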
+ +# Performance Metrics + +Todo + diff --git a/include/FreeRTOS.h b/include/FreeRTOS.h index ce8530e7dc..7155871dfb 100644 --- a/include/FreeRTOS.h +++ b/include/FreeRTOS.h @@ -359,6 +359,10 @@ #define portCRITICAL_NESTING_IN_TCB 0 #endif +#ifndef portUSING_GRANULAR_LOCKS + #define portUSING_GRANULAR_LOCKS 0 +#endif + #ifndef configMAX_TASK_NAME_LEN #define configMAX_TASK_NAME_LEN 16 #endif @@ -444,44 +448,68 @@ #ifndef portRELEASE_TASK_LOCK - #if ( configNUMBER_OF_CORES == 1 ) + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) || ( configNUMBER_OF_CORES == 1 ) ) #define portRELEASE_TASK_LOCK( xCoreID ) #else - #error portRELEASE_TASK_LOCK is required in SMP + #error portRELEASE_TASK_LOCK is required in SMP without granular locking feature enabled #endif #endif /* portRELEASE_TASK_LOCK */ #ifndef portGET_TASK_LOCK - #if ( configNUMBER_OF_CORES == 1 ) + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) || ( configNUMBER_OF_CORES == 1 ) ) #define portGET_TASK_LOCK( xCoreID ) #else - #error portGET_TASK_LOCK is required in SMP + #error portGET_TASK_LOCK is required in SMP without granular locking feature enabled #endif #endif /* portGET_TASK_LOCK */ #ifndef portRELEASE_ISR_LOCK - #if ( configNUMBER_OF_CORES == 1 ) + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) || ( configNUMBER_OF_CORES == 1 ) ) #define portRELEASE_ISR_LOCK( xCoreID ) #else - #error portRELEASE_ISR_LOCK is required in SMP + #error portRELEASE_ISR_LOCK is required in SMP without granular locking feature enabled #endif #endif /* portRELEASE_ISR_LOCK */ #ifndef portGET_ISR_LOCK - #if ( configNUMBER_OF_CORES == 1 ) + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) || ( configNUMBER_OF_CORES == 1 ) ) #define portGET_ISR_LOCK( xCoreID ) #else - #error portGET_ISR_LOCK is required in SMP + #error portGET_ISR_LOCK is required in SMP without granular locking feature enabled #endif #endif /* portGET_ISR_LOCK */ +#ifndef portRELEASE_SPINLOCK + + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + #error portRELEASE_SPINLOCK is required for SMP with granular locking feature enabled + #endif + +#endif + +#ifndef portGET_SPINLOCK + + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + #error portGET_SPINLOCK is required for SMP with granular locking feature enabled + #endif + +#endif + +#ifndef portCHECK_IF_IN_ISR + + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + #error portCHECK_IF_IN_ISR is required for granular locking + #endif + +#endif + #ifndef portENTER_CRITICAL_FROM_ISR #if ( configNUMBER_OF_CORES > 1 ) @@ -498,6 +526,62 @@ #endif +#ifndef portENTER_CRITICAL_DATA_GROUP + + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + #error portENTER_CRITICAL_DATA_GROUP is required for SMP with granular locking feature enabled + #endif + +#endif + +#ifndef portEXIT_CRITICAL_DATA_GROUP + + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + #error portEXIT_CRITICAL_DATA_GROUP is required for SMP with granular locking feature enabled + #endif + +#endif + +#ifndef portENTER_CRITICAL_DATA_GROUP_FROM_ISR + + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + #error portENTER_CRITICAL_DATA_GROUP_FROM_ISR is required for SMP with granular locking feature enabled + #endif + +#endif + +#ifndef portEXIT_CRITICAL_DATA_GROUP_FROM_ISR + + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + #error portEXIT_CRITICAL_DATA_GROUP_FROM_ISR is required for SMP with granular locking feature enabled + #endif + 
+#endif + +#ifndef portSPINLOCK_TYPE + + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + #error portSPINLOCK_TYPE is required for SMP with granular locking feature enabled + #endif + +#endif + +#ifndef portINIT_SPINLOCK + + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + #error portINIT_SPINLOCK is required for SMP with granular locking feature enabled + #endif + +#endif + +#ifndef portINIT_SPINLOCK_STATIC + + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + #error portINIT_SPINLOCK_STATIC is required for SMP with granular locking feature enabled + #endif + +#endif + #ifndef configUSE_CORE_AFFINITY #define configUSE_CORE_AFFINITY 0 #endif /* configUSE_CORE_AFFINITY */ @@ -2905,11 +2989,16 @@ /* Either variables of tick type cannot be read atomically, or * portTICK_TYPE_IS_ATOMIC was not set - map the critical sections used when * the tick count is returned to the standard critical section macros. */ - #define portTICK_TYPE_ENTER_CRITICAL() portENTER_CRITICAL() - #define portTICK_TYPE_EXIT_CRITICAL() portEXIT_CRITICAL() + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + #define portTICK_TYPE_ENTER_CRITICAL() portENTER_CRITICAL_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock ) + #define portTICK_TYPE_EXIT_CRITICAL() portEXIT_CRITICAL_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock ) + #else /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ + #define portTICK_TYPE_ENTER_CRITICAL() portENTER_CRITICAL() + #define portTICK_TYPE_EXIT_CRITICAL() portEXIT_CRITICAL() + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ #define portTICK_TYPE_SET_INTERRUPT_MASK_FROM_ISR() portSET_INTERRUPT_MASK_FROM_ISR() #define portTICK_TYPE_CLEAR_INTERRUPT_MASK_FROM_ISR( x ) portCLEAR_INTERRUPT_MASK_FROM_ISR( ( x ) ) -#else +#else /* if ( portTICK_TYPE_IS_ATOMIC == 0 ) */ /* The tick type can be read atomically, so critical sections used when the * tick count is returned can be defined away. */ @@ -3179,7 +3268,7 @@ typedef struct xSTATIC_TCB #endif uint8_t ucDummy7[ configMAX_TASK_NAME_LEN ]; #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) - BaseType_t xDummy25; + UBaseType_t xDummy25; #endif #if ( ( portSTACK_GROWTH > 0 ) || ( configRECORD_STACK_HIGH_ADDRESS == 1 ) ) void * pxDummy8; @@ -3261,6 +3350,10 @@ typedef struct xSTATIC_QUEUE UBaseType_t uxDummy8; uint8_t ucDummy9; #endif + + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + portSPINLOCK_TYPE xDummySpinlock[ 2 ]; + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ } StaticQueue_t; typedef StaticQueue_t StaticSemaphore_t; @@ -3290,6 +3383,10 @@ typedef struct xSTATIC_EVENT_GROUP #if ( ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) uint8_t ucDummy4; #endif + + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + portSPINLOCK_TYPE xDummySpinlock[ 2 ]; + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ } StaticEventGroup_t; /* @@ -3345,6 +3442,9 @@ typedef struct xSTATIC_STREAM_BUFFER void * pvDummy5[ 2 ]; #endif UBaseType_t uxDummy6; + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + portSPINLOCK_TYPE xDummySpinlock[ 2 ]; + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ } StaticStreamBuffer_t; /* Message buffers are built on stream buffers. 
*/ diff --git a/include/task.h b/include/task.h index a25740e3bb..7d533bd54b 100644 --- a/include/task.h +++ b/include/task.h @@ -3755,6 +3755,14 @@ void vTaskInternalSetTimeOutState( TimeOut_t * const pxTimeOut ) PRIVILEGED_FUNC void vTaskExitCriticalFromISR( UBaseType_t uxSavedInterruptStatus ); #endif +/* + * Checks whether a yield is required after portUNLOCK_DATA_GROUP() returns. + * To be called while data group is locked. + */ +#if ( configNUMBER_OF_CORES > 1 ) + BaseType_t xTaskUnlockCanYield( void ); +#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ + #if ( portUSING_MPU_WRAPPERS == 1 ) /* diff --git a/queue.c b/queue.c index 688fb31136..95e0d6dd24 100644 --- a/queue.c +++ b/queue.c @@ -133,6 +133,11 @@ typedef struct QueueDefinition /* The old naming convention is used to prevent b UBaseType_t uxQueueNumber; uint8_t ucQueueType; #endif + + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + portSPINLOCK_TYPE xTaskSpinlock; + portSPINLOCK_TYPE xISRSpinlock; + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ } xQUEUE; /* The old xQUEUE name is maintained above then typedefed to the new Queue_t @@ -180,7 +185,27 @@ typedef xQUEUE Queue_t; * to indicate that a task may require unblocking. When the queue in unlocked * these lock counts are inspected, and the appropriate action taken. */ -static void prvUnlockQueue( Queue_t * const pxQueue ) PRIVILEGED_FUNCTION; +#if ( portUSING_GRANULAR_LOCKS == 0 ) + static void prvUnlockQueue( Queue_t * const pxQueue ) PRIVILEGED_FUNCTION; +#endif /* #if ( portUSING_GRANULAR_LOCKS == 0 ) */ + +/* + * Locks a queue for tasks. Prevents other tasks from accessing the queue but allows + * ISRs to pend access to the queue. Caller cannot be preempted by other tasks + * after locking the queue, thus allowing the caller to execute non-deterministic + * operations. + */ +#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + static void prvLockQueueForTasks( Queue_t * const pxQueue ) PRIVILEGED_FUNCTION; +#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ + +/* + * Unlocks a queue for tasks. Handles all pended access from ISRs, then reenables preemption + * for the caller. + */ +#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + static void prvUnlockQueueForTasks( Queue_t * const pxQueue ) PRIVILEGED_FUNCTION; +#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ /* * Uses a critical section to determine if there is any data in a queue. @@ -251,12 +276,28 @@ static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength, #endif /*-----------------------------------------------------------*/ +/* + * Macros to mark the start and end of a critical code region. 
+ */ +#if ( portUSING_GRANULAR_LOCKS == 1 ) + #define queueENTER_CRITICAL( pxQueue ) portENTER_CRITICAL_DATA_GROUP( ( portSPINLOCK_TYPE * ) &( pxQueue->xTaskSpinlock ), ( portSPINLOCK_TYPE * ) &( pxQueue->xISRSpinlock ) ) + #define queueENTER_CRITICAL_FROM_ISR( pxQueue ) portENTER_CRITICAL_DATA_GROUP_FROM_ISR( ( portSPINLOCK_TYPE * ) &( pxQueue->xISRSpinlock ) ) + #define queueEXIT_CRITICAL( pxQueue ) portEXIT_CRITICAL_DATA_GROUP( ( portSPINLOCK_TYPE * ) &( pxQueue->xTaskSpinlock ), ( portSPINLOCK_TYPE * ) &( pxQueue->xISRSpinlock ) ) + #define queueEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, pxQueue ) portEXIT_CRITICAL_DATA_GROUP_FROM_ISR( uxSavedInterruptStatus, ( portSPINLOCK_TYPE * ) &( pxQueue->xISRSpinlock ) ) +#else /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */ + #define queueENTER_CRITICAL( pxQueue ) do { ( void ) pxQueue; taskENTER_CRITICAL(); } while( 0 ) + #define queueENTER_CRITICAL_FROM_ISR( pxQueue ) do { ( void ) pxQueue; taskENTER_CRITICAL_FROM_ISR(); } while( 0 ) + #define queueEXIT_CRITICAL( pxQueue ) do { ( void ) pxQueue; taskEXIT_CRITICAL(); } while( 0 ) + #define queueEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, pxQueue ) do { ( void ) pxQueue; taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus ); } while( 0 ) +#endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */ + /* * Macro to mark a queue as locked. Locking a queue prevents an ISR from * accessing the queue event lists. */ -#define prvLockQueue( pxQueue ) \ - taskENTER_CRITICAL(); \ +#if ( portUSING_GRANULAR_LOCKS == 0 ) + #define prvLockQueue( pxQueue ) \ + queueENTER_CRITICAL( pxQueue ); \ { \ if( ( pxQueue )->cRxLock == queueUNLOCKED ) \ { \ @@ -267,7 +308,8 @@ static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength, ( pxQueue )->cTxLock = queueLOCKED_UNMODIFIED; \ } \ } \ - taskEXIT_CRITICAL() + queueEXIT_CRITICAL( pxQueue ) +#endif /* #if ( portUSING_GRANULAR_LOCKS == 0 ) */ /* * Macro to increment cTxLock member of the queue data structure. It is @@ -298,6 +340,39 @@ static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength, ( pxQueue )->cRxLock = ( int8_t ) ( ( cRxLock ) + ( int8_t ) 1 ); \ } \ } while( 0 ) + +/* + * Macro used to lock and unlock a queue. When a task locks a queue, the + * task will have thread safe non-deterministic access to the queue. + * - Concurrent access from other tasks will be blocked by the xTaskSpinlock + * - Concurrent access from ISRs will be pended + * + * When the tasks unlocks the queue, all pended access attempts are handled. 
+ */ +#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + #define queueLOCK( pxQueue ) prvLockQueueForTasks( pxQueue ) + #define queueUNLOCK( pxQueue, xYieldAPI ) prvUnlockQueueForTasks( pxQueue ) +#else /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ + #define queueLOCK( pxQueue ) \ + do { \ + vTaskSuspendAll(); \ + prvLockQueue( ( pxQueue ) ); \ + } while( 0 ) + #define queueUNLOCK( pxQueue, xYieldAPI ) \ + do { \ + BaseType_t xAlreadyYielded; \ + prvUnlockQueue( ( pxQueue ) ); \ + xAlreadyYielded = xTaskResumeAll(); \ + if( ( xAlreadyYielded == pdFALSE ) && ( ( xYieldAPI ) == pdTRUE ) ) \ + { \ + taskYIELD_WITHIN_API(); \ + } \ + else \ + { \ + mtCOVERAGE_TEST_MARKER(); \ + } \ + } while( 0 ) +#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ /*-----------------------------------------------------------*/ BaseType_t xQueueGenericReset( QueueHandle_t xQueue, @@ -310,12 +385,22 @@ BaseType_t xQueueGenericReset( QueueHandle_t xQueue, configASSERT( pxQueue ); + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + { + if( xNewQueue == pdTRUE ) + { + portINIT_SPINLOCK( &( pxQueue->xTaskSpinlock ) ); + portINIT_SPINLOCK( &( pxQueue->xISRSpinlock ) ); + } + } + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ + if( ( pxQueue != NULL ) && ( pxQueue->uxLength >= 1U ) && /* Check for multiplication overflow. */ ( ( SIZE_MAX / pxQueue->uxLength ) >= pxQueue->uxItemSize ) ) { - taskENTER_CRITICAL(); + queueENTER_CRITICAL( pxQueue ); { pxQueue->u.xQueue.pcTail = pxQueue->pcHead + ( pxQueue->uxLength * pxQueue->uxItemSize ); pxQueue->uxMessagesWaiting = ( UBaseType_t ) 0U; @@ -354,7 +439,7 @@ BaseType_t xQueueGenericReset( QueueHandle_t xQueue, vListInitialise( &( pxQueue->xTasksWaitingToReceive ) ); } } - taskEXIT_CRITICAL(); + queueEXIT_CRITICAL( pxQueue ); } else { @@ -513,7 +598,7 @@ BaseType_t xQueueGenericReset( QueueHandle_t xQueue, /* Check for multiplication overflow. */ ( ( SIZE_MAX / uxQueueLength ) >= uxItemSize ) && /* Check for addition overflow. */ - ( ( SIZE_MAX - sizeof( Queue_t ) ) >= ( size_t ) ( uxQueueLength * uxItemSize ) ) ) + ( ( UBaseType_t ) ( SIZE_MAX - sizeof( Queue_t ) ) >= ( uxQueueLength * uxItemSize ) ) ) { /* Allocate enough space to hold the maximum number of items that * can be in the queue at any time. It is valid for uxItemSize to be @@ -700,7 +785,7 @@ static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength, * calling task is the mutex holder, but not a good way of determining the * identity of the mutex holder, as the holder may change between the * following critical section exiting and the function returning. */ - taskENTER_CRITICAL(); + queueENTER_CRITICAL( pxSemaphore ); { if( pxSemaphore->uxQueueType == queueQUEUE_IS_MUTEX ) { @@ -711,7 +796,7 @@ static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength, pxReturn = NULL; } } - taskEXIT_CRITICAL(); + queueEXIT_CRITICAL( pxSemaphore ); traceRETURN_xQueueGetMutexHolder( pxReturn ); @@ -958,7 +1043,7 @@ BaseType_t xQueueGenericSend( QueueHandle_t xQueue, for( ; ; ) { - taskENTER_CRITICAL(); + queueENTER_CRITICAL( pxQueue ); { /* Is there room on the queue now? The running task must be the * highest priority task wanting to access the queue. 
If the head item @@ -1064,7 +1149,7 @@ BaseType_t xQueueGenericSend( QueueHandle_t xQueue, } #endif /* configUSE_QUEUE_SETS */ - taskEXIT_CRITICAL(); + queueEXIT_CRITICAL( pxQueue ); traceRETURN_xQueueGenericSend( pdPASS ); @@ -1076,7 +1161,7 @@ BaseType_t xQueueGenericSend( QueueHandle_t xQueue, { /* The queue was full and no block time is specified (or * the block time has expired) so leave now. */ - taskEXIT_CRITICAL(); + queueEXIT_CRITICAL( pxQueue ); /* Return to the original privilege level before exiting * the function. */ @@ -1099,13 +1184,12 @@ BaseType_t xQueueGenericSend( QueueHandle_t xQueue, } } } - taskEXIT_CRITICAL(); + queueEXIT_CRITICAL( pxQueue ); /* Interrupts and other tasks can send to and receive from the queue * now the critical section has been exited. */ - vTaskSuspendAll(); - prvLockQueue( pxQueue ); + queueLOCK( pxQueue ); /* Update the timeout state to see if it has expired yet. */ if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE ) @@ -1115,35 +1199,18 @@ BaseType_t xQueueGenericSend( QueueHandle_t xQueue, traceBLOCKING_ON_QUEUE_SEND( pxQueue ); vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToSend ), xTicksToWait ); - /* Unlocking the queue means queue events can effect the - * event list. It is possible that interrupts occurring now - * remove this task from the event list again - but as the - * scheduler is suspended the task will go onto the pending - * ready list instead of the actual ready list. */ - prvUnlockQueue( pxQueue ); - - /* Resuming the scheduler will move tasks from the pending - * ready list into the ready list - so it is feasible that this - * task is already in the ready list before it yields - in which - * case the yield will not cause a context switch unless there - * is also a higher priority task in the pending ready list. */ - if( xTaskResumeAll() == pdFALSE ) - { - taskYIELD_WITHIN_API(); - } + queueUNLOCK( pxQueue, pdTRUE ); } else { /* Try again. */ - prvUnlockQueue( pxQueue ); - ( void ) xTaskResumeAll(); + queueUNLOCK( pxQueue, pdFALSE ); } } else { /* The timeout has expired. 
*/ - prvUnlockQueue( pxQueue ); - ( void ) xTaskResumeAll(); + queueUNLOCK( pxQueue, pdFALSE ); traceQUEUE_SEND_FAILED( pxQueue ); traceRETURN_xQueueGenericSend( errQUEUE_FULL ); @@ -1193,7 +1260,7 @@ BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue, /* MISRA Ref 4.7.1 [Return value shall be checked] */ /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#dir-47 */ /* coverity[misra_c_2012_directive_4_7_violation] */ - uxSavedInterruptStatus = ( UBaseType_t ) taskENTER_CRITICAL_FROM_ISR(); + uxSavedInterruptStatus = ( UBaseType_t ) queueENTER_CRITICAL_FROM_ISR( pxQueue ); { if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) ) { @@ -1318,7 +1385,7 @@ BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue, xReturn = errQUEUE_FULL; } } - taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus ); + queueEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, pxQueue ); traceRETURN_xQueueGenericSendFromISR( xReturn ); @@ -1371,7 +1438,7 @@ BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue, /* MISRA Ref 4.7.1 [Return value shall be checked] */ /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#dir-47 */ /* coverity[misra_c_2012_directive_4_7_violation] */ - uxSavedInterruptStatus = ( UBaseType_t ) taskENTER_CRITICAL_FROM_ISR(); + uxSavedInterruptStatus = ( UBaseType_t ) queueENTER_CRITICAL_FROM_ISR( pxQueue ); { const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting; @@ -1491,7 +1558,7 @@ BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue, xReturn = errQUEUE_FULL; } } - taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus ); + queueEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, pxQueue ); traceRETURN_xQueueGiveFromISR( xReturn ); @@ -1525,7 +1592,7 @@ BaseType_t xQueueReceive( QueueHandle_t xQueue, for( ; ; ) { - taskENTER_CRITICAL(); + queueENTER_CRITICAL( pxQueue ); { const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting; @@ -1557,7 +1624,7 @@ BaseType_t xQueueReceive( QueueHandle_t xQueue, mtCOVERAGE_TEST_MARKER(); } - taskEXIT_CRITICAL(); + queueEXIT_CRITICAL( pxQueue ); traceRETURN_xQueueReceive( pdPASS ); @@ -1569,7 +1636,7 @@ BaseType_t xQueueReceive( QueueHandle_t xQueue, { /* The queue was empty and no block time is specified (or * the block time has expired) so leave now. */ - taskEXIT_CRITICAL(); + queueEXIT_CRITICAL( pxQueue ); traceQUEUE_RECEIVE_FAILED( pxQueue ); traceRETURN_xQueueReceive( errQUEUE_EMPTY ); @@ -1590,13 +1657,12 @@ BaseType_t xQueueReceive( QueueHandle_t xQueue, } } } - taskEXIT_CRITICAL(); + queueEXIT_CRITICAL( pxQueue ); /* Interrupts and other tasks can send to and receive from the queue * now the critical section has been exited. */ - vTaskSuspendAll(); - prvLockQueue( pxQueue ); + queueLOCK( pxQueue ); /* Update the timeout state to see if it has expired yet. */ if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE ) @@ -1607,31 +1673,20 @@ BaseType_t xQueueReceive( QueueHandle_t xQueue, { traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue ); vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait ); - prvUnlockQueue( pxQueue ); - - if( xTaskResumeAll() == pdFALSE ) - { - taskYIELD_WITHIN_API(); - } - else - { - mtCOVERAGE_TEST_MARKER(); - } + queueUNLOCK( pxQueue, pdTRUE ); } else { /* The queue contains data again. Loop back to try and read the * data. */ - prvUnlockQueue( pxQueue ); - ( void ) xTaskResumeAll(); + queueUNLOCK( pxQueue, pdFALSE ); } } else { /* Timed out. 
If there is no data in the queue exit, otherwise loop * back and attempt to read the data. */ - prvUnlockQueue( pxQueue ); - ( void ) xTaskResumeAll(); + queueUNLOCK( pxQueue, pdFALSE ); if( prvIsQueueEmpty( pxQueue ) != pdFALSE ) { @@ -1678,7 +1733,7 @@ BaseType_t xQueueSemaphoreTake( QueueHandle_t xQueue, for( ; ; ) { - taskENTER_CRITICAL(); + queueENTER_CRITICAL( pxQueue ); { /* Semaphores are queues with an item size of 0, and where the * number of messages in the queue is the semaphore's count value. */ @@ -1727,7 +1782,7 @@ BaseType_t xQueueSemaphoreTake( QueueHandle_t xQueue, mtCOVERAGE_TEST_MARKER(); } - taskEXIT_CRITICAL(); + queueEXIT_CRITICAL( pxQueue ); traceRETURN_xQueueSemaphoreTake( pdPASS ); @@ -1739,7 +1794,7 @@ BaseType_t xQueueSemaphoreTake( QueueHandle_t xQueue, { /* The semaphore count was 0 and no block time is specified * (or the block time has expired) so exit now. */ - taskEXIT_CRITICAL(); + queueEXIT_CRITICAL( pxQueue ); traceQUEUE_RECEIVE_FAILED( pxQueue ); traceRETURN_xQueueSemaphoreTake( errQUEUE_EMPTY ); @@ -1760,13 +1815,12 @@ BaseType_t xQueueSemaphoreTake( QueueHandle_t xQueue, } } } - taskEXIT_CRITICAL(); + queueEXIT_CRITICAL( pxQueue ); /* Interrupts and other tasks can give to and take from the semaphore * now the critical section has been exited. */ - vTaskSuspendAll(); - prvLockQueue( pxQueue ); + queueLOCK( pxQueue ); /* Update the timeout state to see if it has expired yet. */ if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE ) @@ -1783,11 +1837,7 @@ BaseType_t xQueueSemaphoreTake( QueueHandle_t xQueue, { if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX ) { - taskENTER_CRITICAL(); - { - xInheritanceOccurred = xTaskPriorityInherit( pxQueue->u.xSemaphore.xMutexHolder ); - } - taskEXIT_CRITICAL(); + xInheritanceOccurred = xTaskPriorityInherit( pxQueue->u.xSemaphore.xMutexHolder ); } else { @@ -1797,30 +1847,19 @@ BaseType_t xQueueSemaphoreTake( QueueHandle_t xQueue, #endif /* if ( configUSE_MUTEXES == 1 ) */ vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait ); - prvUnlockQueue( pxQueue ); - - if( xTaskResumeAll() == pdFALSE ) - { - taskYIELD_WITHIN_API(); - } - else - { - mtCOVERAGE_TEST_MARKER(); - } + queueUNLOCK( pxQueue, pdTRUE ); } else { /* There was no timeout and the semaphore count was not 0, so * attempt to take the semaphore again. */ - prvUnlockQueue( pxQueue ); - ( void ) xTaskResumeAll(); + queueUNLOCK( pxQueue, pdFALSE ); } } else { /* Timed out. */ - prvUnlockQueue( pxQueue ); - ( void ) xTaskResumeAll(); + queueUNLOCK( pxQueue, pdFALSE ); /* If the semaphore count is 0 exit now as the timeout has * expired. Otherwise return to attempt to take the semaphore that is @@ -1835,7 +1874,7 @@ BaseType_t xQueueSemaphoreTake( QueueHandle_t xQueue, * test the mutex type again to check it is actually a mutex. 
*/ if( xInheritanceOccurred != pdFALSE ) { - taskENTER_CRITICAL(); + queueENTER_CRITICAL( pxQueue ); { UBaseType_t uxHighestWaitingPriority; @@ -1855,7 +1894,7 @@ BaseType_t xQueueSemaphoreTake( QueueHandle_t xQueue, /* coverity[overrun] */ vTaskPriorityDisinheritAfterTimeout( pxQueue->u.xSemaphore.xMutexHolder, uxHighestWaitingPriority ); } - taskEXIT_CRITICAL(); + queueEXIT_CRITICAL( pxQueue ); } } #endif /* configUSE_MUTEXES */ @@ -1901,7 +1940,7 @@ BaseType_t xQueuePeek( QueueHandle_t xQueue, for( ; ; ) { - taskENTER_CRITICAL(); + queueENTER_CRITICAL( pxQueue ); { const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting; @@ -1939,7 +1978,7 @@ BaseType_t xQueuePeek( QueueHandle_t xQueue, mtCOVERAGE_TEST_MARKER(); } - taskEXIT_CRITICAL(); + queueEXIT_CRITICAL( pxQueue ); traceRETURN_xQueuePeek( pdPASS ); @@ -1951,7 +1990,7 @@ BaseType_t xQueuePeek( QueueHandle_t xQueue, { /* The queue was empty and no block time is specified (or * the block time has expired) so leave now. */ - taskEXIT_CRITICAL(); + queueEXIT_CRITICAL( pxQueue ); traceQUEUE_PEEK_FAILED( pxQueue ); traceRETURN_xQueuePeek( errQUEUE_EMPTY ); @@ -1973,13 +2012,12 @@ BaseType_t xQueuePeek( QueueHandle_t xQueue, } } } - taskEXIT_CRITICAL(); + queueEXIT_CRITICAL( pxQueue ); /* Interrupts and other tasks can send to and receive from the queue * now that the critical section has been exited. */ - vTaskSuspendAll(); - prvLockQueue( pxQueue ); + queueLOCK( pxQueue ); /* Update the timeout state to see if it has expired yet. */ if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE ) @@ -1990,31 +2028,20 @@ BaseType_t xQueuePeek( QueueHandle_t xQueue, { traceBLOCKING_ON_QUEUE_PEEK( pxQueue ); vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait ); - prvUnlockQueue( pxQueue ); - - if( xTaskResumeAll() == pdFALSE ) - { - taskYIELD_WITHIN_API(); - } - else - { - mtCOVERAGE_TEST_MARKER(); - } + queueUNLOCK( pxQueue, pdTRUE ); } else { /* There is data in the queue now, so don't enter the blocked * state, instead return to try and obtain the data. */ - prvUnlockQueue( pxQueue ); - ( void ) xTaskResumeAll(); + queueUNLOCK( pxQueue, pdFALSE ); } } else { /* The timeout has expired. If there is still no data in the queue * exit, otherwise go back and try to read the data again. 
*/ - prvUnlockQueue( pxQueue ); - ( void ) xTaskResumeAll(); + queueUNLOCK( pxQueue, pdFALSE ); if( prvIsQueueEmpty( pxQueue ) != pdFALSE ) { @@ -2064,7 +2091,7 @@ BaseType_t xQueueReceiveFromISR( QueueHandle_t xQueue, /* MISRA Ref 4.7.1 [Return value shall be checked] */ /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#dir-47 */ /* coverity[misra_c_2012_directive_4_7_violation] */ - uxSavedInterruptStatus = ( UBaseType_t ) taskENTER_CRITICAL_FROM_ISR(); + uxSavedInterruptStatus = ( UBaseType_t ) queueENTER_CRITICAL_FROM_ISR( pxQueue ); { const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting; @@ -2124,7 +2151,7 @@ BaseType_t xQueueReceiveFromISR( QueueHandle_t xQueue, traceQUEUE_RECEIVE_FROM_ISR_FAILED( pxQueue ); } } - taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus ); + queueEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, pxQueue ); traceRETURN_xQueueReceiveFromISR( xReturn ); @@ -2165,7 +2192,7 @@ BaseType_t xQueuePeekFromISR( QueueHandle_t xQueue, /* MISRA Ref 4.7.1 [Return value shall be checked] */ /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#dir-47 */ /* coverity[misra_c_2012_directive_4_7_violation] */ - uxSavedInterruptStatus = ( UBaseType_t ) taskENTER_CRITICAL_FROM_ISR(); + uxSavedInterruptStatus = ( UBaseType_t ) queueENTER_CRITICAL_FROM_ISR( pxQueue ); { /* Cannot block in an ISR, so check there is data available. */ if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 ) @@ -2186,7 +2213,7 @@ BaseType_t xQueuePeekFromISR( QueueHandle_t xQueue, traceQUEUE_PEEK_FROM_ISR_FAILED( pxQueue ); } } - taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus ); + queueEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, pxQueue ); traceRETURN_xQueuePeekFromISR( xReturn ); @@ -2202,11 +2229,11 @@ UBaseType_t uxQueueMessagesWaiting( const QueueHandle_t xQueue ) configASSERT( xQueue ); - portBASE_TYPE_ENTER_CRITICAL(); + queueENTER_CRITICAL( xQueue ); { uxReturn = ( ( Queue_t * ) xQueue )->uxMessagesWaiting; } - portBASE_TYPE_EXIT_CRITICAL(); + queueEXIT_CRITICAL( xQueue ); traceRETURN_uxQueueMessagesWaiting( uxReturn ); @@ -2223,11 +2250,11 @@ UBaseType_t uxQueueSpacesAvailable( const QueueHandle_t xQueue ) configASSERT( pxQueue ); - portBASE_TYPE_ENTER_CRITICAL(); + queueENTER_CRITICAL( pxQueue ); { uxReturn = ( UBaseType_t ) ( pxQueue->uxLength - pxQueue->uxMessagesWaiting ); } - portBASE_TYPE_EXIT_CRITICAL(); + queueEXIT_CRITICAL( pxQueue ); traceRETURN_uxQueueSpacesAvailable( uxReturn ); @@ -2491,19 +2518,169 @@ static void prvCopyDataFromQueue( Queue_t * const pxQueue, } /*-----------------------------------------------------------*/ -static void prvUnlockQueue( Queue_t * const pxQueue ) -{ - /* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. */ +#if ( portUSING_GRANULAR_LOCKS == 0 ) + static void prvUnlockQueue( Queue_t * const pxQueue ) + { + /* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. */ - /* The lock counts contains the number of extra data items placed or - * removed from the queue while the queue was locked. When a queue is - * locked items can be added or removed, but the event lists cannot be - * updated. */ - taskENTER_CRITICAL(); + /* The lock counts contains the number of extra data items placed or + * removed from the queue while the queue was locked. When a queue is + * locked items can be added or removed, but the event lists cannot be + * updated. 
*/ + queueENTER_CRITICAL( pxQueue ); + { + int8_t cTxLock = pxQueue->cTxLock; + + /* See if data was added to the queue while it was locked. */ + while( cTxLock > queueLOCKED_UNMODIFIED ) + { + /* Data was posted while the queue was locked. Are any tasks + * blocked waiting for data to become available? */ + #if ( configUSE_QUEUE_SETS == 1 ) + { + if( pxQueue->pxQueueSetContainer != NULL ) + { + if( prvNotifyQueueSetContainer( pxQueue ) != pdFALSE ) + { + /* The queue is a member of a queue set, and posting to + * the queue set caused a higher priority task to unblock. + * A context switch is required. */ + vTaskMissedYield(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + /* Tasks that are removed from the event list will get + * added to the pending ready list as the scheduler is still + * suspended. */ + if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE ) + { + if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE ) + { + /* The task waiting has a higher priority so record that a + * context switch is required. */ + vTaskMissedYield(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + break; + } + } + } + #else /* configUSE_QUEUE_SETS */ + { + /* Tasks that are removed from the event list will get added to + * the pending ready list as the scheduler is still suspended. */ + if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE ) + { + if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE ) + { + /* The task waiting has a higher priority so record that + * a context switch is required. */ + vTaskMissedYield(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + break; + } + } + #endif /* configUSE_QUEUE_SETS */ + + --cTxLock; + } + + pxQueue->cTxLock = queueUNLOCKED; + } + queueEXIT_CRITICAL( pxQueue ); + + /* Do the same for the Rx lock. */ + queueENTER_CRITICAL( pxQueue ); + { + int8_t cRxLock = pxQueue->cRxLock; + + while( cRxLock > queueLOCKED_UNMODIFIED ) + { + if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE ) + { + if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE ) + { + vTaskMissedYield(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + --cRxLock; + } + else + { + break; + } + } + + pxQueue->cRxLock = queueUNLOCKED; + } + queueEXIT_CRITICAL( pxQueue ); + } +#endif /* #if ( portUSING_GRANULAR_LOCKS == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + static void prvLockQueueForTasks( Queue_t * const pxQueue ) { + /* Disable preemption so that the current task cannot be preempted by another task */ + vTaskPreemptionDisable( NULL ); + + /* Lock the queue data group so that we can suspend the queue atomically */ + queueENTER_CRITICAL( pxQueue ); + + /* Suspend writing and reading to/from the queue */ + if( pxQueue->cRxLock == queueUNLOCKED ) + { + pxQueue->cRxLock = queueLOCKED_UNMODIFIED; + } + + if( pxQueue->cTxLock == queueUNLOCKED ) + { + pxQueue->cTxLock = queueLOCKED_UNMODIFIED; + } + + /* Keep holding xTaskSpinlock after unlocking the data group to prevent tasks + * on other cores from accessing the queue while it is suspended. 
*/ + portGET_SPINLOCK( portGET_CORE_ID(), &( pxQueue->xTaskSpinlock ) ); + + queueEXIT_CRITICAL( pxQueue ); + } +#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + static void prvUnlockQueueForTasks( Queue_t * const pxQueue ) + { + /* Lock the queue data group so that we can handle any pended accesses atomically */ + queueENTER_CRITICAL( pxQueue ); + + /* Release the previously held task spinlock */ + portRELEASE_SPINLOCK( portGET_CORE_ID(), &( pxQueue->xTaskSpinlock ) ); + + /* Handle pended TX access to the queue */ int8_t cTxLock = pxQueue->cTxLock; - /* See if data was added to the queue while it was locked. */ while( cTxLock > queueLOCKED_UNMODIFIED ) { /* Data was posted while the queue was locked. Are any tasks @@ -2576,12 +2753,8 @@ static void prvUnlockQueue( Queue_t * const pxQueue ) } pxQueue->cTxLock = queueUNLOCKED; - } - taskEXIT_CRITICAL(); - /* Do the same for the Rx lock. */ - taskENTER_CRITICAL(); - { + /* Handle pended RX access to the queue */ int8_t cRxLock = pxQueue->cRxLock; while( cRxLock > queueLOCKED_UNMODIFIED ) @@ -2605,17 +2778,19 @@ static void prvUnlockQueue( Queue_t * const pxQueue ) } } - pxQueue->cRxLock = queueUNLOCKED; + queueEXIT_CRITICAL( pxQueue ); + + /* Re-enable preemption */ + vTaskPreemptionEnable( NULL ); } - taskEXIT_CRITICAL(); -} +#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ /*-----------------------------------------------------------*/ static BaseType_t prvIsQueueEmpty( const Queue_t * pxQueue ) { BaseType_t xReturn; - taskENTER_CRITICAL(); + queueENTER_CRITICAL( pxQueue ); { if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0 ) { @@ -2626,7 +2801,7 @@ static BaseType_t prvIsQueueEmpty( const Queue_t * pxQueue ) xReturn = pdFALSE; } } - taskEXIT_CRITICAL(); + queueEXIT_CRITICAL( pxQueue ); return xReturn; } @@ -2660,7 +2835,7 @@ static BaseType_t prvIsQueueFull( const Queue_t * pxQueue ) { BaseType_t xReturn; - taskENTER_CRITICAL(); + queueENTER_CRITICAL( pxQueue ); { if( pxQueue->uxMessagesWaiting == pxQueue->uxLength ) { @@ -2671,7 +2846,7 @@ static BaseType_t prvIsQueueFull( const Queue_t * pxQueue ) xReturn = pdFALSE; } } - taskEXIT_CRITICAL(); + queueEXIT_CRITICAL( pxQueue ); return xReturn; } @@ -3151,7 +3326,7 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue ) * time a yield will be performed. If an item is added to the queue while * the queue is locked, and the calling task blocks on the queue, then the * calling task will be immediately unblocked when the queue is unlocked. */ - prvLockQueue( pxQueue ); + queueLOCK( pxQueue ); if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0U ) { @@ -3163,7 +3338,7 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue ) mtCOVERAGE_TEST_MARKER(); } - prvUnlockQueue( pxQueue ); + queueUNLOCK( pxQueue, pdFALSE ); traceRETURN_vQueueWaitForMessageRestricted(); } @@ -3215,17 +3390,18 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue ) QueueSetHandle_t xQueueSet ) { BaseType_t xReturn; + Queue_t * const pxQueue = xQueueOrSemaphore; traceENTER_xQueueAddToSet( xQueueOrSemaphore, xQueueSet ); - taskENTER_CRITICAL(); + queueENTER_CRITICAL( pxQueue ); { - if( ( ( Queue_t * ) xQueueOrSemaphore )->pxQueueSetContainer != NULL ) + if( pxQueue->pxQueueSetContainer != NULL ) { /* Cannot add a queue/semaphore to more than one queue set. 
*/ xReturn = pdFAIL; } - else if( ( ( Queue_t * ) xQueueOrSemaphore )->uxMessagesWaiting != ( UBaseType_t ) 0 ) + else if( pxQueue->uxMessagesWaiting != ( UBaseType_t ) 0 ) { /* Cannot add a queue/semaphore to a queue set if there are already * items in the queue/semaphore. */ xReturn = pdFAIL; } else { - ( ( Queue_t * ) xQueueOrSemaphore )->pxQueueSetContainer = xQueueSet; + pxQueue->pxQueueSetContainer = xQueueSet; xReturn = pdPASS; } } - taskEXIT_CRITICAL(); + queueEXIT_CRITICAL( pxQueue ); traceRETURN_xQueueAddToSet( xReturn ); @@ -3271,12 +3447,12 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue ) } else { - taskENTER_CRITICAL(); + queueENTER_CRITICAL( pxQueueOrSemaphore ); { /* The queue is no longer contained in the set. */ pxQueueOrSemaphore->pxQueueSetContainer = NULL; } - taskEXIT_CRITICAL(); + queueEXIT_CRITICAL( pxQueueOrSemaphore ); xReturn = pdPASS; } diff --git a/stream_buffer.c b/stream_buffer.c index b1f9c0d57c..857474b011 100644 --- a/stream_buffer.c +++ b/stream_buffer.c @@ -58,6 +58,39 @@ #error INCLUDE_xTaskGetCurrentTaskHandle must be set to 1 to build stream_buffer.c #endif + +/* + * Macros to mark the start and end of a critical code region. + */ + #if ( portUSING_GRANULAR_LOCKS == 1 ) + #define sbENTER_CRITICAL( pxStreamBuffer ) portENTER_CRITICAL_DATA_GROUP( ( portSPINLOCK_TYPE * ) &( pxStreamBuffer->xTaskSpinlock ), &( pxStreamBuffer->xISRSpinlock ) ) + #define sbENTER_CRITICAL_FROM_ISR( pxStreamBuffer ) portENTER_CRITICAL_DATA_GROUP_FROM_ISR( ( portSPINLOCK_TYPE * ) &( pxStreamBuffer->xISRSpinlock ) ) + #define sbEXIT_CRITICAL( pxStreamBuffer ) portEXIT_CRITICAL_DATA_GROUP( ( portSPINLOCK_TYPE * ) &( pxStreamBuffer->xTaskSpinlock ), &( pxStreamBuffer->xISRSpinlock ) ) + #define sbEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, pxStreamBuffer ) portEXIT_CRITICAL_DATA_GROUP_FROM_ISR( uxSavedInterruptStatus, ( portSPINLOCK_TYPE * ) &( pxStreamBuffer->xISRSpinlock ) ) + #else /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */ + #define sbENTER_CRITICAL( pxStreamBuffer ) do { ( void ) pxStreamBuffer; taskENTER_CRITICAL(); } while( 0 ) + #define sbENTER_CRITICAL_FROM_ISR( pxStreamBuffer ) do { ( void ) pxStreamBuffer; taskENTER_CRITICAL_FROM_ISR(); } while( 0 ) + #define sbEXIT_CRITICAL( pxStreamBuffer ) do { ( void ) pxStreamBuffer; taskEXIT_CRITICAL(); } while( 0 ) + #define sbEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, pxStreamBuffer ) do { ( void ) pxStreamBuffer; taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus ); } while( 0 ) + #endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */ + +/* + * Macros used to lock and unlock a stream buffer. When a task locks a stream + * buffer, the task will have thread safe non-deterministic access to the stream + * buffer. + * - Concurrent access from other tasks will be blocked by the xTaskSpinlock + * - Concurrent access from ISRs will be pended + * + * When the task unlocks the stream buffer, all pended access attempts are handled.
+ */ + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + #define sbLOCK( pxStreamBuffer ) prvLockStreamBufferForTasks( pxStreamBuffer ) + #define sbUNLOCK( pxStreamBuffer ) prvUnlockStreamBufferForTasks( pxStreamBuffer ) + #else /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ + #define sbLOCK( pxStreamBuffer ) vTaskSuspendAll() + #define sbUNLOCK( pxStreamBuffer ) ( void ) xTaskResumeAll() + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ + /* If the user has not provided application specific Rx notification macros, * or #defined the notification macros away, then provide default implementations * that uses task notifications. */ @@ -65,7 +98,7 @@ #define sbRECEIVE_COMPLETED( pxStreamBuffer ) \ do \ { \ - vTaskSuspendAll(); \ + sbLOCK( pxStreamBuffer ); \ { \ if( ( pxStreamBuffer )->xTaskWaitingToSend != NULL ) \ { \ @@ -76,7 +109,7 @@ ( pxStreamBuffer )->xTaskWaitingToSend = NULL; \ } \ } \ - ( void ) xTaskResumeAll(); \ + ( void ) sbUNLOCK( pxStreamBuffer ); \ } while( 0 ) #endif /* sbRECEIVE_COMPLETED */ @@ -105,7 +138,7 @@ do { \ UBaseType_t uxSavedInterruptStatus; \ \ - uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR(); \ + uxSavedInterruptStatus = sbENTER_CRITICAL_FROM_ISR( pxStreamBuffer ); \ { \ if( ( pxStreamBuffer )->xTaskWaitingToSend != NULL ) \ { \ @@ -117,7 +150,7 @@ ( pxStreamBuffer )->xTaskWaitingToSend = NULL; \ } \ } \ - taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus ); \ + sbEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, pxStreamBuffer ); \ } while( 0 ) #endif /* sbRECEIVE_COMPLETED_FROM_ISR */ @@ -145,7 +178,7 @@ */ #ifndef sbSEND_COMPLETED #define sbSEND_COMPLETED( pxStreamBuffer ) \ - vTaskSuspendAll(); \ + sbLOCK( pxStreamBuffer ); \ { \ if( ( pxStreamBuffer )->xTaskWaitingToReceive != NULL ) \ { \ @@ -156,7 +189,7 @@ ( pxStreamBuffer )->xTaskWaitingToReceive = NULL; \ } \ } \ - ( void ) xTaskResumeAll() + ( void ) sbUNLOCK( pxStreamBuffer ) #endif /* sbSEND_COMPLETED */ /* If user has provided a per-instance send completed callback, then @@ -184,7 +217,7 @@ do { \ UBaseType_t uxSavedInterruptStatus; \ \ - uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR(); \ + uxSavedInterruptStatus = sbENTER_CRITICAL_FROM_ISR( pxStreamBuffer ); \ { \ if( ( pxStreamBuffer )->xTaskWaitingToReceive != NULL ) \ { \ @@ -196,7 +229,7 @@ ( pxStreamBuffer )->xTaskWaitingToReceive = NULL; \ } \ } \ - taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus ); \ + sbEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, pxStreamBuffer ); \ } while( 0 ) #endif /* sbSEND_COMPLETE_FROM_ISR */ @@ -249,8 +282,30 @@ typedef struct StreamBufferDef_t StreamBufferCallbackFunction_t pxReceiveCompletedCallback; /* Optional callback called on receive complete. sbRECEIVE_COMPLETED is called if this is NULL. */ #endif UBaseType_t uxNotificationIndex; /* The index we are using for notification, by default tskDEFAULT_INDEX_TO_NOTIFY. */ + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + portSPINLOCK_TYPE xTaskSpinlock; + portSPINLOCK_TYPE xISRSpinlock; + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ } StreamBuffer_t; +/* + * Locks a stream buffer for tasks. Prevents other tasks from accessing the stream buffer + * but allows ISRs to pend access to the stream buffer. Caller cannot be preempted + * by other tasks after locking the stream buffer, thus allowing the caller to + * execute non-deterministic operations. 
+ */ + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + static void prvLockStreamBufferForTasks( StreamBuffer_t * const pxStreamBuffer ) PRIVILEGED_FUNCTION; + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ + +/* + * Unlocks a stream buffer for tasks. Handles all pended access from ISRs, then reenables preemption + * for the caller. + */ + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + static void prvUnlockStreamBufferForTasks( StreamBuffer_t * const pxStreamBuffer ) PRIVILEGED_FUNCTION; + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ + /* * The number of bytes available to be read from the buffer. */ @@ -327,6 +382,42 @@ static void prvInitialiseNewStreamBuffer( StreamBuffer_t * const pxStreamBuffer, StreamBufferCallbackFunction_t pxReceiveCompletedCallback ) PRIVILEGED_FUNCTION; /*-----------------------------------------------------------*/ + + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + static void prvLockStreamBufferForTasks( StreamBuffer_t * const pxStreamBuffer ) + { + /* Disable preemption so that the current task cannot be preempted by another task */ + vTaskPreemptionDisable( NULL ); + + /* Lock the stream buffer data group so that we can suspend the stream buffer atomically */ + sbENTER_CRITICAL( pxStreamBuffer ); + + /* Keep holding xTaskSpinlock after unlocking the data group to prevent tasks + * on other cores from accessing the stream buffer while it is suspended. */ + portGET_SPINLOCK( portGET_CORE_ID(), &( pxStreamBuffer->xTaskSpinlock ) ); + + sbEXIT_CRITICAL( pxStreamBuffer ); + } + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ +/*-----------------------------------------------------------*/ + + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + static void prvUnlockStreamBufferForTasks( StreamBuffer_t * const pxStreamBuffer ) + { + /* Lock the stream buffer data group so that we can handle any pended accesses atomically */ + sbENTER_CRITICAL( pxStreamBuffer ); + + /* Release the previously held task spinlock */ + portRELEASE_SPINLOCK( portGET_CORE_ID(), &( pxStreamBuffer->xTaskSpinlock ) ); + + sbEXIT_CRITICAL( pxStreamBuffer ); + + /* Re-enable preemption */ + vTaskPreemptionEnable( NULL ); + } + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ +/*-----------------------------------------------------------*/ + #if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) StreamBufferHandle_t xStreamBufferGenericCreate( size_t xBufferSizeBytes, size_t xTriggerLevelBytes, @@ -405,6 +496,13 @@ static void prvInitialiseNewStreamBuffer( StreamBuffer_t * const pxStreamBuffer, pxSendCompletedCallback, pxReceiveCompletedCallback ); + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + { + portINIT_SPINLOCK( &( ( ( StreamBuffer_t * ) pvAllocatedMemory )->xTaskSpinlock ) ); + portINIT_SPINLOCK( &( ( ( StreamBuffer_t * ) pvAllocatedMemory )->xISRSpinlock ) ); + } + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ + traceSTREAM_BUFFER_CREATE( ( ( StreamBuffer_t * ) pvAllocatedMemory ), xStreamBufferType ); } else @@ -499,6 +597,13 @@ static void prvInitialiseNewStreamBuffer( StreamBuffer_t * const pxStreamBuffer, * again. 
*/ pxStreamBuffer->ucFlags |= sbFLAGS_IS_STATICALLY_ALLOCATED; + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + { + portINIT_SPINLOCK( &( pxStreamBuffer->xTaskSpinlock ) ); + portINIT_SPINLOCK( &( pxStreamBuffer->xISRSpinlock ) ); + } + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ + traceSTREAM_BUFFER_CREATE( pxStreamBuffer, xStreamBufferType ); /* MISRA Ref 11.3.1 [Misaligned access] */ @@ -614,7 +719,7 @@ BaseType_t xStreamBufferReset( StreamBufferHandle_t xStreamBuffer ) #endif /* Can only reset a message buffer if there are no tasks blocked on it. */ - taskENTER_CRITICAL(); + sbENTER_CRITICAL( pxStreamBuffer ); { if( ( pxStreamBuffer->xTaskWaitingToReceive == NULL ) && ( pxStreamBuffer->xTaskWaitingToSend == NULL ) ) { @@ -644,7 +749,7 @@ BaseType_t xStreamBufferReset( StreamBufferHandle_t xStreamBuffer ) xReturn = pdPASS; } } - taskEXIT_CRITICAL(); + sbEXIT_CRITICAL( pxStreamBuffer ); traceRETURN_xStreamBufferReset( xReturn ); @@ -872,7 +977,7 @@ size_t xStreamBufferSend( StreamBufferHandle_t xStreamBuffer, { /* Wait until the required number of bytes are free in the message * buffer. */ - taskENTER_CRITICAL(); + sbENTER_CRITICAL( pxStreamBuffer ); { xSpace = xStreamBufferSpacesAvailable( pxStreamBuffer ); @@ -887,11 +992,11 @@ size_t xStreamBufferSend( StreamBufferHandle_t xStreamBuffer, } else { - taskEXIT_CRITICAL(); + sbEXIT_CRITICAL( pxStreamBuffer ); break; } } - taskEXIT_CRITICAL(); + sbEXIT_CRITICAL( pxStreamBuffer ); traceBLOCKING_ON_STREAM_BUFFER_SEND( xStreamBuffer ); ( void ) xTaskNotifyWaitIndexed( pxStreamBuffer->uxNotificationIndex, ( uint32_t ) 0, ( uint32_t ) 0, NULL, xTicksToWait ); @@ -1087,7 +1192,7 @@ size_t xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer, { /* Checking if there is data and clearing the notification state must be * performed atomically. 
*/ - taskENTER_CRITICAL(); + sbENTER_CRITICAL( pxStreamBuffer ); { xBytesAvailable = prvBytesInBuffer( pxStreamBuffer ); @@ -1112,7 +1217,7 @@ size_t xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer, mtCOVERAGE_TEST_MARKER(); } } - taskEXIT_CRITICAL(); + sbEXIT_CRITICAL( pxStreamBuffer ); if( xBytesAvailable <= xBytesToStoreMessageLength ) { @@ -1409,7 +1514,7 @@ BaseType_t xStreamBufferSendCompletedFromISR( StreamBufferHandle_t xStreamBuffer /* MISRA Ref 4.7.1 [Return value shall be checked] */ /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#dir-47 */ /* coverity[misra_c_2012_directive_4_7_violation] */ - uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR(); + uxSavedInterruptStatus = sbENTER_CRITICAL_FROM_ISR( pxStreamBuffer ); { if( ( pxStreamBuffer )->xTaskWaitingToReceive != NULL ) { @@ -1426,7 +1531,7 @@ BaseType_t xStreamBufferSendCompletedFromISR( StreamBufferHandle_t xStreamBuffer xReturn = pdFALSE; } } - taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus ); + sbEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, pxStreamBuffer ); traceRETURN_xStreamBufferSendCompletedFromISR( xReturn ); @@ -1448,7 +1553,7 @@ BaseType_t xStreamBufferReceiveCompletedFromISR( StreamBufferHandle_t xStreamBuf /* MISRA Ref 4.7.1 [Return value shall be checked] */ /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#dir-47 */ /* coverity[misra_c_2012_directive_4_7_violation] */ - uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR(); + uxSavedInterruptStatus = sbENTER_CRITICAL_FROM_ISR( pxStreamBuffer ); { if( ( pxStreamBuffer )->xTaskWaitingToSend != NULL ) { @@ -1465,7 +1570,7 @@ BaseType_t xStreamBufferReceiveCompletedFromISR( StreamBufferHandle_t xStreamBuf xReturn = pdFALSE; } } - taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus ); + sbEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, pxStreamBuffer ); traceRETURN_xStreamBufferReceiveCompletedFromISR( xReturn ); diff --git a/tasks.c b/tasks.c index d7153f680e..539fa5da82 100644 --- a/tasks.c +++ b/tasks.c @@ -350,6 +350,38 @@ #endif /* #if ( configNUMBER_OF_CORES > 1 ) */ /*-----------------------------------------------------------*/ +/* Macros to take and release kernel spinlocks */ +#if ( configNUMBER_OF_CORES > 1 ) + #if ( portUSING_GRANULAR_LOCKS == 1 ) + #define taskGET_TASK_LOCK( xCoreID ) portGET_SPINLOCK( xCoreID, &xTaskSpinlock ) + #define taskRELEASE_TASK_LOCK( xCoreID ) portRELEASE_SPINLOCK( xCoreID, &xTaskSpinlock ) + #define taskGET_ISR_LOCK( xCoreID ) portGET_SPINLOCK( xCoreID, &xISRSpinlock ) + #define taskRELEASE_ISR_LOCK( xCoreID ) portRELEASE_SPINLOCK( xCoreID, &xISRSpinlock ) + #else + #define taskGET_TASK_LOCK( xCoreID ) portGET_TASK_LOCK( xCoreID ) + #define taskRELEASE_TASK_LOCK( xCoreID ) portRELEASE_TASK_LOCK( xCoreID ) + #define taskGET_ISR_LOCK( xCoreID ) portGET_ISR_LOCK( xCoreID ) + #define taskRELEASE_ISR_LOCK( xCoreID ) portRELEASE_ISR_LOCK( xCoreID ) + #endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */ +#endif /* #if ( configNUMBER_OF_CORES > 1 ) */ + +/* + * Macros to mark the start and end of a critical code region. 
+ */ +#if ( portUSING_GRANULAR_LOCKS == 1 ) + #define kernelENTER_CRITICAL() vTaskEnterCritical() + #define kernelENTER_CRITICAL_FROM_ISR() vTaskEnterCriticalFromISR() + #define kernelEXIT_CRITICAL() vTaskExitCritical() + #define kernelEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus ) vTaskExitCriticalFromISR( uxSavedInterruptStatus ) +#else /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */ + #define kernelENTER_CRITICAL() taskENTER_CRITICAL() + #define kernelENTER_CRITICAL_FROM_ISR() taskENTER_CRITICAL_FROM_ISR() + #define kernelEXIT_CRITICAL() taskEXIT_CRITICAL() + #define kernelEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus ) taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus ) +#endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */ + +/*-----------------------------------------------------------*/ + /* * Task control block. A task control block (TCB) is allocated for each task, * and stores task state information, including a pointer to the task's context @@ -378,7 +410,7 @@ typedef struct tskTaskControlBlock /* The old naming convention is used to char pcTaskName[ configMAX_TASK_NAME_LEN ]; /**< Descriptive name given to the task when created. Facilitates debugging only. */ #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) - BaseType_t xPreemptionDisable; /**< Used to prevent the task from being preempted. */ + UBaseType_t xPreemptionDisable; /**< Used to prevent the task from being preempted. */ #endif #if ( ( portSTACK_GROWTH > 0 ) || ( configRECORD_STACK_HIGH_ADDRESS == 1 ) ) @@ -522,6 +554,12 @@ PRIVILEGED_DATA static volatile configRUN_TIME_COUNTER_TYPE ulTotalRunTime[ conf #endif +/* Kernel spinlock variables when using granular locking */ +#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + PRIVILEGED_DATA static portSPINLOCK_TYPE xTaskSpinlock = portINIT_SPINLOCK_STATIC; + PRIVILEGED_DATA static portSPINLOCK_TYPE xISRSpinlock = portINIT_SPINLOCK_STATIC; +#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ + /*-----------------------------------------------------------*/ /* File private functions. --------------------------------*/ @@ -805,7 +843,6 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION; #if ( configNUMBER_OF_CORES > 1 ) static void prvCheckForRunStateChange( void ) { - UBaseType_t uxPrevCriticalNesting; const TCB_t * pxThisTCB; BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID(); @@ -818,6 +855,8 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION; while( pxThisTCB->xTaskRunState == taskTASK_SCHEDULED_TO_YIELD ) { + UBaseType_t uxPrevCriticalNesting; + /* We are only here if we just entered a critical section * or if we just suspended the scheduler, and another task * has requested that we yield. 
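The tasks.c hunks above add the kernelENTER_CRITICAL()/kernelEXIT_CRITICAL() wrappers, the kernel-level xTaskSpinlock/xISRSpinlock pair, and change xPreemptionDisable from a pdTRUE/pdFALSE flag into an unsigned nesting count. The sketch below is illustrative only and is not part of the patch; the demo function name is made up. Assuming configUSE_TASK_PREEMPTION_DISABLE == 1, it shows the nesting behaviour the counter is intended to give vTaskPreemptionDisable()/vTaskPreemptionEnable(), which the data-group lock helpers introduced above (for example prvLockQueueForTasks() and prvUnlockQueueForTasks()) rely on so that a nested enable call does not re-enable preemption early.

/* Illustrative sketch only - not part of this patch. */
#include "FreeRTOS.h"
#include "task.h"

static void vDemoNestedPreemptionDisable( void ) /* Hypothetical demo function. */
{
    vTaskPreemptionDisable( NULL );     /* Count 0 -> 1: the calling task can no longer be preempted. */
    {
        vTaskPreemptionDisable( NULL ); /* Count 1 -> 2: e.g. a nested data-group lock; still not preemptible. */

        /* Non-deterministic work on a locked data group would go here. */

        vTaskPreemptionEnable( NULL );  /* Count 2 -> 1: preemption stays disabled. */
    }
    vTaskPreemptionEnable( NULL );      /* Count 1 -> 0: preemption restored; a pended yield can now run. */
}

With the previous BaseType_t flag, the inner vTaskPreemptionEnable() call would have re-enabled preemption while the outer region still expected it to be off; the counter removes that hazard.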
@@ -831,7 +870,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION; if( uxPrevCriticalNesting > 0U ) { portSET_CRITICAL_NESTING_COUNT( xCoreID, 0U ); - portRELEASE_ISR_LOCK( xCoreID ); + taskRELEASE_ISR_LOCK( xCoreID ); } else { @@ -840,7 +879,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION; mtCOVERAGE_TEST_MARKER(); } - portRELEASE_TASK_LOCK( xCoreID ); + taskRELEASE_TASK_LOCK( xCoreID ); portMEMORY_BARRIER(); configASSERT( pxThisTCB->xTaskRunState == taskTASK_SCHEDULED_TO_YIELD ); @@ -855,14 +894,14 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION; portDISABLE_INTERRUPTS(); xCoreID = ( BaseType_t ) portGET_CORE_ID(); - portGET_TASK_LOCK( xCoreID ); - portGET_ISR_LOCK( xCoreID ); + taskGET_TASK_LOCK( xCoreID ); + taskGET_ISR_LOCK( xCoreID ); portSET_CRITICAL_NESTING_COUNT( xCoreID, uxPrevCriticalNesting ); if( uxPrevCriticalNesting == 0U ) { - portRELEASE_ISR_LOCK( xCoreID ); + taskRELEASE_ISR_LOCK( xCoreID ); } } } @@ -925,7 +964,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION; #endif { #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) - if( pxCurrentTCBs[ xCoreID ]->xPreemptionDisable == pdFALSE ) + if( pxCurrentTCBs[ xCoreID ]->xPreemptionDisable == 0U ) #endif { xLowestPriorityToPreempt = xCurrentCoreTaskPriority; @@ -1231,7 +1270,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION; ( xYieldPendings[ uxCore ] == pdFALSE ) ) { #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) - if( pxCurrentTCBs[ uxCore ]->xPreemptionDisable == pdFALSE ) + if( pxCurrentTCBs[ uxCore ]->xPreemptionDisable == 0U ) #endif { xLowestPriority = xTaskPriority; @@ -2026,7 +2065,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, { /* Ensure interrupts don't access the task lists while the lists are being * updated. */ - taskENTER_CRITICAL(); + kernelENTER_CRITICAL(); { uxCurrentNumberOfTasks = ( UBaseType_t ) ( uxCurrentNumberOfTasks + 1U ); @@ -2084,7 +2123,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, portSETUP_TCB( pxNewTCB ); } - taskEXIT_CRITICAL(); + kernelEXIT_CRITICAL(); if( xSchedulerRunning != pdFALSE ) { @@ -2104,7 +2143,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, { /* Ensure interrupts don't access the task lists while the lists are being * updated. */ - taskENTER_CRITICAL(); + kernelENTER_CRITICAL(); { uxCurrentNumberOfTasks++; @@ -2153,7 +2192,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, mtCOVERAGE_TEST_MARKER(); } } - taskEXIT_CRITICAL(); + kernelEXIT_CRITICAL(); } #endif /* #if ( configNUMBER_OF_CORES == 1 ) */ @@ -2202,7 +2241,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, traceENTER_vTaskDelete( xTaskToDelete ); - taskENTER_CRITICAL(); + kernelENTER_CRITICAL(); { /* If null is passed in here then it is the calling task that is * being deleted. */ @@ -2310,7 +2349,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, prvResetNextTaskUnblockTime(); } } - taskEXIT_CRITICAL(); + kernelEXIT_CRITICAL(); /* If the task is not deleting itself, call prvDeleteTCB from outside of * critical section. 
If a task deletes itself, prvDeleteTCB is called @@ -2511,14 +2550,14 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, else #endif { - taskENTER_CRITICAL(); + kernelENTER_CRITICAL(); { pxStateList = listLIST_ITEM_CONTAINER( &( pxTCB->xStateListItem ) ); pxEventList = listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ); pxDelayedList = pxDelayedTaskList; pxOverflowedDelayedList = pxOverflowDelayedTaskList; } - taskEXIT_CRITICAL(); + kernelEXIT_CRITICAL(); if( pxEventList == &xPendingReadyList ) { @@ -2628,7 +2667,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, traceENTER_uxTaskPriorityGet( xTask ); - portBASE_TYPE_ENTER_CRITICAL(); + kernelENTER_CRITICAL(); { /* If null is passed in here then it is the priority of the task * that called uxTaskPriorityGet() that is being queried. */ @@ -2637,7 +2676,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, uxReturn = pxTCB->uxPriority; } - portBASE_TYPE_EXIT_CRITICAL(); + kernelEXIT_CRITICAL(); traceRETURN_uxTaskPriorityGet( uxReturn ); @@ -2678,7 +2717,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, /* MISRA Ref 4.7.1 [Return value shall be checked] */ /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#dir-47 */ /* coverity[misra_c_2012_directive_4_7_violation] */ - uxSavedInterruptStatus = ( UBaseType_t ) taskENTER_CRITICAL_FROM_ISR(); + uxSavedInterruptStatus = ( UBaseType_t ) kernelENTER_CRITICAL_FROM_ISR(); { /* If null is passed in here then it is the priority of the calling * task that is being queried. */ @@ -2687,7 +2726,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, uxReturn = pxTCB->uxPriority; } - taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus ); + kernelEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus ); traceRETURN_uxTaskPriorityGetFromISR( uxReturn ); @@ -2706,7 +2745,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, traceENTER_uxTaskBasePriorityGet( xTask ); - portBASE_TYPE_ENTER_CRITICAL(); + kernelENTER_CRITICAL(); { /* If null is passed in here then it is the base priority of the task * that called uxTaskBasePriorityGet() that is being queried. */ @@ -2715,7 +2754,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, uxReturn = pxTCB->uxBasePriority; } - portBASE_TYPE_EXIT_CRITICAL(); + kernelEXIT_CRITICAL(); traceRETURN_uxTaskBasePriorityGet( uxReturn ); @@ -2756,7 +2795,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, /* MISRA Ref 4.7.1 [Return value shall be checked] */ /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#dir-47 */ /* coverity[misra_c_2012_directive_4_7_violation] */ - uxSavedInterruptStatus = ( UBaseType_t ) taskENTER_CRITICAL_FROM_ISR(); + uxSavedInterruptStatus = ( UBaseType_t ) kernelENTER_CRITICAL_FROM_ISR(); { /* If null is passed in here then it is the base priority of the calling * task that is being queried. */ @@ -2765,7 +2804,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, uxReturn = pxTCB->uxBasePriority; } - taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus ); + kernelEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus ); traceRETURN_uxTaskBasePriorityGetFromISR( uxReturn ); @@ -2802,7 +2841,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, mtCOVERAGE_TEST_MARKER(); } - taskENTER_CRITICAL(); + kernelENTER_CRITICAL(); { /* If null is passed in here then it is the priority of the calling * task that is being changed. 
*/ @@ -2864,7 +2903,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, * there may now be another task of higher priority that * is ready to execute. */ #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) - if( pxTCB->xPreemptionDisable == pdFALSE ) + if( pxTCB->xPreemptionDisable == 0U ) #endif { xYieldRequired = pdTRUE; @@ -2982,7 +3021,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, ( void ) uxPriorityUsedOnEntry; } } - taskEXIT_CRITICAL(); + kernelEXIT_CRITICAL(); traceRETURN_vTaskPrioritySet(); } @@ -2999,7 +3038,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, traceENTER_vTaskCoreAffinitySet( xTask, uxCoreAffinityMask ); - taskENTER_CRITICAL(); + kernelENTER_CRITICAL(); { pxTCB = prvGetTCBFromHandle( xTask ); configASSERT( pxTCB != NULL ); @@ -3040,7 +3079,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, } } } - taskEXIT_CRITICAL(); + kernelEXIT_CRITICAL(); traceRETURN_vTaskCoreAffinitySet(); } @@ -3055,14 +3094,14 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, traceENTER_vTaskCoreAffinityGet( xTask ); - portBASE_TYPE_ENTER_CRITICAL(); + kernelENTER_CRITICAL(); { pxTCB = prvGetTCBFromHandle( xTask ); configASSERT( pxTCB != NULL ); uxCoreAffinityMask = pxTCB->uxCoreAffinityMask; } - portBASE_TYPE_EXIT_CRITICAL(); + kernelEXIT_CRITICAL(); traceRETURN_vTaskCoreAffinityGet( uxCoreAffinityMask ); @@ -3080,14 +3119,14 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, traceENTER_vTaskPreemptionDisable( xTask ); - taskENTER_CRITICAL(); + kernelENTER_CRITICAL(); { pxTCB = prvGetTCBFromHandle( xTask ); configASSERT( pxTCB != NULL ); - pxTCB->xPreemptionDisable = pdTRUE; + pxTCB->xPreemptionDisable++; } - taskEXIT_CRITICAL(); + kernelEXIT_CRITICAL(); traceRETURN_vTaskPreemptionDisable(); } @@ -3104,23 +3143,24 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, traceENTER_vTaskPreemptionEnable( xTask ); - taskENTER_CRITICAL(); + kernelENTER_CRITICAL(); { pxTCB = prvGetTCBFromHandle( xTask ); configASSERT( pxTCB != NULL ); + configASSERT( pxTCB->xPreemptionDisable > 0U ); - pxTCB->xPreemptionDisable = pdFALSE; + pxTCB->xPreemptionDisable--; if( xSchedulerRunning != pdFALSE ) { - if( taskTASK_IS_RUNNING( pxTCB ) == pdTRUE ) + if( ( pxTCB->xPreemptionDisable == 0U ) && ( taskTASK_IS_RUNNING( pxTCB ) == pdTRUE ) ) { xCoreID = ( BaseType_t ) pxTCB->xTaskRunState; prvYieldCore( xCoreID ); } } } - taskEXIT_CRITICAL(); + kernelEXIT_CRITICAL(); traceRETURN_vTaskPreemptionEnable(); } @@ -3136,7 +3176,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, traceENTER_vTaskSuspend( xTaskToSuspend ); - taskENTER_CRITICAL(); + kernelENTER_CRITICAL(); { /* If null is passed in here then it is the running task that is * being suspended. */ @@ -3224,7 +3264,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, } #endif /* #if ( configNUMBER_OF_CORES > 1 ) */ } - taskEXIT_CRITICAL(); + kernelEXIT_CRITICAL(); #if ( configNUMBER_OF_CORES == 1 ) { @@ -3234,11 +3274,11 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, { /* Reset the next expected unblock time in case it referred to the * task that is now in the Suspended state. 
*/ - taskENTER_CRITICAL(); + kernelENTER_CRITICAL(); { prvResetNextTaskUnblockTime(); } - taskEXIT_CRITICAL(); + kernelEXIT_CRITICAL(); } else { @@ -3387,7 +3427,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, if( pxTCB != NULL ) #endif { - taskENTER_CRITICAL(); + kernelENTER_CRITICAL(); { if( prvTaskIsTaskSuspended( pxTCB ) != pdFALSE ) { @@ -3408,7 +3448,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, mtCOVERAGE_TEST_MARKER(); } } - taskEXIT_CRITICAL(); + kernelEXIT_CRITICAL(); } else { @@ -3455,7 +3495,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, /* MISRA Ref 4.7.1 [Return value shall be checked] */ /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#dir-47 */ /* coverity[misra_c_2012_directive_4_7_violation] */ - uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR(); + uxSavedInterruptStatus = kernelENTER_CRITICAL_FROM_ISR(); { if( prvTaskIsTaskSuspended( pxTCB ) != pdFALSE ) { @@ -3511,7 +3551,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, mtCOVERAGE_TEST_MARKER(); } } - taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus ); + kernelEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus ); traceRETURN_xTaskResumeFromISR( xYieldRequired ); @@ -3878,12 +3918,20 @@ void vTaskSuspendAll( void ) * do not otherwise exhibit real time behaviour. */ portSOFTWARE_BARRIER(); - portGET_TASK_LOCK( xCoreID ); + taskGET_TASK_LOCK( xCoreID ); /* uxSchedulerSuspended is increased after prvCheckForRunStateChange. The * purpose is to prevent altering the variable when fromISR APIs are readying * it. */ - if( uxSchedulerSuspended == 0U ) + if( ( uxSchedulerSuspended == 0U ) + + /* If the scheduler is being suspended after taking a granular lock (in any data group) + * then we do not check for the run state of a task and we let it run through until + * the granular lock is released. */ + #if ( portUSING_GRANULAR_LOCKS == 1 ) + && ( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 0U ) + #endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */ + ) { prvCheckForRunStateChange(); } @@ -3892,17 +3940,12 @@ void vTaskSuspendAll( void ) mtCOVERAGE_TEST_MARKER(); } - /* Query the coreID again as prvCheckForRunStateChange may have - * caused the task to get scheduled on a different core. The correct - * task lock for the core is acquired in prvCheckForRunStateChange. */ - xCoreID = ( BaseType_t ) portGET_CORE_ID(); - - portGET_ISR_LOCK( xCoreID ); + taskGET_ISR_LOCK( xCoreID ); /* The scheduler is suspended if uxSchedulerSuspended is non-zero. An increment * is used to allow calls to vTaskSuspendAll() to nest. */ ++uxSchedulerSuspended; - portRELEASE_ISR_LOCK( xCoreID ); + taskRELEASE_ISR_LOCK( xCoreID ); portCLEAR_INTERRUPT_MASK( ulState ); } @@ -3998,7 +4041,7 @@ BaseType_t xTaskResumeAll( void ) * removed task will have been added to the xPendingReadyList. Once the * scheduler has been resumed it is safe to move all the pending ready * tasks from this list into their appropriate ready list. 
*/ - taskENTER_CRITICAL(); + kernelENTER_CRITICAL(); { const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID(); @@ -4007,7 +4050,7 @@ BaseType_t xTaskResumeAll( void ) configASSERT( uxSchedulerSuspended != 0U ); uxSchedulerSuspended = ( UBaseType_t ) ( uxSchedulerSuspended - 1U ); - portRELEASE_TASK_LOCK( xCoreID ); + taskRELEASE_TASK_LOCK( xCoreID ); if( uxSchedulerSuspended == ( UBaseType_t ) 0U ) { @@ -4097,7 +4140,11 @@ BaseType_t xTaskResumeAll( void ) } } - if( xYieldPendings[ xCoreID ] != pdFALSE ) + if( xYieldPendings[ xCoreID ] != pdFALSE + #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) + && pxCurrentTCBs[ xCoreID ]->xPreemptionDisable == 0U + #endif + ) { #if ( configUSE_PREEMPTION != 0 ) { @@ -4122,7 +4169,7 @@ BaseType_t xTaskResumeAll( void ) mtCOVERAGE_TEST_MARKER(); } } - taskEXIT_CRITICAL(); + kernelEXIT_CRITICAL(); } traceRETURN_xTaskResumeAll( xAlreadyYielded ); @@ -4551,11 +4598,11 @@ char * pcTaskGetName( TaskHandle_t xTaskToQuery ) configASSERT( xTicksToJump != ( TickType_t ) 0 ); /* Prevent the tick interrupt modifying xPendedTicks simultaneously. */ - taskENTER_CRITICAL(); + kernelENTER_CRITICAL(); { xPendedTicks++; } - taskEXIT_CRITICAL(); + kernelEXIT_CRITICAL(); xTicksToJump--; } else @@ -4587,11 +4634,11 @@ BaseType_t xTaskCatchUpTicks( TickType_t xTicksToCatchUp ) vTaskSuspendAll(); /* Prevent the tick interrupt modifying xPendedTicks simultaneously. */ - taskENTER_CRITICAL(); + kernelENTER_CRITICAL(); { xPendedTicks += xTicksToCatchUp; } - taskEXIT_CRITICAL(); + kernelEXIT_CRITICAL(); xYieldOccurred = xTaskResumeAll(); traceRETURN_xTaskCatchUpTicks( xYieldOccurred ); @@ -4628,7 +4675,7 @@ BaseType_t xTaskCatchUpTicks( TickType_t xTicksToCatchUp ) * the event list too. Interrupts can touch the event list item, * even though the scheduler is suspended, so a critical section * is used. */ - taskENTER_CRITICAL(); + kernelENTER_CRITICAL(); { if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL ) { @@ -4644,7 +4691,7 @@ BaseType_t xTaskCatchUpTicks( TickType_t xTicksToCatchUp ) mtCOVERAGE_TEST_MARKER(); } } - taskEXIT_CRITICAL(); + kernelEXIT_CRITICAL(); /* Place the unblocked task into the appropriate ready list. */ prvAddTaskToReadyList( pxTCB ); @@ -4671,11 +4718,11 @@ BaseType_t xTaskCatchUpTicks( TickType_t xTicksToCatchUp ) } #else /* #if ( configNUMBER_OF_CORES == 1 ) */ { - taskENTER_CRITICAL(); + kernelENTER_CRITICAL(); { prvYieldForTask( pxTCB ); } - taskEXIT_CRITICAL(); + kernelEXIT_CRITICAL(); } #endif /* #if ( configNUMBER_OF_CORES == 1 ) */ } @@ -4704,6 +4751,10 @@ BaseType_t xTaskIncrementTick( void ) traceENTER_xTaskIncrementTick(); + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + UBaseType_t uxSavedInterruptStatus = kernelENTER_CRITICAL_FROM_ISR(); + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ + /* Called by the portable layer each time a tick interrupt occurs. * Increments the tick then checks to see if the new tick value will cause any * tasks to be unblocked. */ @@ -4804,12 +4855,17 @@ BaseType_t xTaskIncrementTick( void ) /* Preemption is on, but a context switch should * only be performed if the unblocked task's * priority is higher than the currently executing - * task. + * task and the currently executing task does not + * have preemption disabled. 
* The case of equal priority tasks sharing * processing time (which happens when both * preemption and time slicing are on) is * handled below.*/ - if( pxTCB->uxPriority > pxCurrentTCB->uxPriority ) + if( pxTCB->uxPriority > pxCurrentTCB->uxPriority + #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) + && ( pxCurrentTCB->xPreemptionDisable == 0U ) + #endif /* #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) */ + ) { xSwitchRequired = pdTRUE; } @@ -4836,14 +4892,32 @@ BaseType_t xTaskIncrementTick( void ) { #if ( configNUMBER_OF_CORES == 1 ) { - if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ pxCurrentTCB->uxPriority ] ) ) > 1U ) - { - xSwitchRequired = pdTRUE; - } - else - { - mtCOVERAGE_TEST_MARKER(); - } + #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) + if( pxCurrentTCB->xPreemptionDisable != 0U ) + { + mtCOVERAGE_TEST_MARKER(); + } + else + { + if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ pxCurrentTCB->uxPriority ] ) ) > 1U ) + { + xSwitchRequired = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + #else /* #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) */ + if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ pxCurrentTCB->uxPriority ] ) ) > 1U ) + { + xSwitchRequired = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + #endif /* #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) */ } #else /* #if ( configNUMBER_OF_CORES == 1 ) */ { @@ -4885,7 +4959,11 @@ BaseType_t xTaskIncrementTick( void ) #if ( configNUMBER_OF_CORES == 1 ) { /* For single core the core ID is always 0. */ - if( xYieldPendings[ 0 ] != pdFALSE ) + if( xYieldPendings[ 0 ] != pdFALSE + #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) + && ( pxCurrentTCBs[ 0 ]->xPreemptionDisable == 0U ) + #endif /* #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) */ + ) { xSwitchRequired = pdTRUE; } @@ -4902,7 +4980,7 @@ BaseType_t xTaskIncrementTick( void ) for( xCoreID = 0; xCoreID < ( BaseType_t ) configNUMBER_OF_CORES; xCoreID++ ) { #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) - if( pxCurrentTCBs[ xCoreID ]->xPreemptionDisable == pdFALSE ) + if( pxCurrentTCBs[ xCoreID ]->xPreemptionDisable == 0U ) #endif { if( xYieldPendings[ xCoreID ] != pdFALSE ) @@ -4940,6 +5018,10 @@ BaseType_t xTaskIncrementTick( void ) #endif } + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + kernelEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus ); + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ + traceRETURN_xTaskIncrementTick( xSwitchRequired ); return xSwitchRequired; @@ -4968,11 +5050,11 @@ BaseType_t xTaskIncrementTick( void ) /* Save the hook function in the TCB. A critical section is required as * the value can be accessed from an interrupt. */ - taskENTER_CRITICAL(); + kernelENTER_CRITICAL(); { xTCB->pxTaskTag = pxHookFunction; } - taskEXIT_CRITICAL(); + kernelEXIT_CRITICAL(); traceRETURN_vTaskSetApplicationTaskTag(); } @@ -4995,11 +5077,11 @@ BaseType_t xTaskIncrementTick( void ) /* Save the hook function in the TCB. A critical section is required as * the value can be accessed from an interrupt. 
*/ - taskENTER_CRITICAL(); + kernelENTER_CRITICAL(); { xReturn = pxTCB->pxTaskTag; } - taskEXIT_CRITICAL(); + kernelEXIT_CRITICAL(); traceRETURN_xTaskGetApplicationTaskTag( xReturn ); @@ -5028,11 +5110,11 @@ BaseType_t xTaskIncrementTick( void ) /* MISRA Ref 4.7.1 [Return value shall be checked] */ /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#dir-47 */ /* coverity[misra_c_2012_directive_4_7_violation] */ - uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR(); + uxSavedInterruptStatus = kernelENTER_CRITICAL_FROM_ISR(); { xReturn = pxTCB->pxTaskTag; } - taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus ); + kernelEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus ); traceRETURN_xTaskGetApplicationTaskTagFromISR( xReturn ); @@ -5177,18 +5259,23 @@ BaseType_t xTaskIncrementTick( void ) * and move on if another core suspended the scheduler. We should only * do that if the current core has suspended the scheduler. */ - portGET_TASK_LOCK( xCoreID ); /* Must always acquire the task lock first. */ - portGET_ISR_LOCK( xCoreID ); + taskGET_TASK_LOCK( xCoreID ); /* Must always acquire the task lock first. */ + taskGET_ISR_LOCK( xCoreID ); { /* vTaskSwitchContext() must never be called from within a critical section. * This is not necessarily true for single core FreeRTOS, but it is for this * SMP port. */ configASSERT( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 0 ); - if( uxSchedulerSuspended != ( UBaseType_t ) 0U ) + if( uxSchedulerSuspended != ( UBaseType_t ) 0U + #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) + || ( ( taskTASK_IS_RUNNING( pxCurrentTCBs[ xCoreID ] ) ) && ( pxCurrentTCBs[ xCoreID ]->xPreemptionDisable > 0U ) ) + #endif + ) { - /* The scheduler is currently suspended - do not allow a context - * switch. */ + /* The scheduler is currently suspended or the task + * has requested to not be preempted - do not allow + * a context switch. */ xYieldPendings[ xCoreID ] = pdTRUE; } else @@ -5259,8 +5346,8 @@ BaseType_t xTaskIncrementTick( void ) #endif } } - portRELEASE_ISR_LOCK( xCoreID ); - portRELEASE_TASK_LOCK( xCoreID ); + taskRELEASE_ISR_LOCK( xCoreID ); + taskRELEASE_TASK_LOCK( xCoreID ); traceRETURN_vTaskSwitchContext(); } @@ -5274,8 +5361,15 @@ void vTaskPlaceOnEventList( List_t * const pxEventList, configASSERT( pxEventList ); - /* THIS FUNCTION MUST BE CALLED WITH THE - * SCHEDULER SUSPENDED AND THE QUEUE BEING ACCESSED LOCKED. */ + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + /* Suspend the kernel data group as we are about to access its members */ + vTaskSuspendAll(); + #else /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ + + /* THIS FUNCTION MUST BE CALLED WITH THE + * SCHEDULER SUSPENDED AND THE QUEUE BEING ACCESSED LOCKED. */ + configASSERT( uxSchedulerSuspended != ( UBaseType_t ) 0U ); + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ /* Place the event list item of the TCB in the appropriate event list. * This is placed in the list in priority order so the highest priority task @@ -5292,6 +5386,11 @@ void vTaskPlaceOnEventList( List_t * const pxEventList, prvAddCurrentTaskToDelayedList( xTicksToWait, pdTRUE ); + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + /* We are done accessing the kernel data group. Resume it. 
*/ + ( void ) xTaskResumeAll(); + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ + traceRETURN_vTaskPlaceOnEventList(); } /*-----------------------------------------------------------*/ @@ -5304,9 +5403,16 @@ void vTaskPlaceOnUnorderedEventList( List_t * pxEventList, configASSERT( pxEventList ); - /* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. It is used by - * the event groups implementation. */ - configASSERT( uxSchedulerSuspended != ( UBaseType_t ) 0U ); + + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + /* Suspend the kernel data group as we are about to access its members */ + vTaskSuspendAll(); + #else /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ + + /* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. It is used by + * the event groups implementation. */ + configASSERT( uxSchedulerSuspended != ( UBaseType_t ) 0U ); + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ /* Store the item value in the event list item. It is safe to access the * event list item here as interrupts won't access the event list item of a @@ -5322,6 +5428,11 @@ void vTaskPlaceOnUnorderedEventList( List_t * pxEventList, prvAddCurrentTaskToDelayedList( xTicksToWait, pdTRUE ); + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + /* We are done accessing the kernel data group. Resume it. */ + ( void ) xTaskResumeAll(); + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ + traceRETURN_vTaskPlaceOnUnorderedEventList(); } /*-----------------------------------------------------------*/ @@ -5336,11 +5447,17 @@ void vTaskPlaceOnUnorderedEventList( List_t * pxEventList, configASSERT( pxEventList ); - /* This function should not be called by application code hence the - * 'Restricted' in its name. It is not part of the public API. It is - * designed for use by kernel code, and has special calling requirements - - * it should be called with the scheduler suspended. */ + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + /* Suspend the kernel data group as we are about to access its members */ + vTaskSuspendAll(); + #else /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ + /* This function should not be called by application code hence the + * 'Restricted' in its name. It is not part of the public API. It is + * designed for use by kernel code, and has special calling requirements - + * it should be called with the scheduler suspended. */ + configASSERT( uxSchedulerSuspended != ( UBaseType_t ) 0U ); + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ /* Place the event list item of the TCB in the appropriate event list. * In this case it is assume that this is the only task that is going to @@ -5359,6 +5476,11 @@ void vTaskPlaceOnUnorderedEventList( List_t * pxEventList, traceTASK_DELAY_UNTIL( ( xTickCount + xTicksToWait ) ); prvAddCurrentTaskToDelayedList( xTicksToWait, xWaitIndefinitely ); + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + /* We are done accessing the kernel data group. Resume it. 
*/ + ( void ) xTaskResumeAll(); + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ + traceRETURN_vTaskPlaceOnEventListRestricted(); } @@ -5372,8 +5494,30 @@ BaseType_t xTaskRemoveFromEventList( const List_t * const pxEventList ) traceENTER_xTaskRemoveFromEventList( pxEventList ); - /* THIS FUNCTION MUST BE CALLED FROM A CRITICAL SECTION. It can also be - * called from a critical section within an ISR. */ + #if ( !( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) ) + + /* THIS FUNCTION MUST BE CALLED FROM A CRITICAL SECTION. It can also be + * called from a critical section within an ISR. */ + #else /* #if ( ! ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) ) */ + /* Lock the kernel data group as we are about to access its members */ + UBaseType_t uxSavedInterruptStatus; + + if( portCHECK_IF_IN_ISR() == pdTRUE ) + { + uxSavedInterruptStatus = kernelENTER_CRITICAL_FROM_ISR(); + } + else + { + uxSavedInterruptStatus = 0; + kernelENTER_CRITICAL(); + } + + /* Before taking the kernel lock, another task/ISR could have already + * emptied the pxEventList. So we insert a check here to see if + * pxEventList is empty before attempting to remove an item from it. */ + if( listLIST_IS_EMPTY( pxEventList ) == pdFALSE ) + { + #endif /* #if ( ! ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) ) */ /* The event list is sorted in priority order, so the first in the list can * be removed as it is known to be the highest priority. Remove the TCB from @@ -5453,6 +5597,26 @@ BaseType_t xTaskRemoveFromEventList( const List_t * const pxEventList ) } #endif /* #if ( configNUMBER_OF_CORES == 1 ) */ + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) +} +else +{ + /* The pxEventList was emptied before we entered the critical + * section, Nothing to do except return pdFALSE. */ + xReturn = pdFALSE; +} + +/* We are done accessing the kernel data group. Unlock it. */ +if( portCHECK_IF_IN_ISR() == pdTRUE ) +{ + kernelEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus ); +} +else +{ + kernelEXIT_CRITICAL(); +} + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ + traceRETURN_xTaskRemoveFromEventList( xReturn ); return xReturn; } @@ -5516,13 +5680,13 @@ void vTaskRemoveFromUnorderedEventList( ListItem_t * pxEventListItem, { #if ( configUSE_PREEMPTION == 1 ) { - taskENTER_CRITICAL(); + kernelENTER_CRITICAL(); { prvYieldForTask( pxUnblockedTCB ); } - taskEXIT_CRITICAL(); + kernelEXIT_CRITICAL(); } - #endif + #endif /* if ( configUSE_PREEMPTION == 1 ) */ } #endif /* #if ( configNUMBER_OF_CORES == 1 ) */ @@ -5535,12 +5699,12 @@ void vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) traceENTER_vTaskSetTimeOutState( pxTimeOut ); configASSERT( pxTimeOut ); - taskENTER_CRITICAL(); + kernelENTER_CRITICAL(); { pxTimeOut->xOverflowCount = xNumOfOverflows; pxTimeOut->xTimeOnEntering = xTickCount; } - taskEXIT_CRITICAL(); + kernelEXIT_CRITICAL(); traceRETURN_vTaskSetTimeOutState(); } @@ -5550,10 +5714,20 @@ void vTaskInternalSetTimeOutState( TimeOut_t * const pxTimeOut ) { traceENTER_vTaskInternalSetTimeOutState( pxTimeOut ); + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + /* Lock the kernel data group as we are about to access its members */ + kernelENTER_CRITICAL(); + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ + /* For internal use only as it does not use a critical section. 
*/ pxTimeOut->xOverflowCount = xNumOfOverflows; pxTimeOut->xTimeOnEntering = xTickCount; + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + /* We are done accessing the kernel data group. Unlock it. */ + kernelEXIT_CRITICAL(); + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ + traceRETURN_vTaskInternalSetTimeOutState(); } /*-----------------------------------------------------------*/ @@ -5568,7 +5742,7 @@ BaseType_t xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut, configASSERT( pxTimeOut ); configASSERT( pxTicksToWait ); - taskENTER_CRITICAL(); + kernelENTER_CRITICAL(); { /* Minor optimisation. The tick count cannot change in this block. */ const TickType_t xConstTickCount = xTickCount; @@ -5619,7 +5793,7 @@ BaseType_t xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut, xReturn = pdTRUE; } } - taskEXIT_CRITICAL(); + kernelEXIT_CRITICAL(); traceRETURN_xTaskCheckForTimeOut( xReturn ); @@ -5919,7 +6093,12 @@ static portTASK_FUNCTION( prvIdleTask, pvParameters ) traceENTER_eTaskConfirmSleepModeStatus(); - /* This function must be called from a critical section. */ + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + /* Lock the kernel data group as we are about to access its members */ + kernelENTER_CRITICAL(); + #else /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ + /* This function must be called from a critical section. */ + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ if( listCURRENT_LIST_LENGTH( &xPendingReadyList ) != 0U ) { @@ -5953,6 +6132,11 @@ static portTASK_FUNCTION( prvIdleTask, pvParameters ) mtCOVERAGE_TEST_MARKER(); } + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + /* We are done accessing the kernel data group. Unlock it. */ + kernelEXIT_CRITICAL(); + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ + traceRETURN_eTaskConfirmSleepModeStatus( eReturn ); return eReturn; @@ -6084,7 +6268,7 @@ static void prvCheckTasksWaitingTermination( void ) { #if ( configNUMBER_OF_CORES == 1 ) { - taskENTER_CRITICAL(); + kernelENTER_CRITICAL(); { { /* MISRA Ref 11.5.3 [Void pointer assignment] */ @@ -6096,7 +6280,7 @@ static void prvCheckTasksWaitingTermination( void ) --uxDeletedTasksWaitingCleanUp; } } - taskEXIT_CRITICAL(); + kernelEXIT_CRITICAL(); prvDeleteTCB( pxTCB ); } @@ -6104,7 +6288,7 @@ static void prvCheckTasksWaitingTermination( void ) { pxTCB = NULL; - taskENTER_CRITICAL(); + kernelENTER_CRITICAL(); { /* For SMP, multiple idles can be running simultaneously * and we need to check that other idles did not cleanup while we were @@ -6127,12 +6311,12 @@ static void prvCheckTasksWaitingTermination( void ) /* The TCB to be deleted still has not yet been switched out * by the scheduler, so we will just exit this loop early and * try again next time. */ - taskEXIT_CRITICAL(); + kernelEXIT_CRITICAL(); break; } } } - taskEXIT_CRITICAL(); + kernelEXIT_CRITICAL(); if( pxTCB != NULL ) { @@ -6254,14 +6438,14 @@ static void prvCheckTasksWaitingTermination( void ) /* Tasks can be in pending ready list and other state list at the * same time. These tasks are in ready state no matter what state * list the task is in. 
*/ - taskENTER_CRITICAL(); + kernelENTER_CRITICAL(); { if( listIS_CONTAINED_WITHIN( &xPendingReadyList, &( pxTCB->xEventListItem ) ) != pdFALSE ) { pxTaskStatus->eCurrentState = eReady; } } - taskEXIT_CRITICAL(); + kernelEXIT_CRITICAL(); } } else @@ -6583,7 +6767,9 @@ static void prvResetNextTaskUnblockTime( void ) else { #if ( configNUMBER_OF_CORES > 1 ) - taskENTER_CRITICAL(); + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) ) + kernelENTER_CRITICAL(); + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) */ #endif { if( uxSchedulerSuspended == ( UBaseType_t ) 0U ) @@ -6596,7 +6782,9 @@ static void prvResetNextTaskUnblockTime( void ) } } #if ( configNUMBER_OF_CORES > 1 ) - taskEXIT_CRITICAL(); + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) ) + kernelEXIT_CRITICAL(); + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) */ #endif } @@ -6617,91 +6805,95 @@ static void prvResetNextTaskUnblockTime( void ) traceENTER_xTaskPriorityInherit( pxMutexHolder ); - /* If the mutex is taken by an interrupt, the mutex holder is NULL. Priority - * inheritance is not applied in this scenario. */ - if( pxMutexHolder != NULL ) + kernelENTER_CRITICAL(); { - /* If the holder of the mutex has a priority below the priority of - * the task attempting to obtain the mutex then it will temporarily - * inherit the priority of the task attempting to obtain the mutex. */ - if( pxMutexHolderTCB->uxPriority < pxCurrentTCB->uxPriority ) + /* If the mutex is taken by an interrupt, the mutex holder is NULL. Priority + * inheritance is not applied in this scenario. */ + if( pxMutexHolder != NULL ) { - /* Adjust the mutex holder state to account for its new - * priority. Only reset the event list item value if the value is - * not being used for anything else. */ - if( ( listGET_LIST_ITEM_VALUE( &( pxMutexHolderTCB->xEventListItem ) ) & taskEVENT_LIST_ITEM_VALUE_IN_USE ) == ( ( TickType_t ) 0U ) ) - { - listSET_LIST_ITEM_VALUE( &( pxMutexHolderTCB->xEventListItem ), ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) pxCurrentTCB->uxPriority ); - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - - /* If the task being modified is in the ready state it will need - * to be moved into a new list. */ - if( listIS_CONTAINED_WITHIN( &( pxReadyTasksLists[ pxMutexHolderTCB->uxPriority ] ), &( pxMutexHolderTCB->xStateListItem ) ) != pdFALSE ) + /* If the holder of the mutex has a priority below the priority of + * the task attempting to obtain the mutex then it will temporarily + * inherit the priority of the task attempting to obtain the mutex. */ + if( pxMutexHolderTCB->uxPriority < pxCurrentTCB->uxPriority ) { - if( uxListRemove( &( pxMutexHolderTCB->xStateListItem ) ) == ( UBaseType_t ) 0 ) + /* Adjust the mutex holder state to account for its new + * priority. Only reset the event list item value if the value is + * not being used for anything else. */ + if( ( listGET_LIST_ITEM_VALUE( &( pxMutexHolderTCB->xEventListItem ) ) & taskEVENT_LIST_ITEM_VALUE_IN_USE ) == ( ( TickType_t ) 0U ) ) { - /* It is known that the task is in its ready list so - * there is no need to check again and the port level - * reset macro can be called directly. */ - portRESET_READY_PRIORITY( pxMutexHolderTCB->uxPriority, uxTopReadyPriority ); + listSET_LIST_ITEM_VALUE( &( pxMutexHolderTCB->xEventListItem ), ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) pxCurrentTCB->uxPriority ); } else { mtCOVERAGE_TEST_MARKER(); } - /* Inherit the priority before being moved into the new list. 
*/ - pxMutexHolderTCB->uxPriority = pxCurrentTCB->uxPriority; - prvAddTaskToReadyList( pxMutexHolderTCB ); - #if ( configNUMBER_OF_CORES > 1 ) + /* If the task being modified is in the ready state it will need + * to be moved into a new list. */ + if( listIS_CONTAINED_WITHIN( &( pxReadyTasksLists[ pxMutexHolderTCB->uxPriority ] ), &( pxMutexHolderTCB->xStateListItem ) ) != pdFALSE ) { - /* The priority of the task is raised. Yield for this task - * if it is not running. */ - if( taskTASK_IS_RUNNING( pxMutexHolderTCB ) != pdTRUE ) + if( uxListRemove( &( pxMutexHolderTCB->xStateListItem ) ) == ( UBaseType_t ) 0 ) + { + /* It is known that the task is in its ready list so + * there is no need to check again and the port level + * reset macro can be called directly. */ + portRESET_READY_PRIORITY( pxMutexHolderTCB->uxPriority, uxTopReadyPriority ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + /* Inherit the priority before being moved into the new list. */ + pxMutexHolderTCB->uxPriority = pxCurrentTCB->uxPriority; + prvAddTaskToReadyList( pxMutexHolderTCB ); + #if ( configNUMBER_OF_CORES > 1 ) { - prvYieldForTask( pxMutexHolderTCB ); + /* The priority of the task is raised. Yield for this task + * if it is not running. */ + if( taskTASK_IS_RUNNING( pxMutexHolderTCB ) != pdTRUE ) + { + prvYieldForTask( pxMutexHolderTCB ); + } } + #endif /* if ( configNUMBER_OF_CORES > 1 ) */ + } + else + { + /* Just inherit the priority. */ + pxMutexHolderTCB->uxPriority = pxCurrentTCB->uxPriority; } - #endif /* if ( configNUMBER_OF_CORES > 1 ) */ - } - else - { - /* Just inherit the priority. */ - pxMutexHolderTCB->uxPriority = pxCurrentTCB->uxPriority; - } - traceTASK_PRIORITY_INHERIT( pxMutexHolderTCB, pxCurrentTCB->uxPriority ); + traceTASK_PRIORITY_INHERIT( pxMutexHolderTCB, pxCurrentTCB->uxPriority ); - /* Inheritance occurred. */ - xReturn = pdTRUE; - } - else - { - if( pxMutexHolderTCB->uxBasePriority < pxCurrentTCB->uxPriority ) - { - /* The base priority of the mutex holder is lower than the - * priority of the task attempting to take the mutex, but the - * current priority of the mutex holder is not lower than the - * priority of the task attempting to take the mutex. - * Therefore the mutex holder must have already inherited a - * priority, but inheritance would have occurred if that had - * not been the case. */ + /* Inheritance occurred. */ xReturn = pdTRUE; } else { - mtCOVERAGE_TEST_MARKER(); + if( pxMutexHolderTCB->uxBasePriority < pxCurrentTCB->uxPriority ) + { + /* The base priority of the mutex holder is lower than the + * priority of the task attempting to take the mutex, but the + * current priority of the mutex holder is not lower than the + * priority of the task attempting to take the mutex. + * Therefore the mutex holder must have already inherited a + * priority, but inheritance would have occurred if that had + * not been the case. 
*/ + xReturn = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } } } + else + { + mtCOVERAGE_TEST_MARKER(); + } } - else - { - mtCOVERAGE_TEST_MARKER(); - } + kernelEXIT_CRITICAL(); traceRETURN_xTaskPriorityInherit( xReturn ); @@ -6720,6 +6912,11 @@ static void prvResetNextTaskUnblockTime( void ) traceENTER_xTaskPriorityDisinherit( pxMutexHolder ); + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + /* Lock the kernel data group as we are about to access its members */ + kernelENTER_CRITICAL(); + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ + if( pxMutexHolder != NULL ) { /* A task can only have an inherited priority if it holds the mutex. @@ -6797,6 +6994,11 @@ static void prvResetNextTaskUnblockTime( void ) mtCOVERAGE_TEST_MARKER(); } + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + /* We are done accessing the kernel data group. Unlock it. */ + kernelEXIT_CRITICAL(); + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ + traceRETURN_xTaskPriorityDisinherit( xReturn ); return xReturn; @@ -6816,88 +7018,95 @@ static void prvResetNextTaskUnblockTime( void ) traceENTER_vTaskPriorityDisinheritAfterTimeout( pxMutexHolder, uxHighestPriorityWaitingTask ); - if( pxMutexHolder != NULL ) + kernelENTER_CRITICAL(); { - /* If pxMutexHolder is not NULL then the holder must hold at least - * one mutex. */ - configASSERT( pxTCB->uxMutexesHeld ); - - /* Determine the priority to which the priority of the task that - * holds the mutex should be set. This will be the greater of the - * holding task's base priority and the priority of the highest - * priority task that is waiting to obtain the mutex. */ - if( pxTCB->uxBasePriority < uxHighestPriorityWaitingTask ) - { - uxPriorityToUse = uxHighestPriorityWaitingTask; - } - else + if( pxMutexHolder != NULL ) { - uxPriorityToUse = pxTCB->uxBasePriority; - } + /* If pxMutexHolder is not NULL then the holder must hold at least + * one mutex. */ + configASSERT( pxTCB->uxMutexesHeld ); - /* Does the priority need to change? */ - if( pxTCB->uxPriority != uxPriorityToUse ) - { - /* Only disinherit if no other mutexes are held. This is a - * simplification in the priority inheritance implementation. If - * the task that holds the mutex is also holding other mutexes then - * the other mutexes may have caused the priority inheritance. */ - if( pxTCB->uxMutexesHeld == uxOnlyOneMutexHeld ) + /* Determine the priority to which the priority of the task that + * holds the mutex should be set. This will be the greater of the + * holding task's base priority and the priority of the highest + * priority task that is waiting to obtain the mutex. */ + if( pxTCB->uxBasePriority < uxHighestPriorityWaitingTask ) { - /* If a task has timed out because it already holds the - * mutex it was trying to obtain then it cannot of inherited - * its own priority. */ - configASSERT( pxTCB != pxCurrentTCB ); - - /* Disinherit the priority, remembering the previous - * priority to facilitate determining the subject task's - * state. */ - traceTASK_PRIORITY_DISINHERIT( pxTCB, uxPriorityToUse ); - uxPriorityUsedOnEntry = pxTCB->uxPriority; - pxTCB->uxPriority = uxPriorityToUse; + uxPriorityToUse = uxHighestPriorityWaitingTask; + } + else + { + uxPriorityToUse = pxTCB->uxBasePriority; + } - /* Only reset the event list item value if the value is not - * being used for anything else. 
*/ - if( ( listGET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ) ) & taskEVENT_LIST_ITEM_VALUE_IN_USE ) == ( ( TickType_t ) 0U ) ) - { - listSET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ), ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) uxPriorityToUse ); - } - else + /* Does the priority need to change? */ + if( pxTCB->uxPriority != uxPriorityToUse ) + { + /* Only disinherit if no other mutexes are held. This is a + * simplification in the priority inheritance implementation. If + * the task that holds the mutex is also holding other mutexes then + * the other mutexes may have caused the priority inheritance. */ + if( pxTCB->uxMutexesHeld == uxOnlyOneMutexHeld ) { - mtCOVERAGE_TEST_MARKER(); - } + /* If a task has timed out because it already holds the + * mutex it was trying to obtain then it cannot of inherited + * its own priority. */ + configASSERT( pxTCB != pxCurrentTCB ); - /* If the running task is not the task that holds the mutex - * then the task that holds the mutex could be in either the - * Ready, Blocked or Suspended states. Only remove the task - * from its current state list if it is in the Ready state as - * the task's priority is going to change and there is one - * Ready list per priority. */ - if( listIS_CONTAINED_WITHIN( &( pxReadyTasksLists[ uxPriorityUsedOnEntry ] ), &( pxTCB->xStateListItem ) ) != pdFALSE ) - { - if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 ) + /* Disinherit the priority, remembering the previous + * priority to facilitate determining the subject task's + * state. */ + traceTASK_PRIORITY_DISINHERIT( pxTCB, uxPriorityToUse ); + uxPriorityUsedOnEntry = pxTCB->uxPriority; + pxTCB->uxPriority = uxPriorityToUse; + + /* Only reset the event list item value if the value is not + * being used for anything else. */ + if( ( listGET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ) ) & taskEVENT_LIST_ITEM_VALUE_IN_USE ) == ( ( TickType_t ) 0U ) ) { - /* It is known that the task is in its ready list so - * there is no need to check again and the port level - * reset macro can be called directly. */ - portRESET_READY_PRIORITY( pxTCB->uxPriority, uxTopReadyPriority ); + listSET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ), ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) uxPriorityToUse ); } else { mtCOVERAGE_TEST_MARKER(); } - prvAddTaskToReadyList( pxTCB ); - #if ( configNUMBER_OF_CORES > 1 ) + /* If the running task is not the task that holds the mutex + * then the task that holds the mutex could be in either the + * Ready, Blocked or Suspended states. Only remove the task + * from its current state list if it is in the Ready state as + * the task's priority is going to change and there is one + * Ready list per priority. */ + if( listIS_CONTAINED_WITHIN( &( pxReadyTasksLists[ uxPriorityUsedOnEntry ] ), &( pxTCB->xStateListItem ) ) != pdFALSE ) { - /* The priority of the task is dropped. Yield the core on - * which the task is running. */ - if( taskTASK_IS_RUNNING( pxTCB ) == pdTRUE ) + if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 ) + { + /* It is known that the task is in its ready list so + * there is no need to check again and the port level + * reset macro can be called directly. */ + portRESET_READY_PRIORITY( pxTCB->uxPriority, uxTopReadyPriority ); + } + else { - prvYieldCore( pxTCB->xTaskRunState ); + mtCOVERAGE_TEST_MARKER(); } + + prvAddTaskToReadyList( pxTCB ); + #if ( configNUMBER_OF_CORES > 1 ) + { + /* The priority of the task is dropped. Yield the core on + * which the task is running. 
*/ + if( taskTASK_IS_RUNNING( pxTCB ) == pdTRUE ) + { + prvYieldCore( pxTCB->xTaskRunState ); + } + } + #endif /* if ( configNUMBER_OF_CORES > 1 ) */ + } + else + { + mtCOVERAGE_TEST_MARKER(); } - #endif /* if ( configNUMBER_OF_CORES > 1 ) */ } else { @@ -6914,10 +7123,7 @@ static void prvResetNextTaskUnblockTime( void ) mtCOVERAGE_TEST_MARKER(); } } - else - { - mtCOVERAGE_TEST_MARKER(); - } + kernelEXIT_CRITICAL(); traceRETURN_vTaskPriorityDisinheritAfterTimeout(); } @@ -7006,8 +7212,8 @@ static void prvResetNextTaskUnblockTime( void ) { if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 0U ) { - portGET_TASK_LOCK( xCoreID ); - portGET_ISR_LOCK( xCoreID ); + taskGET_TASK_LOCK( xCoreID ); + taskGET_ISR_LOCK( xCoreID ); } portINCREMENT_CRITICAL_NESTING_COUNT( xCoreID ); @@ -7042,7 +7248,6 @@ static void prvResetNextTaskUnblockTime( void ) } #endif /* #if ( configNUMBER_OF_CORES > 1 ) */ - /*-----------------------------------------------------------*/ #if ( configNUMBER_OF_CORES > 1 ) @@ -7060,7 +7265,7 @@ static void prvResetNextTaskUnblockTime( void ) if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 0U ) { - portGET_ISR_LOCK( xCoreID ); + taskGET_ISR_LOCK( xCoreID ); } portINCREMENT_CRITICAL_NESTING_COUNT( xCoreID ); @@ -7152,8 +7357,8 @@ static void prvResetNextTaskUnblockTime( void ) /* Get the xYieldPending stats inside the critical section. */ xYieldCurrentTask = xYieldPendings[ xCoreID ]; - portRELEASE_ISR_LOCK( xCoreID ); - portRELEASE_TASK_LOCK( xCoreID ); + taskRELEASE_ISR_LOCK( xCoreID ); + taskRELEASE_TASK_LOCK( xCoreID ); portENABLE_INTERRUPTS(); /* When a task yields in a critical section it just sets @@ -7208,7 +7413,7 @@ static void prvResetNextTaskUnblockTime( void ) if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 0U ) { - portRELEASE_ISR_LOCK( xCoreID ); + taskRELEASE_ISR_LOCK( xCoreID ); portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); } else @@ -7232,6 +7437,28 @@ static void prvResetNextTaskUnblockTime( void ) #endif /* #if ( configNUMBER_OF_CORES > 1 ) */ /*-----------------------------------------------------------*/ +#if ( configNUMBER_OF_CORES > 1 ) + + BaseType_t xTaskUnlockCanYield( void ) + { + BaseType_t xReturn; + BaseType_t xCoreID = portGET_CORE_ID(); + + if( ( xYieldPendings[ xCoreID ] == pdTRUE ) && ( uxSchedulerSuspended == pdFALSE ) && ( pxCurrentTCBs[ xCoreID ]->xPreemptionDisable == 0U ) ) + { + xReturn = pdTRUE; + } + else + { + xReturn = pdFALSE; + } + + return xReturn; + } + +#endif /* #if ( configNUMBER_OF_CORES > 1 ) */ +/*-----------------------------------------------------------*/ + #if ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) static char * prvWriteNameToBuffer( char * pcBuffer, @@ -7643,6 +7870,11 @@ TickType_t uxTaskResetEventItemValue( void ) traceENTER_pvTaskIncrementMutexHeldCount(); + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + /* Lock the kernel data group as we are about to access its members */ + kernelENTER_CRITICAL(); + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ + pxTCB = pxCurrentTCB; /* If xSemaphoreCreateMutex() is called before any tasks have been created @@ -7652,6 +7884,11 @@ TickType_t uxTaskResetEventItemValue( void ) ( pxTCB->uxMutexesHeld )++; } + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + /* We are done accessing the kernel data group. Unlock it. 
*/ + kernelEXIT_CRITICAL(); + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ + traceRETURN_pvTaskIncrementMutexHeldCount( pxTCB ); return pxTCB; @@ -7685,7 +7922,7 @@ TickType_t uxTaskResetEventItemValue( void ) * has occurred and set the flag to indicate that we are waiting for * a notification. If we do not do so, a notification sent from an ISR * will get lost. */ - taskENTER_CRITICAL(); + kernelENTER_CRITICAL(); { /* Only block if the notification count is not already non-zero. */ if( pxCurrentTCB->ulNotifiedValue[ uxIndexToWaitOn ] == 0U ) @@ -7701,7 +7938,7 @@ TickType_t uxTaskResetEventItemValue( void ) mtCOVERAGE_TEST_MARKER(); } } - taskEXIT_CRITICAL(); + kernelEXIT_CRITICAL(); /* We are now out of the critical section but the scheduler is still * suspended, so we are safe to do non-deterministic operations such @@ -7729,7 +7966,7 @@ TickType_t uxTaskResetEventItemValue( void ) } } - taskENTER_CRITICAL(); + kernelENTER_CRITICAL(); { traceTASK_NOTIFY_TAKE( uxIndexToWaitOn ); ulReturn = pxCurrentTCB->ulNotifiedValue[ uxIndexToWaitOn ]; @@ -7752,7 +7989,7 @@ TickType_t uxTaskResetEventItemValue( void ) pxCurrentTCB->ucNotifyState[ uxIndexToWaitOn ] = taskNOT_WAITING_NOTIFICATION; } - taskEXIT_CRITICAL(); + kernelEXIT_CRITICAL(); traceRETURN_ulTaskGenericNotifyTake( ulReturn ); @@ -7787,7 +8024,7 @@ TickType_t uxTaskResetEventItemValue( void ) /* We MUST enter a critical section to atomically check and update the * task notification value. If we do not do so, a notification from * an ISR will get lost. */ - taskENTER_CRITICAL(); + kernelENTER_CRITICAL(); { /* Only block if a notification is not already pending. */ if( pxCurrentTCB->ucNotifyState[ uxIndexToWaitOn ] != taskNOTIFICATION_RECEIVED ) @@ -7808,7 +8045,7 @@ TickType_t uxTaskResetEventItemValue( void ) mtCOVERAGE_TEST_MARKER(); } } - taskEXIT_CRITICAL(); + kernelEXIT_CRITICAL(); /* We are now out of the critical section but the scheduler is still * suspended, so we are safe to do non-deterministic operations such @@ -7836,7 +8073,7 @@ TickType_t uxTaskResetEventItemValue( void ) } } - taskENTER_CRITICAL(); + kernelENTER_CRITICAL(); { traceTASK_NOTIFY_WAIT( uxIndexToWaitOn ); @@ -7866,7 +8103,7 @@ TickType_t uxTaskResetEventItemValue( void ) pxCurrentTCB->ucNotifyState[ uxIndexToWaitOn ] = taskNOT_WAITING_NOTIFICATION; } - taskEXIT_CRITICAL(); + kernelEXIT_CRITICAL(); traceRETURN_xTaskGenericNotifyWait( xReturn ); @@ -7894,7 +8131,7 @@ TickType_t uxTaskResetEventItemValue( void ) configASSERT( xTaskToNotify ); pxTCB = xTaskToNotify; - taskENTER_CRITICAL(); + kernelENTER_CRITICAL(); { if( pulPreviousNotificationValue != NULL ) { @@ -7986,7 +8223,7 @@ TickType_t uxTaskResetEventItemValue( void ) mtCOVERAGE_TEST_MARKER(); } } - taskEXIT_CRITICAL(); + kernelEXIT_CRITICAL(); traceRETURN_xTaskGenericNotify( xReturn ); @@ -8038,7 +8275,7 @@ TickType_t uxTaskResetEventItemValue( void ) /* MISRA Ref 4.7.1 [Return value shall be checked] */ /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#dir-47 */ /* coverity[misra_c_2012_directive_4_7_violation] */ - uxSavedInterruptStatus = ( UBaseType_t ) taskENTER_CRITICAL_FROM_ISR(); + uxSavedInterruptStatus = ( UBaseType_t ) kernelENTER_CRITICAL_FROM_ISR(); { if( pulPreviousNotificationValue != NULL ) { @@ -8168,7 +8405,7 @@ TickType_t uxTaskResetEventItemValue( void ) #endif /* #if ( configNUMBER_OF_CORES == 1 ) */ } } - taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus ); + kernelEXIT_CRITICAL_FROM_ISR( 
uxSavedInterruptStatus ); traceRETURN_xTaskGenericNotifyFromISR( xReturn ); @@ -8216,7 +8453,7 @@ TickType_t uxTaskResetEventItemValue( void ) /* MISRA Ref 4.7.1 [Return value shall be checked] */ /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#dir-47 */ /* coverity[misra_c_2012_directive_4_7_violation] */ - uxSavedInterruptStatus = ( UBaseType_t ) taskENTER_CRITICAL_FROM_ISR(); + uxSavedInterruptStatus = ( UBaseType_t ) kernelENTER_CRITICAL_FROM_ISR(); { ucOriginalNotifyState = pxTCB->ucNotifyState[ uxIndexToNotify ]; pxTCB->ucNotifyState[ uxIndexToNotify ] = taskNOTIFICATION_RECEIVED; @@ -8302,7 +8539,7 @@ TickType_t uxTaskResetEventItemValue( void ) #endif /* #if ( configNUMBER_OF_CORES == 1 ) */ } } - taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus ); + kernelEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus ); traceRETURN_vTaskGenericNotifyGiveFromISR(); } @@ -8327,7 +8564,7 @@ TickType_t uxTaskResetEventItemValue( void ) pxTCB = prvGetTCBFromHandle( xTask ); configASSERT( pxTCB != NULL ); - taskENTER_CRITICAL(); + kernelENTER_CRITICAL(); { if( pxTCB->ucNotifyState[ uxIndexToClear ] == taskNOTIFICATION_RECEIVED ) { @@ -8339,7 +8576,7 @@ TickType_t uxTaskResetEventItemValue( void ) xReturn = pdFAIL; } } - taskEXIT_CRITICAL(); + kernelEXIT_CRITICAL(); traceRETURN_xTaskGenericNotifyStateClear( xReturn ); @@ -8367,14 +8604,14 @@ TickType_t uxTaskResetEventItemValue( void ) pxTCB = prvGetTCBFromHandle( xTask ); configASSERT( pxTCB != NULL ); - taskENTER_CRITICAL(); + kernelENTER_CRITICAL(); { /* Return the notification as it was before the bits were cleared, * then clear the bit mask. */ ulReturn = pxTCB->ulNotifiedValue[ uxIndexToClear ]; pxTCB->ulNotifiedValue[ uxIndexToClear ] &= ~ulBitsToClear; } - taskEXIT_CRITICAL(); + kernelEXIT_CRITICAL(); traceRETURN_ulTaskGenericNotifyValueClear( ulReturn ); diff --git a/timers.c b/timers.c index 03765fe7b9..e674feeff6 100644 --- a/timers.c +++ b/timers.c @@ -79,6 +79,17 @@ #define tmrSTATUS_IS_STATICALLY_ALLOCATED ( 0x02U ) #define tmrSTATUS_IS_AUTORELOAD ( 0x04U ) +/* + * Macros to mark the start and end of a critical code region. + */ + #if ( portUSING_GRANULAR_LOCKS == 1 ) + #define tmrENTER_CRITICAL() portENTER_CRITICAL_DATA_GROUP( ( portSPINLOCK_TYPE * ) &xTaskSpinlock, ( portSPINLOCK_TYPE * ) &xISRSpinlock ) + #define tmrEXIT_CRITICAL() portEXIT_CRITICAL_DATA_GROUP( ( portSPINLOCK_TYPE * ) &xTaskSpinlock, ( portSPINLOCK_TYPE * ) &xISRSpinlock ) + #else /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */ + #define tmrENTER_CRITICAL() taskENTER_CRITICAL() + #define tmrEXIT_CRITICAL() taskEXIT_CRITICAL() + #endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */ + /* The definition of the timers themselves. */ typedef struct tmrTimerControl /* The old naming convention is used to prevent breaking kernel aware debuggers. 
*/ { @@ -149,6 +160,11 @@ PRIVILEGED_DATA static QueueHandle_t xTimerQueue = NULL; PRIVILEGED_DATA static TaskHandle_t xTimerTaskHandle = NULL; + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + PRIVILEGED_DATA static portSPINLOCK_TYPE xTaskSpinlock = portINIT_SPINLOCK_STATIC; + PRIVILEGED_DATA static portSPINLOCK_TYPE xISRSpinlock = portINIT_SPINLOCK_STATIC; + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ + /*-----------------------------------------------------------*/ /* @@ -576,7 +592,7 @@ traceENTER_vTimerSetReloadMode( xTimer, xAutoReload ); configASSERT( xTimer ); - taskENTER_CRITICAL(); + tmrENTER_CRITICAL(); { if( xAutoReload != pdFALSE ) { @@ -587,7 +603,7 @@ pxTimer->ucStatus &= ( ( uint8_t ) ~tmrSTATUS_IS_AUTORELOAD ); } } - taskEXIT_CRITICAL(); + tmrEXIT_CRITICAL(); traceRETURN_vTimerSetReloadMode(); } @@ -601,7 +617,7 @@ traceENTER_xTimerGetReloadMode( xTimer ); configASSERT( xTimer ); - portBASE_TYPE_ENTER_CRITICAL(); + tmrENTER_CRITICAL(); { if( ( pxTimer->ucStatus & tmrSTATUS_IS_AUTORELOAD ) == 0U ) { @@ -614,7 +630,7 @@ xReturn = pdTRUE; } } - portBASE_TYPE_EXIT_CRITICAL(); + tmrEXIT_CRITICAL(); traceRETURN_xTimerGetReloadMode( xReturn ); @@ -1113,7 +1129,7 @@ /* Check that the list from which active timers are referenced, and the * queue used to communicate with the timer service, have been * initialised. */ - taskENTER_CRITICAL(); + tmrENTER_CRITICAL(); { if( xTimerQueue == NULL ) { @@ -1155,7 +1171,7 @@ mtCOVERAGE_TEST_MARKER(); } } - taskEXIT_CRITICAL(); + tmrEXIT_CRITICAL(); } /*-----------------------------------------------------------*/ @@ -1169,7 +1185,7 @@ configASSERT( xTimer ); /* Is the timer in the list of active timers? */ - portBASE_TYPE_ENTER_CRITICAL(); + tmrENTER_CRITICAL(); { if( ( pxTimer->ucStatus & tmrSTATUS_IS_ACTIVE ) == 0U ) { @@ -1180,7 +1196,7 @@ xReturn = pdTRUE; } } - portBASE_TYPE_EXIT_CRITICAL(); + tmrEXIT_CRITICAL(); traceRETURN_xTimerIsTimerActive( xReturn ); @@ -1197,11 +1213,11 @@ configASSERT( xTimer ); - taskENTER_CRITICAL(); + tmrENTER_CRITICAL(); { pvReturn = pxTimer->pvTimerID; } - taskEXIT_CRITICAL(); + tmrEXIT_CRITICAL(); traceRETURN_pvTimerGetTimerID( pvReturn ); @@ -1218,11 +1234,11 @@ configASSERT( xTimer ); - taskENTER_CRITICAL(); + tmrENTER_CRITICAL(); { pxTimer->pvTimerID = pvNewID; } - taskEXIT_CRITICAL(); + tmrEXIT_CRITICAL(); traceRETURN_vTimerSetTimerID(); }
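
The timers.c hunks above illustrate the general data-group locking pattern this patch applies: a data group owns one spinlock for task-level exclusion and one for ISR-level exclusion, hides them behind module-local ENTER/EXIT macros built on portENTER_CRITICAL_DATA_GROUP()/portEXIT_CRITICAL_DATA_GROUP(), and falls back to the existing global critical section when granular locking is not enabled. The sketch below is illustrative only and is not part of this change set; the xyz module, its spinlocks and vXyzIncrement() are hypothetical names, while portSPINLOCK_TYPE, portINIT_SPINLOCK_STATIC, PRIVILEGED_DATA and the two port critical-section macros are the ones referenced in the diff.

/* Illustrative sketch of a data group protected by granular locks, modelled
 * on the tmrENTER_CRITICAL()/tmrEXIT_CRITICAL() definitions above. The xyz
 * names are hypothetical placeholders. */

#include "FreeRTOS.h"
#include "task.h"

#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )

    /* One spinlock serialises task-level access to the group's data, the
     * other serialises access from ISRs. */
    PRIVILEGED_DATA static portSPINLOCK_TYPE xXyzTaskSpinlock = portINIT_SPINLOCK_STATIC;
    PRIVILEGED_DATA static portSPINLOCK_TYPE xXyzISRSpinlock = portINIT_SPINLOCK_STATIC;

    #define xyzENTER_CRITICAL()    portENTER_CRITICAL_DATA_GROUP( ( portSPINLOCK_TYPE * ) &xXyzTaskSpinlock, ( portSPINLOCK_TYPE * ) &xXyzISRSpinlock )
    #define xyzEXIT_CRITICAL()     portEXIT_CRITICAL_DATA_GROUP( ( portSPINLOCK_TYPE * ) &xXyzTaskSpinlock, ( portSPINLOCK_TYPE * ) &xXyzISRSpinlock )

#else

    /* Without granular locks the macros collapse to the existing global
     * critical section, so single-core and non-granular SMP builds keep
     * their current behaviour. */
    #define xyzENTER_CRITICAL()    taskENTER_CRITICAL()
    #define xyzEXIT_CRITICAL()     taskEXIT_CRITICAL()

#endif

/* Example use: a short, deterministic access to data owned by the group. */
static volatile UBaseType_t uxXyzCount = 0;

void vXyzIncrement( void )
{
    xyzENTER_CRITICAL();
    {
        uxXyzCount++;
    }
    xyzEXIT_CRITICAL();
}

Longer, non-deterministic operations in the patch do not sit inside these spinlock-based critical sections; they use scheduler suspension instead (vTaskSuspendAll()/xTaskResumeAll(), as in vTaskPlaceOnUnorderedEventList() above), which keeps interrupts enabled and defers contention handling until the group is released.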