fix(freertos-smp): Fixed the locking in vTaskSwitchContext()
vTaskSwitchContext() must acquire both kernel locks, namely the task
lock and the ISR lock, because it can be called from either task or
ISR context. It must also not alter the interrupt state prematurely.

Signed-off-by: Sudeep Mohanty <[email protected]>
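For reference, a condensed sketch of the locking pattern this change introduces, using the macro and spinlock names that appear in the diff below. The function signature and the task-selection step in the middle are simplified placeholders for illustration, not the actual tasks.c code.

    /* Sketch of the context-aware lock/unlock pattern applied inside
     * vTaskSwitchContext() in the granular-locks SMP build. Macro and
     * spinlock names are taken from the diff below; everything else is
     * simplified. */
    void vTaskSwitchContext( void )
    {
        UBaseType_t uxSavedInterruptStatus = 0;

        /* Lock the kernel data group. Both the task lock and the ISR lock
         * are involved because this function can run from task or ISR
         * context. */
        if( portCHECK_IF_IN_ISR() == pdTRUE )
        {
            /* From an ISR: take the ISR spinlock and keep the interrupt
             * state returned by the macro until the matching unlock. */
            uxSavedInterruptStatus = taskLOCK_DATA_GROUP_FROM_ISR( &xISRSpinlock );
        }
        else
        {
            /* From a task: take the task spinlock and the ISR spinlock. */
            taskLOCK_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock );
        }

        /* ... select the next task to run (omitted) ... */

        /* Unlock in the same context-aware way. */
        if( portCHECK_IF_IN_ISR() == pdTRUE )
        {
            taskUNLOCK_DATA_GROUP_FROM_ISR( uxSavedInterruptStatus, &xISRSpinlock );
        }
        else
        {
            taskUNLOCK_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock );
        }
    }

On the ISR path the interrupt state saved by taskLOCK_DATA_GROUP_FROM_ISR() is only restored at the matching unlock, which is what keeps vTaskSwitchContext() from altering the interrupt state prematurely.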
sudeep-mohanty committed Jul 17, 2024
1 parent 1b98758 commit b54f3c4
Showing 1 changed file with 18 additions and 36 deletions.
54 changes: 18 additions & 36 deletions tasks.c
@@ -5231,32 +5231,21 @@ BaseType_t xTaskIncrementTick( void )
{
traceENTER_vTaskSwitchContext();

#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
/* Lock the kernel data group as we are about to access its members */
UBaseType_t uxSavedInterruptStatus;

if( portCHECK_IF_IN_ISR() == pdTRUE )
{
uxSavedInterruptStatus = taskLOCK_DATA_GROUP_FROM_ISR( &xISRSpinlock );
}
else
{
uxSavedInterruptStatus = 0;
taskLOCK_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock );
}
#else /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */

/* Acquire both locks:
* - The ISR lock protects the ready list from simultaneous access by
* both other ISRs and tasks.
* - We also take the task lock to pause here in case another core has
* suspended the scheduler. We don't want to simply set xYieldPending
* and move on if another core suspended the scheduler. We should only
* do that if the current core has suspended the scheduler. */

#if ( ( portUSING_GRANULAR_LOCKS == 1 ) )
portGET_SPINLOCK( &xTaskSpinlock );
portGET_SPINLOCK( &xISRSpinlock );
#else /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) ) */
portGET_TASK_LOCK(); /* Must always acquire the task lock first. */
portGET_ISR_LOCK();
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) ) */
{
#if ( !( portUSING_GRANULAR_LOCKS == 1 ) )

@@ -5340,20 +5329,13 @@ BaseType_t xTaskIncrementTick( void )
#endif
}
}
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
/* We are done accessing the kernel data group. Unlock it. */
if( portCHECK_IF_IN_ISR() == pdTRUE )
{
taskUNLOCK_DATA_GROUP_FROM_ISR( uxSavedInterruptStatus, &xISRSpinlock );
}
else
{
taskUNLOCK_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock );
}
#else /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) )
portRELEASE_SPINLOCK( &xISRSpinlock );
portRELEASE_SPINLOCK( &xTaskSpinlock );
#else /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) ) */
portRELEASE_ISR_LOCK();
portRELEASE_TASK_LOCK();
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) ) */

traceRETURN_vTaskSwitchContext();
}
