Merge branch 'bugfix/release_coproc_regs_taskdelete' into 'master'

Release coprocessor registers when a task is deleted.

See merge request !765
Ivan Grokhotkov 2017-05-22 11:05:47 +08:00
commit 55fb4c54c8
6 changed files with 39 additions and 6 deletions
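
For orientation, here is the release path this merge wires up, pieced together from the hunks below (the call-site framing is abbreviated, and the file attributions refer to the hunks by their line numbers; every identifier shown appears in the diff itself):

/* Kernel task-deletion cleanup (hunk at line 3777 below): give the port layer
   a chance to release per-task coprocessor state before the TCB is freed. */
#if ( portUSING_MPU_WRAPPERS == 1 )
    vPortReleaseTaskMPUSettings( &( pxTCB->xMPUSettings ) );
#endif

/* Port layer (hunk at line 253 below): hand the dying task's coprocessor
   save area to the low-level helper. */
void vPortReleaseTaskMPUSettings( xMPU_SETTINGS *xMPUSettings )
{
    _xt_coproc_release( xMPUSettings->coproc_area );
}

/* Low-level Xtensa code (hunks at lines 378 and 931 below): _xt_coproc_release
   clears this core's coprocessor ownership records so the exception handler
   never spills state into freed memory. */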

@@ -210,6 +210,7 @@ BaseType_t xPortInIsrContext();
#if( portUSING_MPU_WRAPPERS == 1 )
struct xMEMORY_REGION;
void vPortStoreTaskMPUSettings( xMPU_SETTINGS *xMPUSettings, const struct xMEMORY_REGION * const xRegions, StackType_t *pxBottomOfStack, uint32_t usStackDepth ) PRIVILEGED_FUNCTION;
void vPortReleaseTaskMPUSettings( xMPU_SETTINGS *xMPUSettings );
#endif
/* Multi-core: get current core ID */

@@ -172,6 +172,7 @@ typedef struct {
#define portASSERT_IF_IN_ISR() vPortAssertIfInISR()
void vPortAssertIfInISR();
#define portCRITICAL_NESTING_IN_TCB 1
/*
@@ -313,6 +314,10 @@ typedef struct {
#define PRIVILEGED_DATA
#endif
void _xt_coproc_release(volatile void * coproc_sa_base);
// porttrace
#if configUSE_TRACE_FACILITY_2
#include "porttrace.h"

@@ -253,6 +253,13 @@ void vPortStoreTaskMPUSettings( xMPU_SETTINGS *xMPUSettings, const struct xMEMOR
*/
#endif
}
void vPortReleaseTaskMPUSettings( xMPU_SETTINGS *xMPUSettings )
{
/* If the task has live floating-point registers somewhere, release them */
_xt_coproc_release( xMPUSettings->coproc_area );
}
#endif
/*

@@ -3550,9 +3550,16 @@ static void prvCheckTasksWaitingTermination( void )
{
pxTCB = ( TCB_t * ) listGET_OWNER_OF_HEAD_ENTRY( ( &xTasksWaitingTermination ) );
( void ) uxListRemove( &( pxTCB->xGenericListItem ) );
--uxCurrentNumberOfTasks;
--uxTasksDeleted;
/* We only want to kill tasks that ran on this core because, for example, _xt_coproc_release needs to
be called on the core the task is pinned to, if any */
if( pxTCB->xCoreID == tskNO_AFFINITY || pxTCB->xCoreID == xPortGetCoreID()) {
( void ) uxListRemove( &( pxTCB->xGenericListItem ) );
--uxCurrentNumberOfTasks;
--uxTasksDeleted;
} else {
/* Need to wait until the idle task on the other processor kills that task first. */
break;
}
}
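
For context, a minimal sketch of why the per-core check above is enough (this is the standard FreeRTOS idle-task structure, not part of this diff, with the body trimmed to the relevant call):

/* Each core runs its own idle task, and the idle loop is what calls
   prvCheckTasksWaitingTermination(); a task pinned to the other core is
   therefore reclaimed, and its coprocessor registers released, by that
   core's idle task. */
static portTASK_FUNCTION( prvIdleTask, pvParameters )
{
    for( ;; )
    {
        prvCheckTasksWaitingTermination();
        /* ...rest of the idle loop... */
    }
}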
#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS > 0 ) && ( configTHREAD_LOCAL_STORAGE_DELETE_CALLBACKS )
@@ -3770,6 +3777,10 @@ BaseType_t xTaskGetAffinity( TaskHandle_t xTask )
}
#endif /* configUSE_NEWLIB_REENTRANT */
#if ( portUSING_MPU_WRAPPERS == 1 )
vPortReleaseTaskMPUSettings( &( pxTCB->xMPUSettings) );
#endif
#if( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 0 ) && ( portUSING_MPU_WRAPPERS == 0 ) )
{
/* The task can only have been allocated dynamically - free both

@@ -378,6 +378,9 @@ May be called when a thread terminates or completes but does not delete
the co-proc save area, to avoid the exception handler having to save the
thread's co-proc state before another thread can use it (optimization).
Needs to be called on the processor the thread was running on. Unpinned threads
won't have an entry here because they get pinned as soon as they use a coprocessor.
Entry Conditions:
A2 = Pointer to base of co-processor state save area.
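
A conceptual C sketch of what _xt_coproc_release does (the real routine is Xtensa assembly; the per-core layout of _xt_coproc_owner_sa is taken from the hunk further below, while the types, the loop framing, and the omission of interrupt masking are assumptions of this sketch):

void _xt_coproc_release( volatile void *coproc_sa_base )
{
    /* One owner save-area pointer per coprocessor, grouped per core:
       XCHAL_CP_MAX entries for core 0, then XCHAL_CP_MAX for core 1, ... */
    extern volatile void *_xt_coproc_owner_sa[];
    volatile void **owner = &_xt_coproc_owner_sa[ xPortGetCoreID() * XCHAL_CP_MAX ];
    int cp;

    for( cp = 0; cp < XCHAL_CP_MAX; cp++ ) {
        if( owner[ cp ] == coproc_sa_base ) {
            owner[ cp ] = NULL;   /* coprocessor cp no longer has a live owner */
        }
    }
}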

@@ -931,7 +931,6 @@ _xt_coproc_exc:
addx4 a0, a5, a0 /* a0 = &_xt_coproc_mask[n] */
l32i a0, a0, 0 /* a0 = (n << 16) | (1 << n) */
/* TODO: Remove this as soon as coprocessor state moving works across cores - JD */
/* FPU operations are incompatible with non-pinned tasks. If we see an FPU operation
here, it's better to pin the task to whatever core we're running on now, to keep the
whole thing from crashing. */
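
In C terms, the pinning described above amounts to something like this (xCoreID, tskNO_AFFINITY, xPortGetCoreID and TCB_t all appear in the hunk at line 3550 earlier in this diff; the helper itself is hypothetical, since the real logic lives in the assembly that follows):

/* Hypothetical helper: on the first coprocessor use by an unpinned task,
   pin it to the core that took the exception, so its FPU state never has
   to migrate to the other core. */
static void prvPinTaskToThisCore( TCB_t *pxTask )
{
    if( pxTask->xCoreID == tskNO_AFFINITY )
    {
        pxTask->xCoreID = xPortGetCoreID();
    }
}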
@@ -944,14 +943,21 @@ _xt_coproc_exc:
/* Grab correct xt_coproc_owner_sa for this core */
movi a2, XCHAL_CP_MAX << 2
mull a2, a2, a3
mull a2, a2, a3 /* multiply by current processor id */
movi a3, _xt_coproc_owner_sa /* a3 = base of owner array */
add a3, a3, a2
add a3, a3, a2 /* a3 = owner area needed for this processor */
extui a2, a0, 0, 16 /* coprocessor bitmask portion */
or a4, a4, a2 /* a4 = CPENABLE | (1 << n) */
wsr a4, CPENABLE
/*
Keep the load of _xt_coproc_owner_sa[n] atomic (load it once, then use that value
everywhere): _xt_coproc_release relies on this so that it does not need any locking.
*/
/* Get old coprocessor owner thread (save area ptr) and assign new one. */
addx4 a3, a5, a3 /* a3 = &_xt_coproc_owner_sa[n] */
l32i a2, a3, 0 /* a2 = old owner's save area */
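
The rule in that comment, rendered as a hedged C fragment (names are illustrative, not from the source; the assembly above is the authoritative version): the owner slot is read exactly once into a local, and only that local is used afterwards, which is why _xt_coproc_release can clear the slot without any lock being needed.

/* Illustrative only: pxOwnerSlot stands for &_xt_coproc_owner_sa[n], pvNewOwner
   for the incoming thread's save area. */
static void *prvClaimCoproc( volatile void **pxOwnerSlot, void *pvNewOwner )
{
    void *pvOldOwner = ( void * ) *pxOwnerSlot;   /* single load, like l32i a2, a3, 0 */
    *pxOwnerSlot = pvNewOwner;                    /* assign the new owner              */
    return pvOldOwner;                            /* caller saves old state if non-NULL */
}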