atomic: support fetch_and, fetch_or and fetch_xor
parent 5f771fb7be
commit 8434845050
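What this commit enables, sketched below: on Xtensa cores that lack the S32C1I compare-and-swap instruction, GCC cannot inline atomic read-modify-write operations and instead emits calls to library helpers such as __atomic_fetch_and_4. With the functions added here, ordinary C11 <stdatomic.h> code links and runs. A minimal sketch, assuming sequentially consistent defaults; the names event_flags and clear_error_bits are illustrative only, not part of the commit:

#include <stdatomic.h>
#include <stdint.h>

static _Atomic uint32_t event_flags;

uint32_t clear_error_bits(uint32_t mask)
{
    // On a CPU without native atomic AND, GCC lowers this call to
    // __atomic_fetch_and_4(&event_flags, ~mask, __ATOMIC_SEQ_CST),
    // i.e. the FETCH_AND(4, uint32_t) instance added in this commit.
    return atomic_fetch_and(&event_flags, ~mask);
}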
@@ -2,6 +2,7 @@
 archive: libxtensa.a
 entries:
     eri (noflash_text)
+    stdatomic (noflash)
 
 [mapping:hal]
 archive: libhal.a
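A note on the (noflash) placement, assuming the usual meaning of these linker fragments (noflash moves the object's code out of flash into IRAM): atomic helpers may be called from interrupt handlers that run while the flash cache is disabled, so they must not live in flash. A hypothetical sketch; my_isr and isr_events are illustrative names:

#include <stdatomic.h>
#include <stdint.h>
#include "esp_attr.h"   // IRAM_ATTR places the handler itself in IRAM

static _Atomic uint32_t isr_events;

static void IRAM_ATTR my_isr(void *arg)
{
    // Expands to a __atomic_fetch_or_4() call on CPUs without S32C1I;
    // since this handler may run with the flash cache off, that helper
    // must be linked into IRAM too, hence the 'stdatomic (noflash)' entry.
    atomic_fetch_or(&isr_events, 1u);
    (void)arg;
}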
@@ -15,42 +15,90 @@
 //replacement for gcc built-in functions
 
 #include "sdkconfig.h"
-#include "freertos/FreeRTOS.h"
+#include <stdbool.h>
 #include "xtensa/config/core-isa.h"
+#include "xtensa/xtruntime.h"
 
+// Reserved to measure atomic operation time
+#define atomic_benchmark_intr_disable()
+#define atomic_benchmark_intr_restore(STATE)
+
+// These allow nested interrupt disabling and restoring via local registers or the stack.
+// They can be called from interrupts too.
+// WARNING: Only applies to the current CPU.
+#define _ATOMIC_ENTER_CRITICAL(void) ({ \
+    unsigned state = XTOS_SET_INTLEVEL(XCHAL_EXCM_LEVEL); \
+    atomic_benchmark_intr_disable(); \
+    state; \
+})
+
+#define _ATOMIC_EXIT_CRITICAL(state) do { \
+    atomic_benchmark_intr_restore(state); \
+    XTOS_RESTORE_JUST_INTLEVEL(state); \
+} while (0)
+
 #define CMP_EXCHANGE(n, type) bool __atomic_compare_exchange_ ## n (type* mem, type* expect, type desired, int success, int failure) \
 { \
     bool ret = false; \
-    unsigned state = portENTER_CRITICAL_NESTED(); \
+    unsigned state = _ATOMIC_ENTER_CRITICAL(); \
     if (*mem == *expect) { \
         ret = true; \
         *mem = desired; \
     } else { \
         *expect = *mem; \
     } \
-    portEXIT_CRITICAL_NESTED(state); \
+    _ATOMIC_EXIT_CRITICAL(state); \
     return ret; \
 }
 
 #define FETCH_ADD(n, type) type __atomic_fetch_add_ ## n (type* ptr, type value, int memorder) \
 { \
-    unsigned state = portENTER_CRITICAL_NESTED(); \
+    unsigned state = _ATOMIC_ENTER_CRITICAL(); \
     type ret = *ptr; \
     *ptr = *ptr + value; \
-    portEXIT_CRITICAL_NESTED(state); \
+    _ATOMIC_EXIT_CRITICAL(state); \
     return ret; \
 }
 
 #define FETCH_SUB(n, type) type __atomic_fetch_sub_ ## n (type* ptr, type value, int memorder) \
 { \
-    unsigned state = portENTER_CRITICAL_NESTED(); \
+    unsigned state = _ATOMIC_ENTER_CRITICAL(); \
     type ret = *ptr; \
     *ptr = *ptr - value; \
-    portEXIT_CRITICAL_NESTED(state); \
+    _ATOMIC_EXIT_CRITICAL(state); \
     return ret; \
 }
 
+#define FETCH_AND(n, type) type __atomic_fetch_and_ ## n (type* ptr, type value, int memorder) \
+{ \
+    unsigned state = _ATOMIC_ENTER_CRITICAL(); \
+    type ret = *ptr; \
+    *ptr = *ptr & value; \
+    _ATOMIC_EXIT_CRITICAL(state); \
+    return ret; \
+}
+
+#define FETCH_OR(n, type) type __atomic_fetch_or_ ## n (type* ptr, type value, int memorder) \
+{ \
+    unsigned state = _ATOMIC_ENTER_CRITICAL(); \
+    type ret = *ptr; \
+    *ptr = *ptr | value; \
+    _ATOMIC_EXIT_CRITICAL(state); \
+    return ret; \
+}
+
+#define FETCH_XOR(n, type) type __atomic_fetch_xor_ ## n (type* ptr, type value, int memorder) \
+{ \
+    unsigned state = _ATOMIC_ENTER_CRITICAL(); \
+    type ret = *ptr; \
+    *ptr = *ptr ^ value; \
+    _ATOMIC_EXIT_CRITICAL(state); \
+    return ret; \
+}
+
+#ifndef XCHAL_HAVE_S32C1I
+#error "XCHAL_HAVE_S32C1I not defined, include correct header!"
+#endif
+
 //this piece of code should only be compiled if the cpu doesn't support atomic compare and swap (s32c1i)
 #if XCHAL_HAVE_S32C1I == 0
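To make the critical-section plumbing concrete, this is roughly what FETCH_AND(4, uint32_t) expands to after substituting the macros above by hand (the benchmark hooks are empty and omitted). Raising the interrupt level to XCHAL_EXCM_LEVEL makes the read-modify-write sequence atomic, but, per the warning in the source, only with respect to the current CPU:

#include <stdint.h>
#include "xtensa/config/core-isa.h"
#include "xtensa/xtruntime.h"

uint32_t __atomic_fetch_and_4(uint32_t *ptr, uint32_t value, int memorder)
{
    // Mask interrupts up to XCHAL_EXCM_LEVEL; 'state' keeps the old level.
    unsigned state = XTOS_SET_INTLEVEL(XCHAL_EXCM_LEVEL);
    uint32_t ret = *ptr;        // capture the value before modification
    *ptr = *ptr & value;        // apply the AND in place
    XTOS_RESTORE_JUST_INTLEVEL(state);  // restore the previous level
    // memorder is accepted for ABI compatibility but ignored: masking
    // interrupts already serializes this core.
    return ret;                 // fetch-and-modify returns the old value
}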
@@ -72,4 +120,19 @@ FETCH_SUB(2, uint16_t)
 FETCH_SUB(4, uint32_t)
 FETCH_SUB(8, uint64_t)
 
+FETCH_AND(1, uint8_t)
+FETCH_AND(2, uint16_t)
+FETCH_AND(4, uint32_t)
+FETCH_AND(8, uint64_t)
+
+FETCH_OR(1, uint8_t)
+FETCH_OR(2, uint16_t)
+FETCH_OR(4, uint32_t)
+FETCH_OR(8, uint64_t)
+
+FETCH_XOR(1, uint8_t)
+FETCH_XOR(2, uint16_t)
+FETCH_XOR(4, uint32_t)
+FETCH_XOR(8, uint64_t)
+
 #endif
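Finally, a quick sanity check of the new helpers' fetch-and-modify semantics, written against plain <stdatomic.h> (a hypothetical test, not part of the commit):

#include <assert.h>
#include <stdatomic.h>
#include <stdint.h>

void test_fetch_ops(void)
{
    _Atomic uint32_t v = 0xF0u;

    assert(atomic_fetch_and(&v, 0x30u) == 0xF0u);  // returns the old value...
    assert(atomic_load(&v) == 0x30u);              // ...and stores v & 0x30

    assert(atomic_fetch_or(&v, 0x05u) == 0x30u);
    assert(atomic_load(&v) == 0x35u);

    assert(atomic_fetch_xor(&v, 0xFFu) == 0x35u);
    assert(atomic_load(&v) == 0xCAu);
}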