SPI: More MR issues fixed, style fixup

This commit is contained in:
Jeroen Domburg 2017-04-27 11:24:44 +08:00
parent bf0c32364a
commit 04b901e629
5 changed files with 299 additions and 283 deletions

View file

@ -169,16 +169,6 @@ spi_dev_t *spicommon_hw_for_host(spi_host_device_t host);
int spicommon_irqsource_for_host(spi_host_device_t host);
/**
* @note In some (well-defined) cases in the ESP32 (at least rev v.0 and v.1), a SPI DMA channel will get confused. This can be remedied
* by resetting the SPI DMA hardware in case this happens. Unfortunately, the reset knob used for this will reset _both_ DMA channels, and
* as such can only be done safely when both DMA channels are idle. These functions coordinate this.
*
* Essentially, when a reset is needed, a driver can request this using spicommon_dmaworkaround_req_reset. This is supposed to be called
* with a user-supplied function as an argument. If both DMA channels are idle, this call will reset the DMA subsystem and return true.
* If the other DMA channel is still busy, it will return false; as soon as the other DMA channel is done, however, it will reset the
* DMA subsystem and call the callback. The callback is then supposed to be used to continue the SPI driver's activity.
*/
/**
@ -190,6 +180,15 @@ typedef void(*dmaworkaround_cb_t)(void *arg);
/**
* @brief Request a reset for a certain DMA channel
*
* @note In some (well-defined) cases in the ESP32 (at least rev v.0 and v.1), a SPI DMA channel will get confused. This can be remedied
* by resetting the SPI DMA hardware in case this happens. Unfortunately, the reset knob used for this will reset _both_ DMA channels, and
* as such can only be done safely when both DMA channels are idle. These functions coordinate this.
*
* Essentially, when a reset is needed, a driver can request this using spicommon_dmaworkaround_req_reset. This is supposed to be called
* with a user-supplied function as an argument. If both DMA channels are idle, this call will reset the DMA subsystem and return true.
* If the other DMA channel is still busy, it will return false; as soon as the other DMA channel is done, however, it will reset the
* DMA subsystem and call the callback. The callback is then supposed to be used to continue the SPI driver's activity.
*
* @param dmachan DMA channel associated with the SPI host that needs a reset
* @param cb Callback to call in case DMA channel cannot be reset immediately
* @param arg Argument to the callback
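For illustration, a minimal sketch of the request/callback pattern described above. spicommon_dmaworkaround_req_reset is the function declared here; the driver state and the resume function are hypothetical.

//Hypothetical resume callback: by the time this runs, the SPI DMA subsystem has
//been reset, so the driver can re-enable its interrupt or restart the transaction.
static void my_driver_resume(void *arg)
{
    //Continue the interrupted SPI activity here.
}

static void my_driver_request_dma_reset(int dma_chan)
{
    if (spicommon_dmaworkaround_req_reset(dma_chan, my_driver_resume, NULL)) {
        //Both DMA channels were idle: the reset already happened, continue immediately.
    } else {
        //The other channel is still busy: my_driver_resume will be called as soon as
        //it goes idle and the reset has been performed.
    }
}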

View file

@ -31,7 +31,7 @@ extern "C"
#define SPI_DEVICE_TXBIT_LSBFIRST (1<<0) ///< Transmit command/address/data LSB first instead of the default MSB first
#define SPI_DEVICE_RXBIT_LSBFIRST (1<<1) ///< Receive data LSB first instead of the default MSB first
#define SPI_DEVICE_BIT_LSBFIRST (SPI_DEVICE_TXBIT_LSBFIRST|SPI_DEVICE_RXBIT_LSBFIRST) ///< Transmit and receive LSB first
#define SPI_DEVICE_3WIRE (1<<2) ///< Use spid for both sending and receiving data
#define SPI_DEVICE_3WIRE (1<<2) ///< Use MOSI (=spid) for both sending and receiving data
#define SPI_DEVICE_POSITIVE_CS (1<<3) ///< Make CS positive during a transaction instead of negative
#define SPI_DEVICE_HALFDUPLEX (1<<4) ///< Transmit data before receiving it, instead of simultaneously
#define SPI_DEVICE_CLK_AS_CS (1<<5) ///< Output clock on CS line if CS is active
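For illustration only, a hedged sketch of how these flag bits combine in a device configuration. The half-duplex 3-wire device and the CS pin number are made up; spi_device_interface_config_t and spi_bus_add_device belong to the master driver API rather than this diff.

//Hypothetical half-duplex device driven over MOSI only, with an active-high CS.
spi_device_interface_config_t devcfg = {
    .clock_speed_hz = 1000000,
    .mode = 0,
    .spics_io_num = 22,     //example CS GPIO, not taken from this header
    .queue_size = 4,
    .flags = SPI_DEVICE_HALFDUPLEX | SPI_DEVICE_3WIRE | SPI_DEVICE_POSITIVE_CS,
};
spi_device_handle_t handle;
esp_err_t err = spi_bus_add_device(HSPI_HOST, &devcfg, &handle);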

View file

@ -59,7 +59,7 @@ struct spi_slave_transaction_t {
};
/**
* @brief Initialize a SPI bus
* @brief Initialize a SPI bus as a slave interface
*
* @warning For now, only supports HSPI and VSPI.
*
@ -92,10 +92,13 @@ esp_err_t spi_slave_free(spi_host_device_t host);
/**
* @brief Queue a SPI transaction for execution
*
* This will queue a transaction for the master to pick it up. If the queue (specified in ``spi_slave_initialize``)
* is not full, this function will return directly; the actual transaction will be done if there aren't any
* unhandled transactions before it and the master initiates a SPI transaction by pulling down CS and sending out
* clock signals.
* Queues a SPI transaction to be executed by this slave device. (The transaction queue size was specified when the slave
* device was initialised via spi_slave_initialize.) This function may block if the queue is full (depending on the
* ticks_to_wait parameter). No SPI operation is directly initiated by this function; the next queued transaction
* will happen when the master initiates a SPI transaction by pulling down CS and sending out clock signals.
*
* This function hands over ownership of the buffers in ``trans_desc`` to the SPI slave driver; the application is
* not to access this memory until ``spi_slave_get_trans_result`` is called to hand ownership back to the application.
*
* @param host SPI peripheral that is acting as a slave
* @param trans_desc Description of transaction to execute. Not const because we may want to write status back
@ -117,8 +120,10 @@ esp_err_t spi_slave_queue_trans(spi_host_device_t host, const spi_slave_transact
* completed transaction so software can inspect the result and e.g. free the memory or
* re-use the buffers.
*
* It is mandatory to eventually use this function for any transaction queued by ``spi_slave_queue_trans``.
*
* @param host SPI peripheral that is acting as a slave
* @param trans_desc Pointer to variable able to contain a pointer to the description of the
* @param[out] trans_desc Pointer to a variable that will receive a pointer to the description of the
* transaction that was executed
* @param ticks_to_wait Ticks to wait until there's a returned item; use portMAX_DELAY to never time
* out.
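Putting the two calls together, a minimal sketch of the queue/wait pattern described above. The buffer size and host are illustrative; in DMA mode the buffers would additionally need to be word-aligned and located in DMA-capable memory.

//Queue a 128-byte exchange and block until the master has clocked it out.
//The buffers must not be touched between queue_trans and get_trans_result.
static uint8_t sendbuf[128];
static uint8_t recvbuf[128];

spi_slave_transaction_t t = {
    .length = 128 * 8,          //length is given in bits
    .tx_buffer = sendbuf,
    .rx_buffer = recvbuf,
};

esp_err_t err = spi_slave_queue_trans(HSPI_HOST, &t, portMAX_DELAY);
if (err == ESP_OK) {
    spi_slave_transaction_t *done;
    err = spi_slave_get_trans_result(HSPI_HOST, &done, portMAX_DELAY);
    //On success, 'done' points back at 't' and buffer ownership returns to the application.
}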

View file

@ -77,133 +77,137 @@ typedef struct {
/*
Bunch of constants for every SPI peripheral: GPIO signals, irqs, hw addr of registers etc
*/
static const spi_signal_conn_t io_signal[3]={
static const spi_signal_conn_t io_signal[3] = {
{
.spiclk_out=SPICLK_OUT_IDX,
.spiclk_in=SPICLK_IN_IDX,
.spid_out=SPID_OUT_IDX,
.spiq_out=SPIQ_OUT_IDX,
.spiwp_out=SPIWP_OUT_IDX,
.spihd_out=SPIHD_OUT_IDX,
.spid_in=SPID_IN_IDX,
.spiq_in=SPIQ_IN_IDX,
.spiwp_in=SPIWP_IN_IDX,
.spihd_in=SPIHD_IN_IDX,
.spics_out={SPICS0_OUT_IDX, SPICS1_OUT_IDX, SPICS2_OUT_IDX},
.spics_in=SPICS0_IN_IDX,
.spiclk_native=6,
.spid_native=8,
.spiq_native=7,
.spiwp_native=10,
.spihd_native=9,
.spics0_native=11,
.irq=ETS_SPI1_INTR_SOURCE,
.irq_dma=ETS_SPI1_DMA_INTR_SOURCE,
.module=PERIPH_SPI_MODULE,
.hw=&SPI1
.spiclk_out = SPICLK_OUT_IDX,
.spiclk_in = SPICLK_IN_IDX,
.spid_out = SPID_OUT_IDX,
.spiq_out = SPIQ_OUT_IDX,
.spiwp_out = SPIWP_OUT_IDX,
.spihd_out = SPIHD_OUT_IDX,
.spid_in = SPID_IN_IDX,
.spiq_in = SPIQ_IN_IDX,
.spiwp_in = SPIWP_IN_IDX,
.spihd_in = SPIHD_IN_IDX,
.spics_out = {SPICS0_OUT_IDX, SPICS1_OUT_IDX, SPICS2_OUT_IDX},
.spics_in = SPICS0_IN_IDX,
.spiclk_native = 6,
.spid_native = 8,
.spiq_native = 7,
.spiwp_native = 10,
.spihd_native = 9,
.spics0_native = 11,
.irq = ETS_SPI1_INTR_SOURCE,
.irq_dma = ETS_SPI1_DMA_INTR_SOURCE,
.module = PERIPH_SPI_MODULE,
.hw = &SPI1
}, {
.spiclk_out=HSPICLK_OUT_IDX,
.spiclk_in=HSPICLK_IN_IDX,
.spid_out=HSPID_OUT_IDX,
.spiq_out=HSPIQ_OUT_IDX,
.spiwp_out=HSPIWP_OUT_IDX,
.spihd_out=HSPIHD_OUT_IDX,
.spid_in=HSPID_IN_IDX,
.spiq_in=HSPIQ_IN_IDX,
.spiwp_in=HSPIWP_IN_IDX,
.spihd_in=HSPIHD_IN_IDX,
.spics_out={HSPICS0_OUT_IDX, HSPICS1_OUT_IDX, HSPICS2_OUT_IDX},
.spics_in=HSPICS0_IN_IDX,
.spiclk_native=14,
.spid_native=13,
.spiq_native=12,
.spiwp_native=2,
.spihd_native=4,
.spics0_native=15,
.irq=ETS_SPI2_INTR_SOURCE,
.irq_dma=ETS_SPI2_DMA_INTR_SOURCE,
.module=PERIPH_HSPI_MODULE,
.hw=&SPI2
.spiclk_out = HSPICLK_OUT_IDX,
.spiclk_in = HSPICLK_IN_IDX,
.spid_out = HSPID_OUT_IDX,
.spiq_out = HSPIQ_OUT_IDX,
.spiwp_out = HSPIWP_OUT_IDX,
.spihd_out = HSPIHD_OUT_IDX,
.spid_in = HSPID_IN_IDX,
.spiq_in = HSPIQ_IN_IDX,
.spiwp_in = HSPIWP_IN_IDX,
.spihd_in = HSPIHD_IN_IDX,
.spics_out = {HSPICS0_OUT_IDX, HSPICS1_OUT_IDX, HSPICS2_OUT_IDX},
.spics_in = HSPICS0_IN_IDX,
.spiclk_native = 14,
.spid_native = 13,
.spiq_native = 12,
.spiwp_native = 2,
.spihd_native = 4,
.spics0_native = 15,
.irq = ETS_SPI2_INTR_SOURCE,
.irq_dma = ETS_SPI2_DMA_INTR_SOURCE,
.module = PERIPH_HSPI_MODULE,
.hw = &SPI2
}, {
.spiclk_out=VSPICLK_OUT_IDX,
.spiclk_in=VSPICLK_IN_IDX,
.spid_out=VSPID_OUT_IDX,
.spiq_out=VSPIQ_OUT_IDX,
.spiwp_out=VSPIWP_OUT_IDX,
.spihd_out=VSPIHD_OUT_IDX,
.spid_in=VSPID_IN_IDX,
.spiq_in=VSPIQ_IN_IDX,
.spiwp_in=VSPIWP_IN_IDX,
.spihd_in=VSPIHD_IN_IDX,
.spics_out={VSPICS0_OUT_IDX, VSPICS1_OUT_IDX, VSPICS2_OUT_IDX},
.spics_in=VSPICS0_IN_IDX,
.spiclk_native=18,
.spid_native=23,
.spiq_native=19,
.spiwp_native=22,
.spihd_native=21,
.spics0_native=5,
.irq=ETS_SPI3_INTR_SOURCE,
.irq_dma=ETS_SPI3_DMA_INTR_SOURCE,
.module=PERIPH_VSPI_MODULE,
.hw=&SPI3
.spiclk_out = VSPICLK_OUT_IDX,
.spiclk_in = VSPICLK_IN_IDX,
.spid_out = VSPID_OUT_IDX,
.spiq_out = VSPIQ_OUT_IDX,
.spiwp_out = VSPIWP_OUT_IDX,
.spihd_out = VSPIHD_OUT_IDX,
.spid_in = VSPID_IN_IDX,
.spiq_in = VSPIQ_IN_IDX,
.spiwp_in = VSPIWP_IN_IDX,
.spihd_in = VSPIHD_IN_IDX,
.spics_out = {VSPICS0_OUT_IDX, VSPICS1_OUT_IDX, VSPICS2_OUT_IDX},
.spics_in = VSPICS0_IN_IDX,
.spiclk_native = 18,
.spid_native = 23,
.spiq_native = 19,
.spiwp_native = 22,
.spihd_native = 21,
.spics0_native = 5,
.irq = ETS_SPI3_INTR_SOURCE,
.irq_dma = ETS_SPI3_DMA_INTR_SOURCE,
.module = PERIPH_VSPI_MODULE,
.hw = &SPI3
}
};
//Periph 1 is 'claimed' by SPI flash code.
static bool spi_periph_claimed[3]={true, false, false};
static bool spi_periph_claimed[3] = {true, false, false};
//Returns true if this peripheral is successfully claimed, false otherwise.
bool spicommon_periph_claim(spi_host_device_t host) {
bool spicommon_periph_claim(spi_host_device_t host)
{
bool ret = __sync_bool_compare_and_swap(&spi_periph_claimed[host], false, true);
if (ret) periph_module_enable(io_signal[host].module);
return ret;
}
//Returns true if this peripheral is successfully freed, false otherwise.
bool spicommon_periph_free(spi_host_device_t host) {
bool spicommon_periph_free(spi_host_device_t host)
{
bool ret = __sync_bool_compare_and_swap(&spi_periph_claimed[host], true, false);
if (ret) periph_module_disable(io_signal[host].module);
return ret;
}
int spicommon_irqsource_for_host(spi_host_device_t host) {
int spicommon_irqsource_for_host(spi_host_device_t host)
{
return io_signal[host].irq;
}
spi_dev_t *spicommon_hw_for_host(spi_host_device_t host) {
spi_dev_t *spicommon_hw_for_host(spi_host_device_t host)
{
return io_signal[host].hw;
}
/*
Do the common stuff to hook up a SPI host to a bus defined by a bunch of GPIO pins. Feed it a host number and a
bus config struct and it'll set up the GPIO matrix and enable the device. It will set is_native to 1 if the bus
bus config struct and it'll set up the GPIO matrix and enable the device. It will set is_native to 1 if the bus
config can be done using the IOMUX instead of using the GPIO matrix.
*/
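As an illustration of the is_native decision described here, a sketch only: the GPIO numbers are the HSPI native pins from the io_signal table above, and the call mirrors the one the slave driver makes with SPICOMMON_BUSFLAG_SLAVE.

//These GPIOs match HSPI's native IOMUX pins, so spicommon_bus_initialize_io()
//reports is_native == true and the IOMUX is used instead of the GPIO matrix.
spi_bus_config_t buscfg = {
    .mosi_io_num = 13,
    .miso_io_num = 12,
    .sclk_io_num = 14,
    .quadwp_io_num = -1,    //negative: signal not used
    .quadhd_io_num = -1,
};
bool is_native;
spicommon_bus_initialize_io(HSPI_HOST, &buscfg, 1, SPICOMMON_BUSFLAG_SLAVE, &is_native);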
esp_err_t spicommon_bus_initialize_io(spi_host_device_t host, const spi_bus_config_t *bus_config, int dma_chan, int flags, bool *is_native)
{
bool native=true;
bool use_quad=(flags&SPICOMMON_BUSFLAG_QUAD)?true:false;
bool native = true;
bool use_quad = (flags & SPICOMMON_BUSFLAG_QUAD) != 0;
SPI_CHECK(bus_config->mosi_io_num<0 || GPIO_IS_VALID_OUTPUT_GPIO(bus_config->mosi_io_num), "spid pin invalid", ESP_ERR_INVALID_ARG);
SPI_CHECK(bus_config->sclk_io_num<0 || GPIO_IS_VALID_OUTPUT_GPIO(bus_config->sclk_io_num), "spiclk pin invalid", ESP_ERR_INVALID_ARG);
SPI_CHECK(bus_config->miso_io_num<0 || GPIO_IS_VALID_GPIO(bus_config->miso_io_num), "spiq pin invalid", ESP_ERR_INVALID_ARG);
SPI_CHECK(bus_config->mosi_io_num < 0 || GPIO_IS_VALID_OUTPUT_GPIO(bus_config->mosi_io_num), "spid pin invalid", ESP_ERR_INVALID_ARG);
SPI_CHECK(bus_config->sclk_io_num < 0 || GPIO_IS_VALID_OUTPUT_GPIO(bus_config->sclk_io_num), "spiclk pin invalid", ESP_ERR_INVALID_ARG);
SPI_CHECK(bus_config->miso_io_num < 0 || GPIO_IS_VALID_GPIO(bus_config->miso_io_num), "spiq pin invalid", ESP_ERR_INVALID_ARG);
if (use_quad) {
SPI_CHECK(bus_config->quadwp_io_num<0 || GPIO_IS_VALID_OUTPUT_GPIO(bus_config->quadwp_io_num), "spiwp pin invalid", ESP_ERR_INVALID_ARG);
SPI_CHECK(bus_config->quadhd_io_num<0 || GPIO_IS_VALID_OUTPUT_GPIO(bus_config->quadhd_io_num), "spihd pin invalid", ESP_ERR_INVALID_ARG);
SPI_CHECK(bus_config->quadwp_io_num < 0 || GPIO_IS_VALID_OUTPUT_GPIO(bus_config->quadwp_io_num), "spiwp pin invalid", ESP_ERR_INVALID_ARG);
SPI_CHECK(bus_config->quadhd_io_num < 0 || GPIO_IS_VALID_OUTPUT_GPIO(bus_config->quadhd_io_num), "spihd pin invalid", ESP_ERR_INVALID_ARG);
}
//Check if the selected pins correspond to the native pins of the peripheral
if (bus_config->mosi_io_num >= 0 && bus_config->mosi_io_num!=io_signal[host].spid_native) native=false;
if (bus_config->miso_io_num >= 0 && bus_config->miso_io_num!=io_signal[host].spiq_native) native=false;
if (bus_config->sclk_io_num >= 0 && bus_config->sclk_io_num!=io_signal[host].spiclk_native) native=false;
if (bus_config->mosi_io_num >= 0 && bus_config->mosi_io_num != io_signal[host].spid_native) native = false;
if (bus_config->miso_io_num >= 0 && bus_config->miso_io_num != io_signal[host].spiq_native) native = false;
if (bus_config->sclk_io_num >= 0 && bus_config->sclk_io_num != io_signal[host].spiclk_native) native = false;
if (use_quad) {
if (bus_config->quadwp_io_num >= 0 && bus_config->quadwp_io_num!=io_signal[host].spiwp_native) native=false;
if (bus_config->quadhd_io_num >= 0 && bus_config->quadhd_io_num!=io_signal[host].spihd_native) native=false;
if (bus_config->quadwp_io_num >= 0 && bus_config->quadwp_io_num != io_signal[host].spiwp_native) native = false;
if (bus_config->quadhd_io_num >= 0 && bus_config->quadhd_io_num != io_signal[host].spihd_native) native = false;
}
*is_native=native;
*is_native = native;
if (native) {
//All SPI native pin selections resolve to 1, so we put that here instead of trying to figure
@ -214,32 +218,32 @@ esp_err_t spicommon_bus_initialize_io(spi_host_device_t host, const spi_bus_conf
if (use_quad && bus_config->quadhd_io_num > 0) PIN_FUNC_SELECT(GPIO_PIN_MUX_REG[bus_config->quadhd_io_num], 1);
if (bus_config->sclk_io_num > 0) PIN_FUNC_SELECT(GPIO_PIN_MUX_REG[bus_config->sclk_io_num], 1);
} else {
//Use GPIO
if (bus_config->mosi_io_num>0) {
//Use GPIO
if (bus_config->mosi_io_num > 0) {
PIN_FUNC_SELECT(GPIO_PIN_MUX_REG[bus_config->mosi_io_num], PIN_FUNC_GPIO);
gpio_set_direction(bus_config->mosi_io_num, GPIO_MODE_INPUT_OUTPUT);
gpio_matrix_out(bus_config->mosi_io_num, io_signal[host].spid_out, false, false);
gpio_matrix_in(bus_config->mosi_io_num, io_signal[host].spid_in, false);
}
if (bus_config->miso_io_num>0) {
if (bus_config->miso_io_num > 0) {
PIN_FUNC_SELECT(GPIO_PIN_MUX_REG[bus_config->miso_io_num], PIN_FUNC_GPIO);
gpio_set_direction(bus_config->miso_io_num, GPIO_MODE_INPUT_OUTPUT);
gpio_matrix_out(bus_config->miso_io_num, io_signal[host].spiq_out, false, false);
gpio_matrix_in(bus_config->miso_io_num, io_signal[host].spiq_in, false);
}
if (use_quad && bus_config->quadwp_io_num>0) {
if (use_quad && bus_config->quadwp_io_num > 0) {
PIN_FUNC_SELECT(GPIO_PIN_MUX_REG[bus_config->quadwp_io_num], PIN_FUNC_GPIO);
gpio_set_direction(bus_config->quadwp_io_num, GPIO_MODE_INPUT_OUTPUT);
gpio_matrix_out(bus_config->quadwp_io_num, io_signal[host].spiwp_out, false, false);
gpio_matrix_in(bus_config->quadwp_io_num, io_signal[host].spiwp_in, false);
}
if (use_quad && bus_config->quadhd_io_num>0) {
if (use_quad && bus_config->quadhd_io_num > 0) {
PIN_FUNC_SELECT(GPIO_PIN_MUX_REG[bus_config->quadhd_io_num], PIN_FUNC_GPIO);
gpio_set_direction(bus_config->quadhd_io_num, GPIO_MODE_INPUT_OUTPUT);
gpio_matrix_out(bus_config->quadhd_io_num, io_signal[host].spihd_out, false, false);
gpio_matrix_in(bus_config->quadhd_io_num, io_signal[host].spihd_in, false);
}
if (bus_config->sclk_io_num>0) {
if (bus_config->sclk_io_num > 0) {
PIN_FUNC_SELECT(GPIO_PIN_MUX_REG[bus_config->sclk_io_num], PIN_FUNC_GPIO);
gpio_set_direction(bus_config->sclk_io_num, GPIO_MODE_INPUT_OUTPUT);
gpio_matrix_out(bus_config->sclk_io_num, io_signal[host].spiclk_out, false, false);
@ -255,9 +259,10 @@ esp_err_t spicommon_bus_initialize_io(spi_host_device_t host, const spi_bus_conf
//Find any pin with output muxed to ``func`` and reset it to GPIO
static void reset_func_to_gpio(int func) {
for (int x=0; x<GPIO_PIN_COUNT; x++) {
if (GPIO_IS_VALID_GPIO(x) && (READ_PERI_REG(GPIO_FUNC0_OUT_SEL_CFG_REG+(x*4))&GPIO_FUNC0_OUT_SEL_M)==func) {
static void reset_func_to_gpio(int func)
{
for (int x = 0; x < GPIO_PIN_COUNT; x++) {
if (GPIO_IS_VALID_GPIO(x) && (READ_PERI_REG(GPIO_FUNC0_OUT_SEL_CFG_REG + (x * 4))&GPIO_FUNC0_OUT_SEL_M) == func) {
gpio_matrix_out(x, SIG_GPIO_OUT_IDX, false, false);
}
}
@ -266,11 +271,11 @@ static void reset_func_to_gpio(int func) {
esp_err_t spicommon_bus_free_io(spi_host_device_t host)
{
if (REG_GET_FIELD(GPIO_PIN_MUX_REG[io_signal[host].spid_native], MCU_SEL)==1) PIN_FUNC_SELECT(GPIO_PIN_MUX_REG[io_signal[host].spid_native], PIN_FUNC_GPIO);
if (REG_GET_FIELD(GPIO_PIN_MUX_REG[io_signal[host].spiq_native], MCU_SEL)==1) PIN_FUNC_SELECT(GPIO_PIN_MUX_REG[io_signal[host].spiq_native], PIN_FUNC_GPIO);
if (REG_GET_FIELD(GPIO_PIN_MUX_REG[io_signal[host].spiclk_native], MCU_SEL)==1) PIN_FUNC_SELECT(GPIO_PIN_MUX_REG[io_signal[host].spiclk_native], PIN_FUNC_GPIO);
if (REG_GET_FIELD(GPIO_PIN_MUX_REG[io_signal[host].spiwp_native], MCU_SEL)==1) PIN_FUNC_SELECT(GPIO_PIN_MUX_REG[io_signal[host].spiwp_native], PIN_FUNC_GPIO);
if (REG_GET_FIELD(GPIO_PIN_MUX_REG[io_signal[host].spihd_native], MCU_SEL)==1) PIN_FUNC_SELECT(GPIO_PIN_MUX_REG[io_signal[host].spihd_native], PIN_FUNC_GPIO);
if (REG_GET_FIELD(GPIO_PIN_MUX_REG[io_signal[host].spid_native], MCU_SEL) == 1) PIN_FUNC_SELECT(GPIO_PIN_MUX_REG[io_signal[host].spid_native], PIN_FUNC_GPIO);
if (REG_GET_FIELD(GPIO_PIN_MUX_REG[io_signal[host].spiq_native], MCU_SEL) == 1) PIN_FUNC_SELECT(GPIO_PIN_MUX_REG[io_signal[host].spiq_native], PIN_FUNC_GPIO);
if (REG_GET_FIELD(GPIO_PIN_MUX_REG[io_signal[host].spiclk_native], MCU_SEL) == 1) PIN_FUNC_SELECT(GPIO_PIN_MUX_REG[io_signal[host].spiclk_native], PIN_FUNC_GPIO);
if (REG_GET_FIELD(GPIO_PIN_MUX_REG[io_signal[host].spiwp_native], MCU_SEL) == 1) PIN_FUNC_SELECT(GPIO_PIN_MUX_REG[io_signal[host].spiwp_native], PIN_FUNC_GPIO);
if (REG_GET_FIELD(GPIO_PIN_MUX_REG[io_signal[host].spihd_native], MCU_SEL) == 1) PIN_FUNC_SELECT(GPIO_PIN_MUX_REG[io_signal[host].spihd_native], PIN_FUNC_GPIO);
reset_func_to_gpio(io_signal[host].spid_out);
reset_func_to_gpio(io_signal[host].spiq_out);
reset_func_to_gpio(io_signal[host].spiclk_out);
@ -279,50 +284,53 @@ esp_err_t spicommon_bus_free_io(spi_host_device_t host)
return ESP_OK;
}
void spicommon_cs_initialize(spi_host_device_t host, int cs_io_num, int cs_num, int force_gpio_matrix) {
if (!force_gpio_matrix && cs_io_num == io_signal[host].spics0_native && cs_num==0) {
void spicommon_cs_initialize(spi_host_device_t host, int cs_io_num, int cs_num, int force_gpio_matrix)
{
if (!force_gpio_matrix && cs_io_num == io_signal[host].spics0_native && cs_num == 0) {
//The cs0s for all SPI peripherals map to pin mux source 1, so we use that instead of a define.
PIN_FUNC_SELECT(GPIO_PIN_MUX_REG[cs_io_num], 1);
} else {
//Use GPIO matrix
PIN_FUNC_SELECT(GPIO_PIN_MUX_REG[cs_io_num], PIN_FUNC_GPIO);
gpio_matrix_out(cs_io_num, io_signal[host].spics_out[cs_num], false, false);
if (cs_num==0) gpio_matrix_in(cs_io_num, io_signal[host].spics_in, false);
if (cs_num == 0) gpio_matrix_in(cs_io_num, io_signal[host].spics_in, false);
}
}
void spicommon_cs_free(spi_host_device_t host, int cs_io_num) {
if (cs_io_num==0 && REG_GET_FIELD(GPIO_PIN_MUX_REG[io_signal[host].spics0_native], MCU_SEL)==1) {
void spicommon_cs_free(spi_host_device_t host, int cs_io_num)
{
if (cs_io_num == 0 && REG_GET_FIELD(GPIO_PIN_MUX_REG[io_signal[host].spics0_native], MCU_SEL) == 1) {
PIN_FUNC_SELECT(GPIO_PIN_MUX_REG[io_signal[host].spics0_native], PIN_FUNC_GPIO);
}
reset_func_to_gpio(io_signal[host].spics_out[cs_io_num]);
}
//Set up a list of dma descriptors. dmadesc is an array of descriptors. Data is the buffer to point to.
void spicommon_setup_dma_desc_links(lldesc_t *dmadesc, int len, const uint8_t *data, bool isrx) {
int n=0;
void spicommon_setup_dma_desc_links(lldesc_t *dmadesc, int len, const uint8_t *data, bool isrx)
{
int n = 0;
while (len) {
int dmachunklen=len;
if (dmachunklen > SPI_MAX_DMA_LEN) dmachunklen=SPI_MAX_DMA_LEN;
int dmachunklen = len;
if (dmachunklen > SPI_MAX_DMA_LEN) dmachunklen = SPI_MAX_DMA_LEN;
if (isrx) {
//Receive needs DMA length rounded to next 32-bit boundary
dmadesc[n].size=(dmachunklen+3)&(~3);
dmadesc[n].length=(dmachunklen+3)&(~3);
dmadesc[n].size = (dmachunklen + 3) & (~3);
dmadesc[n].length = (dmachunklen + 3) & (~3);
} else {
dmadesc[n].size=dmachunklen;
dmadesc[n].length=dmachunklen;
dmadesc[n].size = dmachunklen;
dmadesc[n].length = dmachunklen;
}
dmadesc[n].buf=(uint8_t*)data;
dmadesc[n].eof=0;
dmadesc[n].sosf=0;
dmadesc[n].owner=1;
dmadesc[n].qe.stqe_next=&dmadesc[n+1];
len-=dmachunklen;
data+=dmachunklen;
dmadesc[n].buf = (uint8_t *)data;
dmadesc[n].eof = 0;
dmadesc[n].sosf = 0;
dmadesc[n].owner = 1;
dmadesc[n].qe.stqe_next = &dmadesc[n + 1];
len -= dmachunklen;
data += dmachunklen;
n++;
}
dmadesc[n-1].eof=1; //Mark last DMA desc as end of stream.
dmadesc[n-1].qe.stqe_next=NULL;
dmadesc[n - 1].eof = 1; //Mark last DMA desc as end of stream.
dmadesc[n - 1].qe.stqe_next = NULL;
}
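For reference, a hedged sketch of how a caller might size and fill a chain with this helper; the 6000-byte length is arbitrary, and the pvPortMallocCaps/MALLOC_CAP_DMA allocation mirrors what the slave driver in this commit does.

//Build a TX descriptor chain for a 6000-byte buffer in DMA-capable memory.
int len = 6000;
int desc_ct = (len + SPI_MAX_DMA_LEN - 1) / SPI_MAX_DMA_LEN;    //round up
lldesc_t *desc = pvPortMallocCaps(sizeof(lldesc_t) * desc_ct, MALLOC_CAP_DMA);
uint8_t *buf = pvPortMallocCaps(len, MALLOC_CAP_DMA);
if (desc && buf) {
    spicommon_setup_dma_desc_links(desc, len, buf, false);      //false: TX direction
}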
@ -331,28 +339,28 @@ Code for workaround for DMA issue in ESP32 v0/v1 silicon
*/
static volatile int dmaworkaround_channels_busy[2]={0,0};
static volatile int dmaworkaround_channels_busy[2] = {0, 0};
static dmaworkaround_cb_t dmaworkaround_cb;
static void *dmaworkaround_cb_arg;
static portMUX_TYPE dmaworkaround_mux=portMUX_INITIALIZER_UNLOCKED;
static int dmaworkaround_waiting_for_chan=0;
static portMUX_TYPE dmaworkaround_mux = portMUX_INITIALIZER_UNLOCKED;
static int dmaworkaround_waiting_for_chan = 0;
bool IRAM_ATTR spicommon_dmaworkaround_req_reset(int dmachan, dmaworkaround_cb_t cb, void *arg)
bool IRAM_ATTR spicommon_dmaworkaround_req_reset(int dmachan, dmaworkaround_cb_t cb, void *arg)
{
int otherchan=(dmachan==1)?2:1;
int otherchan = (dmachan == 1) ? 2 : 1;
bool ret;
portENTER_CRITICAL(&dmaworkaround_mux);
if (dmaworkaround_channels_busy[otherchan]) {
//Other channel is busy. Call back when it's done.
dmaworkaround_cb=cb;
dmaworkaround_cb_arg=arg;
dmaworkaround_waiting_for_chan=otherchan;
ret=false;
dmaworkaround_cb = cb;
dmaworkaround_cb_arg = arg;
dmaworkaround_waiting_for_chan = otherchan;
ret = false;
} else {
//Reset DMA
SET_PERI_REG_MASK(DPORT_PERIP_RST_EN_REG, DPORT_SPI_DMA_RST);
CLEAR_PERI_REG_MASK(DPORT_PERIP_RST_EN_REG, DPORT_SPI_DMA_RST);
ret=true;
ret = true;
}
portEXIT_CRITICAL(&dmaworkaround_mux);
return ret;
@ -360,17 +368,18 @@ bool IRAM_ATTR spicommon_dmaworkaround_req_reset(int dmachan, dmaworkaround_cb_t
bool IRAM_ATTR spicommon_dmaworkaround_reset_in_progress()
{
return (dmaworkaround_waiting_for_chan!=0);
return (dmaworkaround_waiting_for_chan != 0);
}
void IRAM_ATTR spicommon_dmaworkaround_idle(int dmachan) {
void IRAM_ATTR spicommon_dmaworkaround_idle(int dmachan)
{
portENTER_CRITICAL(&dmaworkaround_mux);
dmaworkaround_channels_busy[dmachan]=0;
dmaworkaround_channels_busy[dmachan] = 0;
if (dmaworkaround_waiting_for_chan == dmachan) {
//Reset DMA
SET_PERI_REG_MASK(DPORT_PERIP_RST_EN_REG, DPORT_SPI_DMA_RST);
CLEAR_PERI_REG_MASK(DPORT_PERIP_RST_EN_REG, DPORT_SPI_DMA_RST);
dmaworkaround_waiting_for_chan=0;
dmaworkaround_waiting_for_chan = 0;
//Call callback
dmaworkaround_cb(dmaworkaround_cb_arg);
@ -378,9 +387,10 @@ void IRAM_ATTR spicommon_dmaworkaround_idle(int dmachan) {
portEXIT_CRITICAL(&dmaworkaround_mux);
}
void IRAM_ATTR spicommon_dmaworkaround_transfer_active(int dmachan) {
void IRAM_ATTR spicommon_dmaworkaround_transfer_active(int dmachan)
{
portENTER_CRITICAL(&dmaworkaround_mux);
dmaworkaround_channels_busy[dmachan]=1;
dmaworkaround_channels_busy[dmachan] = 1;
portEXIT_CRITICAL(&dmaworkaround_mux);
}
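For completeness, a sketch of how the busy-tracking pair above is meant to bracket DMA activity; it mirrors what the slave ISR later in this commit does, and the surrounding ISR is hypothetical.

//Hypothetical ISR fragment: mark the channel idle when a transfer finishes,
//and busy again right before the next transfer starts.
static void IRAM_ATTR my_spi_isr_fragment(int dma_chan)
{
    //If a reset was waiting for this channel, spicommon_dmaworkaround_idle()
    //performs it and invokes the requesting driver's callback.
    spicommon_dmaworkaround_idle(dma_chan);
    if (spicommon_dmaworkaround_reset_in_progress()) {
        //A reset is still pending; don't start new DMA activity until the
        //requesting driver has been called back.
        return;
    }
    //About to start the next DMA transfer: mark the channel busy again.
    spicommon_dmaworkaround_transfer_active(dma_chan);
    //...set up descriptors and kick off the transfer here...
}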

View file

@ -71,100 +71,100 @@ esp_err_t spi_slave_initialize(spi_host_device_t host, const spi_bus_config_t *b
//We only support HSPI/VSPI, period.
SPI_CHECK(VALID_HOST(host), "invalid host", ESP_ERR_INVALID_ARG);
claimed=spicommon_periph_claim(host);
claimed = spicommon_periph_claim(host);
SPI_CHECK(claimed, "host already in use", ESP_ERR_INVALID_STATE);
spihost[host]=malloc(sizeof(spi_slave_t));
if (spihost[host]==NULL) goto nomem;
spihost[host] = malloc(sizeof(spi_slave_t));
if (spihost[host] == NULL) goto nomem;
memset(spihost[host], 0, sizeof(spi_slave_t));
memcpy(&spihost[host]->cfg, slave_config, sizeof(spi_slave_interface_config_t));
spicommon_bus_initialize_io(host, bus_config, dma_chan, SPICOMMON_BUSFLAG_SLAVE, &native);
gpio_set_direction(slave_config->spics_io_num, GPIO_MODE_INPUT);
spicommon_cs_initialize(host, slave_config->spics_io_num, 0, native == false);
spihost[host]->no_gpio_matrix=native;
spihost[host]->dma_chan=dma_chan;
if (dma_chan!=0) {
spihost[host]->no_gpio_matrix = native;
spihost[host]->dma_chan = dma_chan;
if (dma_chan != 0) {
//See how many dma descriptors we need and allocate them
int dma_desc_ct=(bus_config->max_transfer_sz+SPI_MAX_DMA_LEN-1)/SPI_MAX_DMA_LEN;
if (dma_desc_ct==0) dma_desc_ct=1; //default to 4k when max is not given
spihost[host]->max_transfer_sz = dma_desc_ct*SPI_MAX_DMA_LEN;
spihost[host]->dmadesc_tx=pvPortMallocCaps(sizeof(lldesc_t)*dma_desc_ct, MALLOC_CAP_DMA);
spihost[host]->dmadesc_rx=pvPortMallocCaps(sizeof(lldesc_t)*dma_desc_ct, MALLOC_CAP_DMA);
int dma_desc_ct = (bus_config->max_transfer_sz + SPI_MAX_DMA_LEN - 1) / SPI_MAX_DMA_LEN;
if (dma_desc_ct == 0) dma_desc_ct = 1; //default to 4k when max is not given
spihost[host]->max_transfer_sz = dma_desc_ct * SPI_MAX_DMA_LEN;
spihost[host]->dmadesc_tx = pvPortMallocCaps(sizeof(lldesc_t) * dma_desc_ct, MALLOC_CAP_DMA);
spihost[host]->dmadesc_rx = pvPortMallocCaps(sizeof(lldesc_t) * dma_desc_ct, MALLOC_CAP_DMA);
if (!spihost[host]->dmadesc_tx || !spihost[host]->dmadesc_rx) goto nomem;
} else {
//We're limited to non-DMA transfers: the SPI work registers can hold 64 bytes at most.
spihost[host]->max_transfer_sz=16*4;
spihost[host]->max_transfer_sz = 16 * 4;
}
//Create queues
spihost[host]->trans_queue=xQueueCreate(slave_config->queue_size, sizeof(spi_slave_transaction_t *));
spihost[host]->ret_queue=xQueueCreate(slave_config->queue_size, sizeof(spi_slave_transaction_t *));
spihost[host]->trans_queue = xQueueCreate(slave_config->queue_size, sizeof(spi_slave_transaction_t *));
spihost[host]->ret_queue = xQueueCreate(slave_config->queue_size, sizeof(spi_slave_transaction_t *));
if (!spihost[host]->trans_queue || !spihost[host]->ret_queue) goto nomem;
esp_intr_alloc(spicommon_irqsource_for_host(host), ESP_INTR_FLAG_INTRDISABLED, spi_intr, (void*)spihost[host], &spihost[host]->intr);
spihost[host]->hw=spicommon_hw_for_host(host);
esp_intr_alloc(spicommon_irqsource_for_host(host), ESP_INTR_FLAG_INTRDISABLED, spi_intr, (void *)spihost[host], &spihost[host]->intr);
spihost[host]->hw = spicommon_hw_for_host(host);
//Configure slave
spihost[host]->hw->clock.val=0;
spihost[host]->hw->user.val=0;
spihost[host]->hw->ctrl.val=0;
spihost[host]->hw->slave.wr_rd_buf_en=1; //not sure if needed
spihost[host]->hw->user.doutdin=1; //we only support full duplex
spihost[host]->hw->user.sio=0;
spihost[host]->hw->slave.slave_mode=1;
spihost[host]->hw->dma_conf.val |= SPI_OUT_RST|SPI_IN_RST|SPI_AHBM_RST|SPI_AHBM_FIFO_RST;
spihost[host]->hw->dma_out_link.start=0;
spihost[host]->hw->dma_in_link.start=0;
spihost[host]->hw->dma_conf.val &= ~(SPI_OUT_RST|SPI_IN_RST|SPI_AHBM_RST|SPI_AHBM_FIFO_RST);
spihost[host]->hw->dma_conf.out_data_burst_en=1;
spihost[host]->hw->slave.sync_reset=1;
spihost[host]->hw->slave.sync_reset=0;
spihost[host]->hw->clock.val = 0;
spihost[host]->hw->user.val = 0;
spihost[host]->hw->ctrl.val = 0;
spihost[host]->hw->slave.wr_rd_buf_en = 1; //not sure if needed
spihost[host]->hw->user.doutdin = 1; //we only support full duplex
spihost[host]->hw->user.sio = 0;
spihost[host]->hw->slave.slave_mode = 1;
spihost[host]->hw->dma_conf.val |= SPI_OUT_RST | SPI_IN_RST | SPI_AHBM_RST | SPI_AHBM_FIFO_RST;
spihost[host]->hw->dma_out_link.start = 0;
spihost[host]->hw->dma_in_link.start = 0;
spihost[host]->hw->dma_conf.val &= ~(SPI_OUT_RST | SPI_IN_RST | SPI_AHBM_RST | SPI_AHBM_FIFO_RST);
spihost[host]->hw->dma_conf.out_data_burst_en = 1;
spihost[host]->hw->slave.sync_reset = 1;
spihost[host]->hw->slave.sync_reset = 0;
bool nodelay=true;
spihost[host]->hw->ctrl.rd_bit_order=(slave_config->flags & SPI_SLAVE_RXBIT_LSBFIRST)?1:0;
spihost[host]->hw->ctrl.wr_bit_order=(slave_config->flags & SPI_SLAVE_TXBIT_LSBFIRST)?1:0;
if (slave_config->mode==0) {
spihost[host]->hw->pin.ck_idle_edge=0;
spihost[host]->hw->user.ck_i_edge=1;
spihost[host]->hw->ctrl2.miso_delay_mode=nodelay?0:2;
} else if (slave_config->mode==1) {
spihost[host]->hw->pin.ck_idle_edge=0;
spihost[host]->hw->user.ck_i_edge=0;
spihost[host]->hw->ctrl2.miso_delay_mode=nodelay?0:1;
} else if (slave_config->mode==2) {
spihost[host]->hw->pin.ck_idle_edge=1;
spihost[host]->hw->user.ck_i_edge=0;
spihost[host]->hw->ctrl2.miso_delay_mode=nodelay?0:1;
} else if (slave_config->mode==3) {
spihost[host]->hw->pin.ck_idle_edge=1;
spihost[host]->hw->user.ck_i_edge=1;
spihost[host]->hw->ctrl2.miso_delay_mode=nodelay?0:2;
bool nodelay = true;
spihost[host]->hw->ctrl.rd_bit_order = (slave_config->flags & SPI_SLAVE_RXBIT_LSBFIRST) ? 1 : 0;
spihost[host]->hw->ctrl.wr_bit_order = (slave_config->flags & SPI_SLAVE_TXBIT_LSBFIRST) ? 1 : 0;
if (slave_config->mode == 0) {
spihost[host]->hw->pin.ck_idle_edge = 0;
spihost[host]->hw->user.ck_i_edge = 1;
spihost[host]->hw->ctrl2.miso_delay_mode = nodelay ? 0 : 2;
} else if (slave_config->mode == 1) {
spihost[host]->hw->pin.ck_idle_edge = 0;
spihost[host]->hw->user.ck_i_edge = 0;
spihost[host]->hw->ctrl2.miso_delay_mode = nodelay ? 0 : 1;
} else if (slave_config->mode == 2) {
spihost[host]->hw->pin.ck_idle_edge = 1;
spihost[host]->hw->user.ck_i_edge = 0;
spihost[host]->hw->ctrl2.miso_delay_mode = nodelay ? 0 : 1;
} else if (slave_config->mode == 3) {
spihost[host]->hw->pin.ck_idle_edge = 1;
spihost[host]->hw->user.ck_i_edge = 1;
spihost[host]->hw->ctrl2.miso_delay_mode = nodelay ? 0 : 2;
}
//Reset DMA
spihost[host]->hw->dma_conf.val|=SPI_OUT_RST|SPI_IN_RST|SPI_AHBM_RST|SPI_AHBM_FIFO_RST;
spihost[host]->hw->dma_out_link.start=0;
spihost[host]->hw->dma_in_link.start=0;
spihost[host]->hw->dma_conf.val&=~(SPI_OUT_RST|SPI_IN_RST|SPI_AHBM_RST|SPI_AHBM_FIFO_RST);
spihost[host]->hw->dma_conf.val |= SPI_OUT_RST | SPI_IN_RST | SPI_AHBM_RST | SPI_AHBM_FIFO_RST;
spihost[host]->hw->dma_out_link.start = 0;
spihost[host]->hw->dma_in_link.start = 0;
spihost[host]->hw->dma_conf.val &= ~(SPI_OUT_RST | SPI_IN_RST | SPI_AHBM_RST | SPI_AHBM_FIFO_RST);
//Disable unneeded ints
spihost[host]->hw->slave.rd_buf_done=0;
spihost[host]->hw->slave.wr_buf_done=0;
spihost[host]->hw->slave.rd_sta_done=0;
spihost[host]->hw->slave.wr_sta_done=0;
spihost[host]->hw->slave.rd_buf_inten=0;
spihost[host]->hw->slave.wr_buf_inten=0;
spihost[host]->hw->slave.rd_sta_inten=0;
spihost[host]->hw->slave.wr_sta_inten=0;
spihost[host]->hw->slave.rd_buf_done = 0;
spihost[host]->hw->slave.wr_buf_done = 0;
spihost[host]->hw->slave.rd_sta_done = 0;
spihost[host]->hw->slave.wr_sta_done = 0;
spihost[host]->hw->slave.rd_buf_inten = 0;
spihost[host]->hw->slave.wr_buf_inten = 0;
spihost[host]->hw->slave.rd_sta_inten = 0;
spihost[host]->hw->slave.wr_sta_inten = 0;
//Force a transaction done interrupt. This interrupt won't fire yet because we initialized the SPI interrupt as
//disabled. This way, we can just enable the SPI interrupt and the interrupt handler will kick in, handling
//disabled. This way, we can just enable the SPI interrupt and the interrupt handler will kick in, handling
//any transactions that are queued.
spihost[host]->hw->slave.trans_inten=1;
spihost[host]->hw->slave.trans_done=1;
spihost[host]->hw->slave.trans_inten = 1;
spihost[host]->hw->slave.trans_done = 1;
return ESP_OK;
@ -176,7 +176,7 @@ nomem:
free(spihost[host]->dmadesc_rx);
}
free(spihost[host]);
spihost[host]=NULL;
spihost[host] = NULL;
spicommon_periph_free(host);
return ESP_ERR_NO_MEM;
}
@ -190,9 +190,9 @@ esp_err_t spi_slave_free(spi_host_device_t host)
free(spihost[host]->dmadesc_tx);
free(spihost[host]->dmadesc_rx);
free(spihost[host]);
spihost[host]=NULL;
spihost[host] = NULL;
spicommon_periph_free(host);
spihost[host]=NULL;
spihost[host] = NULL;
return ESP_OK;
}
@ -203,8 +203,8 @@ esp_err_t spi_slave_queue_trans(spi_host_device_t host, const spi_slave_transact
SPI_CHECK(VALID_HOST(host), "invalid host", ESP_ERR_INVALID_ARG);
SPI_CHECK(spihost[host], "host not slave", ESP_ERR_INVALID_ARG);
SPI_CHECK(trans_desc->length <= spihost[host]->max_transfer_sz*8, "data transfer > host maximum", ESP_ERR_INVALID_ARG);
r=xQueueSend(spihost[host]->trans_queue, (void*)&trans_desc, ticks_to_wait);
SPI_CHECK(trans_desc->length <= spihost[host]->max_transfer_sz * 8, "data transfer > host maximum", ESP_ERR_INVALID_ARG);
r = xQueueSend(spihost[host]->trans_queue, (void *)&trans_desc, ticks_to_wait);
if (!r) return ESP_ERR_TIMEOUT;
esp_intr_enable(spihost[host]->intr);
return ESP_OK;
@ -216,7 +216,7 @@ esp_err_t spi_slave_get_trans_result(spi_host_device_t host, spi_slave_transacti
BaseType_t r;
SPI_CHECK(VALID_HOST(host), "invalid host", ESP_ERR_INVALID_ARG);
SPI_CHECK(spihost[host], "host not slave", ESP_ERR_INVALID_ARG);
r=xQueueReceive(spihost[host]->ret_queue, (void*)trans_desc, ticks_to_wait);
r = xQueueReceive(spihost[host]->ret_queue, (void *)trans_desc, ticks_to_wait);
if (!r) return ESP_ERR_TIMEOUT;
return ESP_OK;
}
@ -227,16 +227,17 @@ esp_err_t spi_slave_transmit(spi_host_device_t host, spi_slave_transaction_t *tr
esp_err_t ret;
spi_slave_transaction_t *ret_trans;
//ToDo: check if any spi transfers in flight
ret=spi_slave_queue_trans(host, trans_desc, ticks_to_wait);
if (ret!=ESP_OK) return ret;
ret=spi_slave_get_trans_result(host, &ret_trans, ticks_to_wait);
if (ret!=ESP_OK) return ret;
assert(ret_trans==trans_desc);
ret = spi_slave_queue_trans(host, trans_desc, ticks_to_wait);
if (ret != ESP_OK) return ret;
ret = spi_slave_get_trans_result(host, &ret_trans, ticks_to_wait);
if (ret != ESP_OK) return ret;
assert(ret_trans == trans_desc);
return ESP_OK;
}
#ifdef DEBUG_SLAVE
static void dumpregs(spi_dev_t *hw) {
static void dumpregs(spi_dev_t *hw)
{
ets_printf("***REG DUMP ***\n");
ets_printf("mosi_dlen : %08X\n", hw->mosi_dlen.val);
ets_printf("miso_dlen : %08X\n", hw->miso_dlen.val);
@ -249,17 +250,18 @@ static void dumpregs(spi_dev_t *hw) {
}
static void dumpll(lldesc_t *ll) {
static void dumpll(lldesc_t *ll)
{
ets_printf("****LL DUMP****\n");
ets_printf("Size %d\n", ll->size);
ets_printf("Len: %d\n", ll->length);
ets_printf("Owner: %s\n", ll->owner?"dma":"cpu");
ets_printf("Owner: %s\n", ll->owner ? "dma" : "cpu");
}
#endif
static void IRAM_ATTR spi_slave_restart_after_dmareset(void *arg)
{
spi_slave_t *host=(spi_slave_t*)arg;
spi_slave_t *host = (spi_slave_t *)arg;
esp_intr_enable(host->intr);
}
@ -269,9 +271,9 @@ static void IRAM_ATTR spi_slave_restart_after_dmareset(void *arg)
static void IRAM_ATTR spi_intr(void *arg)
{
BaseType_t r;
BaseType_t do_yield=pdFALSE;
spi_slave_transaction_t *trans=NULL;
spi_slave_t *host=(spi_slave_t*)arg;
BaseType_t do_yield = pdFALSE;
spi_slave_transaction_t *trans = NULL;
spi_slave_t *host = (spi_slave_t *)arg;
#ifdef DEBUG_SLAVE
dumpregs(host->hw);
@ -284,20 +286,20 @@ static void IRAM_ATTR spi_intr(void *arg)
if (host->cur_trans) {
if (host->dma_chan == 0 && host->cur_trans->rx_buffer) {
//Copy result out
uint32_t *data=host->cur_trans->rx_buffer;
for (int x=0; x<host->cur_trans->length; x+=32) {
uint32_t *data = host->cur_trans->rx_buffer;
for (int x = 0; x < host->cur_trans->length; x += 32) {
uint32_t word;
int len=host->cur_trans->length-x;
if (len>32) len=32;
word=host->hw->data_buf[(x/32)];
memcpy(&data[x/32], &word, (len+7)/8);
int len = host->cur_trans->length - x;
if (len > 32) len = 32;
word = host->hw->data_buf[(x / 32)];
memcpy(&data[x / 32], &word, (len + 7) / 8);
}
} else if (host->dma_chan != 0 && host->cur_trans->rx_buffer) {
int i;
//In case CS goes high too soon, the transfer is aborted while the DMA channel still thinks it's going. This
//leads to issues later on, so in that case we need to reset the channel. The state can be detected because
//the DMA system doesn't give back the offending descriptor; the owner is still set to DMA.
for (i=0; host->dmadesc_rx[i].eof==0 && host->dmadesc_rx[i].owner==0; i++) ;
for (i = 0; host->dmadesc_rx[i].eof == 0 && host->dmadesc_rx[i].owner == 0; i++) ;
if (host->dmadesc_rx[i].owner) {
spicommon_dmaworkaround_req_reset(host->dma_chan, spi_slave_restart_after_dmareset, host);
}
@ -306,9 +308,9 @@ static void IRAM_ATTR spi_intr(void *arg)
//Okay, transaction is done.
//Return transaction descriptor.
xQueueSendFromISR(host->ret_queue, &host->cur_trans, &do_yield);
host->cur_trans=NULL;
host->cur_trans = NULL;
}
if (host->dma_chan!=0) {
if (host->dma_chan != 0) {
spicommon_dmaworkaround_idle(host->dma_chan);
if (spicommon_dmaworkaround_reset_in_progress()) {
//We need to wait for the reset to complete. Disable int (will be re-enabled on reset callback) and exit isr.
@ -319,71 +321,71 @@ static void IRAM_ATTR spi_intr(void *arg)
}
//Grab next transaction
r=xQueueReceiveFromISR(host->trans_queue, &trans, &do_yield);
r = xQueueReceiveFromISR(host->trans_queue, &trans, &do_yield);
if (!r) {
//No packet waiting. Disable interrupt.
esp_intr_disable(host->intr);
} else {
//We have a transaction. Send it.
host->hw->slave.trans_done=0; //clear int bit
host->cur_trans=trans;
host->hw->slave.trans_done = 0; //clear int bit
host->cur_trans = trans;
if (host->dma_chan != 0) {
spicommon_dmaworkaround_transfer_active(host->dma_chan);
host->hw->dma_conf.val |= SPI_OUT_RST|SPI_IN_RST|SPI_AHBM_RST|SPI_AHBM_FIFO_RST;
host->hw->dma_out_link.start=0;
host->hw->dma_in_link.start=0;
host->hw->dma_conf.val &= ~(SPI_OUT_RST|SPI_IN_RST|SPI_AHBM_RST|SPI_AHBM_FIFO_RST);
host->hw->dma_conf.out_data_burst_en=0;
host->hw->dma_conf.indscr_burst_en=0;
host->hw->dma_conf.outdscr_burst_en=0;
host->hw->dma_conf.val |= SPI_OUT_RST | SPI_IN_RST | SPI_AHBM_RST | SPI_AHBM_FIFO_RST;
host->hw->dma_out_link.start = 0;
host->hw->dma_in_link.start = 0;
host->hw->dma_conf.val &= ~(SPI_OUT_RST | SPI_IN_RST | SPI_AHBM_RST | SPI_AHBM_FIFO_RST);
host->hw->dma_conf.out_data_burst_en = 0;
host->hw->dma_conf.indscr_burst_en = 0;
host->hw->dma_conf.outdscr_burst_en = 0;
//Fill DMA descriptors
if (trans->rx_buffer) {
host->hw->user.usr_miso_highpart=0;
spicommon_setup_dma_desc_links(host->dmadesc_rx, ((trans->length+7)/8), trans->rx_buffer, true);
host->hw->dma_in_link.addr=(int)(&host->dmadesc_rx[0]) & 0xFFFFF;
host->hw->dma_in_link.start=1;
host->hw->user.usr_miso_highpart = 0;
spicommon_setup_dma_desc_links(host->dmadesc_rx, ((trans->length + 7) / 8), trans->rx_buffer, true);
host->hw->dma_in_link.addr = (int)(&host->dmadesc_rx[0]) & 0xFFFFF;
host->hw->dma_in_link.start = 1;
}
if (trans->tx_buffer) {
spicommon_setup_dma_desc_links(host->dmadesc_tx, (trans->length+7)/8, trans->tx_buffer, false);
host->hw->user.usr_mosi_highpart=0;
host->hw->dma_out_link.addr=(int)(&host->dmadesc_tx[0]) & 0xFFFFF;
host->hw->dma_out_link.start=1;
spicommon_setup_dma_desc_links(host->dmadesc_tx, (trans->length + 7) / 8, trans->tx_buffer, false);
host->hw->user.usr_mosi_highpart = 0;
host->hw->dma_out_link.addr = (int)(&host->dmadesc_tx[0]) & 0xFFFFF;
host->hw->dma_out_link.start = 1;
}
host->hw->slave.sync_reset=1;
host->hw->slave.sync_reset=0;
host->hw->slave.sync_reset = 1;
host->hw->slave.sync_reset = 0;
} else {
//No DMA. Turn off SPI and copy data to transmit buffers.
host->hw->cmd.usr=0;
host->hw->slave.sync_reset=1;
host->hw->slave.sync_reset=0;
host->hw->cmd.usr = 0;
host->hw->slave.sync_reset = 1;
host->hw->slave.sync_reset = 0;
host->hw->user.usr_miso_highpart=0;
host->hw->user.usr_mosi_highpart=0;
host->hw->user.usr_miso_highpart = 0;
host->hw->user.usr_mosi_highpart = 0;
if (trans->tx_buffer) {
const uint32_t *data=host->cur_trans->tx_buffer;
for (int x=0; x<trans->length; x+=32) {
const uint32_t *data = host->cur_trans->tx_buffer;
for (int x = 0; x < trans->length; x += 32) {
uint32_t word;
memcpy(&word, &data[x/32], 4);
host->hw->data_buf[(x/32)]=word;
memcpy(&word, &data[x / 32], 4);
host->hw->data_buf[(x / 32)] = word;
}
}
}
host->hw->slv_rd_bit.slv_rdata_bit=0;
host->hw->slv_wrbuf_dlen.bit_len=trans->length-1;
host->hw->slv_rdbuf_dlen.bit_len=trans->length-1;
host->hw->mosi_dlen.usr_mosi_dbitlen=trans->length-1;
host->hw->miso_dlen.usr_miso_dbitlen=trans->length-1;
host->hw->user.usr_mosi=(trans->tx_buffer==NULL)?0:1;
host->hw->user.usr_miso=(trans->rx_buffer==NULL)?0:1;
host->hw->slv_rd_bit.slv_rdata_bit = 0;
host->hw->slv_wrbuf_dlen.bit_len = trans->length - 1;
host->hw->slv_rdbuf_dlen.bit_len = trans->length - 1;
host->hw->mosi_dlen.usr_mosi_dbitlen = trans->length - 1;
host->hw->miso_dlen.usr_miso_dbitlen = trans->length - 1;
host->hw->user.usr_mosi = (trans->tx_buffer == NULL) ? 0 : 1;
host->hw->user.usr_miso = (trans->rx_buffer == NULL) ? 0 : 1;
//Kick off transfer
host->hw->cmd.usr=1;
host->hw->cmd.usr = 1;
if (host->cfg.post_setup_cb) host->cfg.post_setup_cb(trans);
}
if (do_yield) portYIELD_FROM_ISR();