SPI: More MR issues fixed, style fixup

Jeroen Domburg 2017-04-27 11:24:44 +08:00
parent bf0c32364a
commit 04b901e629
5 changed files with 299 additions and 283 deletions

View file

@ -169,16 +169,6 @@ spi_dev_t *spicommon_hw_for_host(spi_host_device_t host);
int spicommon_irqsource_for_host(spi_host_device_t host); int spicommon_irqsource_for_host(spi_host_device_t host);
/**
* @note In some (well-defined) cases in the ESP32 (at least rev v.0 and v.1), a SPI DMA channel will get confused. This can be remedied
* by resetting the SPI DMA hardware in case this happens. Unfortunately, the reset knob used for thsi will reset _both_ DMA channels, and
* as such can only done safely when both DMA channels are idle. These functions coordinate this.
*
* Essentially, when a reset is needed, a driver can request this using spicommon_dmaworkaround_req_reset. This is supposed to be called
* with an user-supplied function as an argument. If both DMA channels are idle, this call will reset the DMA subsystem and return true.
* If the other DMA channel is still busy, it will return false; as soon as the other DMA channel is done, however, it will reset the
* DMA subsystem and call the callback. The callback is then supposed to be used to continue the SPI drivers activity.
*/
/** /**
@ -190,6 +180,15 @@ typedef void(*dmaworkaround_cb_t)(void *arg);
/** /**
* @brief Request a reset for a certain DMA channel * @brief Request a reset for a certain DMA channel
* *
* @note In some (well-defined) cases in the ESP32 (at least rev v.0 and v.1), a SPI DMA channel will get confused. This can be remedied
* by resetting the SPI DMA hardware in case this happens. Unfortunately, the reset knob used for this will reset _both_ DMA channels, and
* as such can only be done safely when both DMA channels are idle. These functions coordinate this.
*
* Essentially, when a reset is needed, a driver can request this using spicommon_dmaworkaround_req_reset. This is supposed to be called
* with a user-supplied function as an argument. If both DMA channels are idle, this call will reset the DMA subsystem and return true.
* If the other DMA channel is still busy, it will return false; as soon as the other DMA channel is done, however, it will reset the
* DMA subsystem and call the callback. The callback should then be used to continue the SPI driver's activity.
*
* @param dmachan DMA channel associated with the SPI host that needs a reset * @param dmachan DMA channel associated with the SPI host that needs a reset
* @param cb Callback to call in case DMA channel cannot be reset immediately * @param cb Callback to call in case DMA channel cannot be reset immediately
* @param arg Argument to the callback * @param arg Argument to the callback
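For illustration, a minimal sketch of how a driver might use this coordination API, modelled on the slave driver later in this commit (``my_drv_t``, ``my_drv_dma_reset_done`` and ``my_drv_handle_dma_glitch`` are placeholder names, not part of the driver):

    // Illustrative driver state; not a real ESP-IDF type.
    typedef struct {
        intr_handle_t intr;
        int dma_chan;
    } my_drv_t;

    // Called by spi_common once the other DMA channel has gone idle and the
    // shared DMA reset has been performed; resume normal operation here.
    static void IRAM_ATTR my_drv_dma_reset_done(void *arg)
    {
        my_drv_t *drv = (my_drv_t *)arg;
        esp_intr_enable(drv->intr);
    }

    // Somewhere in the driver, when a confused DMA channel is detected:
    static void my_drv_handle_dma_glitch(my_drv_t *drv)
    {
        if (!spicommon_dmaworkaround_req_reset(drv->dma_chan, my_drv_dma_reset_done, drv)) {
            // Other channel still busy: the reset is deferred and the callback fires later.
            esp_intr_disable(drv->intr);
        }
        // A true return means both channels were idle and DMA has already been reset here.
    }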

View file

@ -31,7 +31,7 @@ extern "C"
#define SPI_DEVICE_TXBIT_LSBFIRST (1<<0) ///< Transmit command/address/data LSB first instead of the default MSB first #define SPI_DEVICE_TXBIT_LSBFIRST (1<<0) ///< Transmit command/address/data LSB first instead of the default MSB first
#define SPI_DEVICE_RXBIT_LSBFIRST (1<<1) ///< Receive data LSB first instead of the default MSB first #define SPI_DEVICE_RXBIT_LSBFIRST (1<<1) ///< Receive data LSB first instead of the default MSB first
#define SPI_DEVICE_BIT_LSBFIRST (SPI_DEVICE_TXBIT_LSBFIRST|SPI_DEVICE_RXBIT_LSBFIRST) ///< Transmit and receive LSB first #define SPI_DEVICE_BIT_LSBFIRST (SPI_DEVICE_TXBIT_LSBFIRST|SPI_DEVICE_RXBIT_LSBFIRST) ///< Transmit and receive LSB first
#define SPI_DEVICE_3WIRE (1<<2) ///< Use spid for both sending and receiving data #define SPI_DEVICE_3WIRE (1<<2) ///< Use MOSI (=spid) for both sending and receiving data
#define SPI_DEVICE_POSITIVE_CS (1<<3) ///< Make CS positive during a transaction instead of negative #define SPI_DEVICE_POSITIVE_CS (1<<3) ///< Make CS positive during a transaction instead of negative
#define SPI_DEVICE_HALFDUPLEX (1<<4) ///< Transmit data before receiving it, instead of simultaneously #define SPI_DEVICE_HALFDUPLEX (1<<4) ///< Transmit data before receiving it, instead of simultaneously
#define SPI_DEVICE_CLK_AS_CS (1<<5) ///< Output clock on CS line if CS is active #define SPI_DEVICE_CLK_AS_CS (1<<5) ///< Output clock on CS line if CS is active
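As an illustration of how these flags are meant to be used, they are OR'ed together into the ``flags`` member of the device configuration; a minimal sketch, assuming the standard ``spi_device_interface_config_t`` from the master driver API (pin and clock numbers are example values):

    spi_device_interface_config_t devcfg = {
        .clock_speed_hz = 1 * 1000 * 1000,   // 1 MHz, example value
        .mode = 0,                           // SPI mode 0
        .spics_io_num = 22,                  // example CS GPIO
        .queue_size = 3,
        // Half-duplex device that clocks command/address/data out LSB first.
        .flags = SPI_DEVICE_HALFDUPLEX | SPI_DEVICE_TXBIT_LSBFIRST,
    };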

View file

@ -59,7 +59,7 @@ struct spi_slave_transaction_t {
}; };
/** /**
* @brief Initialize a SPI bus * @brief Initialize a SPI bus as a slave interface
* *
* @warning For now, only supports HSPI and VSPI. * @warning For now, only supports HSPI and VSPI.
* *
@ -92,10 +92,13 @@ esp_err_t spi_slave_free(spi_host_device_t host);
/** /**
* @brief Queue a SPI transaction for execution * @brief Queue a SPI transaction for execution
* *
* This will queue a transaction for the master to pick it up. If the queue (specified in ``spi_slave_initialize``) * Queues a SPI transaction to be executed by this slave device. (The transaction queue size was specified when the slave
* is not full, this function will return directly; the actual transaction will be done if there aren't any * device was initialised via spi_slave_initialize.) This function may block if the queue is full (depending on the
* unhandled transactions before it and the master initiates a SPI transaction by pulling down CS and sending out * ticks_to_wait parameter). No SPI operation is directly initiated by this function, the next queued transaction
* clock signals. * will happen when the master initiates a SPI transaction by pulling down CS and sending out clock signals.
*
* This function hands over ownership of the buffers in ``trans_desc`` to the SPI slave driver; the application is
* not to access this memory until ``spi_slave_get_trans_result`` is called to hand ownership back to the application.
* *
* @param host SPI peripheral that is acting as a slave * @param host SPI peripheral that is acting as a slave
* @param trans_desc Description of transaction to execute. Not const because we may want to write status back * @param trans_desc Description of transaction to execute. Not const because we may want to write status back
@ -117,8 +120,10 @@ esp_err_t spi_slave_queue_trans(spi_host_device_t host, const spi_slave_transact
* completed transaction so software can inspect the result and e.g. free the memory or * completed transaction so software can inspect the result and e.g. free the memory or
* re-use the buffers. * re-use the buffers.
* *
* It is mandatory to eventually use this function for any transaction queued by ``spi_slave_queue_trans``.
*
* @param host SPI peripheral that is acting as a slave * @param host SPI peripheral that is acting as a slave
* @param trans_desc Pointer to variable able to contain a pointer to the description of the * @param[out] trans_desc Pointer to variable able to contain a pointer to the description of the
* transaction that is executed * transaction that is executed
* @param ticks_to_wait Ticks to wait until there's a returned item; use portMAX_DELAY to never time * @param ticks_to_wait Ticks to wait until there's a returned item; use portMAX_DELAY to never time
* out. * out.
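Putting the two calls together, a minimal sketch of the queue/get pattern described above (``HSPI_HOST`` and the 128-byte buffers are example choices; when a DMA channel is used the buffers must be DMA-capable):

    static uint8_t sendbuf[128];
    static uint8_t recvbuf[128];

    spi_slave_transaction_t t = {
        .length = 128 * 8,        // transaction length is given in bits
        .tx_buffer = sendbuf,
        .rx_buffer = recvbuf,
    };
    // Hand the buffers to the driver; do not touch them until they are returned.
    esp_err_t err = spi_slave_queue_trans(HSPI_HOST, &t, portMAX_DELAY);

    spi_slave_transaction_t *done;
    // Blocks until the master has clocked the transaction; *done will point back to t.
    err = spi_slave_get_trans_result(HSPI_HOST, &done, portMAX_DELAY);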

View file

@ -77,102 +77,106 @@ typedef struct {
/* /*
Bunch of constants for every SPI peripheral: GPIO signals, irqs, hw addr of registers etc Bunch of constants for every SPI peripheral: GPIO signals, irqs, hw addr of registers etc
*/ */
static const spi_signal_conn_t io_signal[3]={ static const spi_signal_conn_t io_signal[3] = {
{ {
.spiclk_out=SPICLK_OUT_IDX, .spiclk_out = SPICLK_OUT_IDX,
.spiclk_in=SPICLK_IN_IDX, .spiclk_in = SPICLK_IN_IDX,
.spid_out=SPID_OUT_IDX, .spid_out = SPID_OUT_IDX,
.spiq_out=SPIQ_OUT_IDX, .spiq_out = SPIQ_OUT_IDX,
.spiwp_out=SPIWP_OUT_IDX, .spiwp_out = SPIWP_OUT_IDX,
.spihd_out=SPIHD_OUT_IDX, .spihd_out = SPIHD_OUT_IDX,
.spid_in=SPID_IN_IDX, .spid_in = SPID_IN_IDX,
.spiq_in=SPIQ_IN_IDX, .spiq_in = SPIQ_IN_IDX,
.spiwp_in=SPIWP_IN_IDX, .spiwp_in = SPIWP_IN_IDX,
.spihd_in=SPIHD_IN_IDX, .spihd_in = SPIHD_IN_IDX,
.spics_out={SPICS0_OUT_IDX, SPICS1_OUT_IDX, SPICS2_OUT_IDX}, .spics_out = {SPICS0_OUT_IDX, SPICS1_OUT_IDX, SPICS2_OUT_IDX},
.spics_in=SPICS0_IN_IDX, .spics_in = SPICS0_IN_IDX,
.spiclk_native=6, .spiclk_native = 6,
.spid_native=8, .spid_native = 8,
.spiq_native=7, .spiq_native = 7,
.spiwp_native=10, .spiwp_native = 10,
.spihd_native=9, .spihd_native = 9,
.spics0_native=11, .spics0_native = 11,
.irq=ETS_SPI1_INTR_SOURCE, .irq = ETS_SPI1_INTR_SOURCE,
.irq_dma=ETS_SPI1_DMA_INTR_SOURCE, .irq_dma = ETS_SPI1_DMA_INTR_SOURCE,
.module=PERIPH_SPI_MODULE, .module = PERIPH_SPI_MODULE,
.hw=&SPI1 .hw = &SPI1
}, { }, {
.spiclk_out=HSPICLK_OUT_IDX, .spiclk_out = HSPICLK_OUT_IDX,
.spiclk_in=HSPICLK_IN_IDX, .spiclk_in = HSPICLK_IN_IDX,
.spid_out=HSPID_OUT_IDX, .spid_out = HSPID_OUT_IDX,
.spiq_out=HSPIQ_OUT_IDX, .spiq_out = HSPIQ_OUT_IDX,
.spiwp_out=HSPIWP_OUT_IDX, .spiwp_out = HSPIWP_OUT_IDX,
.spihd_out=HSPIHD_OUT_IDX, .spihd_out = HSPIHD_OUT_IDX,
.spid_in=HSPID_IN_IDX, .spid_in = HSPID_IN_IDX,
.spiq_in=HSPIQ_IN_IDX, .spiq_in = HSPIQ_IN_IDX,
.spiwp_in=HSPIWP_IN_IDX, .spiwp_in = HSPIWP_IN_IDX,
.spihd_in=HSPIHD_IN_IDX, .spihd_in = HSPIHD_IN_IDX,
.spics_out={HSPICS0_OUT_IDX, HSPICS1_OUT_IDX, HSPICS2_OUT_IDX}, .spics_out = {HSPICS0_OUT_IDX, HSPICS1_OUT_IDX, HSPICS2_OUT_IDX},
.spics_in=HSPICS0_IN_IDX, .spics_in = HSPICS0_IN_IDX,
.spiclk_native=14, .spiclk_native = 14,
.spid_native=13, .spid_native = 13,
.spiq_native=12, .spiq_native = 12,
.spiwp_native=2, .spiwp_native = 2,
.spihd_native=4, .spihd_native = 4,
.spics0_native=15, .spics0_native = 15,
.irq=ETS_SPI2_INTR_SOURCE, .irq = ETS_SPI2_INTR_SOURCE,
.irq_dma=ETS_SPI2_DMA_INTR_SOURCE, .irq_dma = ETS_SPI2_DMA_INTR_SOURCE,
.module=PERIPH_HSPI_MODULE, .module = PERIPH_HSPI_MODULE,
.hw=&SPI2 .hw = &SPI2
}, { }, {
.spiclk_out=VSPICLK_OUT_IDX, .spiclk_out = VSPICLK_OUT_IDX,
.spiclk_in=VSPICLK_IN_IDX, .spiclk_in = VSPICLK_IN_IDX,
.spid_out=VSPID_OUT_IDX, .spid_out = VSPID_OUT_IDX,
.spiq_out=VSPIQ_OUT_IDX, .spiq_out = VSPIQ_OUT_IDX,
.spiwp_out=VSPIWP_OUT_IDX, .spiwp_out = VSPIWP_OUT_IDX,
.spihd_out=VSPIHD_OUT_IDX, .spihd_out = VSPIHD_OUT_IDX,
.spid_in=VSPID_IN_IDX, .spid_in = VSPID_IN_IDX,
.spiq_in=VSPIQ_IN_IDX, .spiq_in = VSPIQ_IN_IDX,
.spiwp_in=VSPIWP_IN_IDX, .spiwp_in = VSPIWP_IN_IDX,
.spihd_in=VSPIHD_IN_IDX, .spihd_in = VSPIHD_IN_IDX,
.spics_out={VSPICS0_OUT_IDX, VSPICS1_OUT_IDX, VSPICS2_OUT_IDX}, .spics_out = {VSPICS0_OUT_IDX, VSPICS1_OUT_IDX, VSPICS2_OUT_IDX},
.spics_in=VSPICS0_IN_IDX, .spics_in = VSPICS0_IN_IDX,
.spiclk_native=18, .spiclk_native = 18,
.spid_native=23, .spid_native = 23,
.spiq_native=19, .spiq_native = 19,
.spiwp_native=22, .spiwp_native = 22,
.spihd_native=21, .spihd_native = 21,
.spics0_native=5, .spics0_native = 5,
.irq=ETS_SPI3_INTR_SOURCE, .irq = ETS_SPI3_INTR_SOURCE,
.irq_dma=ETS_SPI3_DMA_INTR_SOURCE, .irq_dma = ETS_SPI3_DMA_INTR_SOURCE,
.module=PERIPH_VSPI_MODULE, .module = PERIPH_VSPI_MODULE,
.hw=&SPI3 .hw = &SPI3
} }
}; };
//Periph 1 is 'claimed' by SPI flash code. //Periph 1 is 'claimed' by SPI flash code.
static bool spi_periph_claimed[3]={true, false, false}; static bool spi_periph_claimed[3] = {true, false, false};
//Returns true if this peripheral is successfully claimed, false if otherwise. //Returns true if this peripheral is successfully claimed, false if otherwise.
bool spicommon_periph_claim(spi_host_device_t host) { bool spicommon_periph_claim(spi_host_device_t host)
{
bool ret = __sync_bool_compare_and_swap(&spi_periph_claimed[host], false, true); bool ret = __sync_bool_compare_and_swap(&spi_periph_claimed[host], false, true);
if (ret) periph_module_enable(io_signal[host].module); if (ret) periph_module_enable(io_signal[host].module);
return ret; return ret;
} }
//Returns true if this peripheral is successfully freed, false if otherwise. //Returns true if this peripheral is successfully freed, false if otherwise.
bool spicommon_periph_free(spi_host_device_t host) { bool spicommon_periph_free(spi_host_device_t host)
{
bool ret = __sync_bool_compare_and_swap(&spi_periph_claimed[host], true, false); bool ret = __sync_bool_compare_and_swap(&spi_periph_claimed[host], true, false);
if (ret) periph_module_disable(io_signal[host].module); if (ret) periph_module_disable(io_signal[host].module);
return ret; return ret;
} }
int spicommon_irqsource_for_host(spi_host_device_t host) { int spicommon_irqsource_for_host(spi_host_device_t host)
{
return io_signal[host].irq; return io_signal[host].irq;
} }
spi_dev_t *spicommon_hw_for_host(spi_host_device_t host) { spi_dev_t *spicommon_hw_for_host(spi_host_device_t host)
{
return io_signal[host].hw; return io_signal[host].hw;
} }
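As a usage sketch (not part of this file), the claim/free pair is intended to bracket a driver's use of a peripheral:

    // At driver init: atomically claim the peripheral and enable its module clock.
    if (!spicommon_periph_claim(HSPI_HOST)) {
        return ESP_ERR_INVALID_STATE;                     // already claimed by another driver
    }
    spi_dev_t *hw = spicommon_hw_for_host(HSPI_HOST);     // register block for this host
    int irqsrc = spicommon_irqsource_for_host(HSPI_HOST); // interrupt source to allocate
    // ... configure hw, allocate the interrupt, run the driver ...

    // At driver teardown: disable the module clock again and release the claim.
    spicommon_periph_free(HSPI_HOST);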
@ -183,27 +187,27 @@ config can be done using the IOMUX instead of using the GPIO matrix.
*/ */
esp_err_t spicommon_bus_initialize_io(spi_host_device_t host, const spi_bus_config_t *bus_config, int dma_chan, int flags, bool *is_native) esp_err_t spicommon_bus_initialize_io(spi_host_device_t host, const spi_bus_config_t *bus_config, int dma_chan, int flags, bool *is_native)
{ {
bool native=true; bool native = true;
bool use_quad=(flags&SPICOMMON_BUSFLAG_QUAD)?true:false; bool use_quad = (flags & SPICOMMON_BUSFLAG_QUAD) != 0;
SPI_CHECK(bus_config->mosi_io_num<0 || GPIO_IS_VALID_OUTPUT_GPIO(bus_config->mosi_io_num), "spid pin invalid", ESP_ERR_INVALID_ARG); SPI_CHECK(bus_config->mosi_io_num < 0 || GPIO_IS_VALID_OUTPUT_GPIO(bus_config->mosi_io_num), "spid pin invalid", ESP_ERR_INVALID_ARG);
SPI_CHECK(bus_config->sclk_io_num<0 || GPIO_IS_VALID_OUTPUT_GPIO(bus_config->sclk_io_num), "spiclk pin invalid", ESP_ERR_INVALID_ARG); SPI_CHECK(bus_config->sclk_io_num < 0 || GPIO_IS_VALID_OUTPUT_GPIO(bus_config->sclk_io_num), "spiclk pin invalid", ESP_ERR_INVALID_ARG);
SPI_CHECK(bus_config->miso_io_num<0 || GPIO_IS_VALID_GPIO(bus_config->miso_io_num), "spiq pin invalid", ESP_ERR_INVALID_ARG); SPI_CHECK(bus_config->miso_io_num < 0 || GPIO_IS_VALID_GPIO(bus_config->miso_io_num), "spiq pin invalid", ESP_ERR_INVALID_ARG);
if (use_quad) { if (use_quad) {
SPI_CHECK(bus_config->quadwp_io_num<0 || GPIO_IS_VALID_OUTPUT_GPIO(bus_config->quadwp_io_num), "spiwp pin invalid", ESP_ERR_INVALID_ARG); SPI_CHECK(bus_config->quadwp_io_num < 0 || GPIO_IS_VALID_OUTPUT_GPIO(bus_config->quadwp_io_num), "spiwp pin invalid", ESP_ERR_INVALID_ARG);
SPI_CHECK(bus_config->quadhd_io_num<0 || GPIO_IS_VALID_OUTPUT_GPIO(bus_config->quadhd_io_num), "spihd pin invalid", ESP_ERR_INVALID_ARG); SPI_CHECK(bus_config->quadhd_io_num < 0 || GPIO_IS_VALID_OUTPUT_GPIO(bus_config->quadhd_io_num), "spihd pin invalid", ESP_ERR_INVALID_ARG);
} }
//Check if the selected pins correspond to the native pins of the peripheral //Check if the selected pins correspond to the native pins of the peripheral
if (bus_config->mosi_io_num >= 0 && bus_config->mosi_io_num!=io_signal[host].spid_native) native=false; if (bus_config->mosi_io_num >= 0 && bus_config->mosi_io_num != io_signal[host].spid_native) native = false;
if (bus_config->miso_io_num >= 0 && bus_config->miso_io_num!=io_signal[host].spiq_native) native=false; if (bus_config->miso_io_num >= 0 && bus_config->miso_io_num != io_signal[host].spiq_native) native = false;
if (bus_config->sclk_io_num >= 0 && bus_config->sclk_io_num!=io_signal[host].spiclk_native) native=false; if (bus_config->sclk_io_num >= 0 && bus_config->sclk_io_num != io_signal[host].spiclk_native) native = false;
if (use_quad) { if (use_quad) {
if (bus_config->quadwp_io_num >= 0 && bus_config->quadwp_io_num!=io_signal[host].spiwp_native) native=false; if (bus_config->quadwp_io_num >= 0 && bus_config->quadwp_io_num != io_signal[host].spiwp_native) native = false;
if (bus_config->quadhd_io_num >= 0 && bus_config->quadhd_io_num!=io_signal[host].spihd_native) native=false; if (bus_config->quadhd_io_num >= 0 && bus_config->quadhd_io_num != io_signal[host].spihd_native) native = false;
} }
*is_native=native; *is_native = native;
if (native) { if (native) {
//All SPI native pin selections resolve to 1, so we put that here instead of trying to figure //All SPI native pin selections resolve to 1, so we put that here instead of trying to figure
@ -215,31 +219,31 @@ esp_err_t spicommon_bus_initialize_io(spi_host_device_t host, const spi_bus_conf
if (bus_config->sclk_io_num > 0) PIN_FUNC_SELECT(GPIO_PIN_MUX_REG[bus_config->sclk_io_num], 1); if (bus_config->sclk_io_num > 0) PIN_FUNC_SELECT(GPIO_PIN_MUX_REG[bus_config->sclk_io_num], 1);
} else { } else {
//Use GPIO //Use GPIO
if (bus_config->mosi_io_num>0) { if (bus_config->mosi_io_num > 0) {
PIN_FUNC_SELECT(GPIO_PIN_MUX_REG[bus_config->mosi_io_num], PIN_FUNC_GPIO); PIN_FUNC_SELECT(GPIO_PIN_MUX_REG[bus_config->mosi_io_num], PIN_FUNC_GPIO);
gpio_set_direction(bus_config->mosi_io_num, GPIO_MODE_INPUT_OUTPUT); gpio_set_direction(bus_config->mosi_io_num, GPIO_MODE_INPUT_OUTPUT);
gpio_matrix_out(bus_config->mosi_io_num, io_signal[host].spid_out, false, false); gpio_matrix_out(bus_config->mosi_io_num, io_signal[host].spid_out, false, false);
gpio_matrix_in(bus_config->mosi_io_num, io_signal[host].spid_in, false); gpio_matrix_in(bus_config->mosi_io_num, io_signal[host].spid_in, false);
} }
if (bus_config->miso_io_num>0) { if (bus_config->miso_io_num > 0) {
PIN_FUNC_SELECT(GPIO_PIN_MUX_REG[bus_config->miso_io_num], PIN_FUNC_GPIO); PIN_FUNC_SELECT(GPIO_PIN_MUX_REG[bus_config->miso_io_num], PIN_FUNC_GPIO);
gpio_set_direction(bus_config->miso_io_num, GPIO_MODE_INPUT_OUTPUT); gpio_set_direction(bus_config->miso_io_num, GPIO_MODE_INPUT_OUTPUT);
gpio_matrix_out(bus_config->miso_io_num, io_signal[host].spiq_out, false, false); gpio_matrix_out(bus_config->miso_io_num, io_signal[host].spiq_out, false, false);
gpio_matrix_in(bus_config->miso_io_num, io_signal[host].spiq_in, false); gpio_matrix_in(bus_config->miso_io_num, io_signal[host].spiq_in, false);
} }
if (use_quad && bus_config->quadwp_io_num>0) { if (use_quad && bus_config->quadwp_io_num > 0) {
PIN_FUNC_SELECT(GPIO_PIN_MUX_REG[bus_config->quadwp_io_num], PIN_FUNC_GPIO); PIN_FUNC_SELECT(GPIO_PIN_MUX_REG[bus_config->quadwp_io_num], PIN_FUNC_GPIO);
gpio_set_direction(bus_config->quadwp_io_num, GPIO_MODE_INPUT_OUTPUT); gpio_set_direction(bus_config->quadwp_io_num, GPIO_MODE_INPUT_OUTPUT);
gpio_matrix_out(bus_config->quadwp_io_num, io_signal[host].spiwp_out, false, false); gpio_matrix_out(bus_config->quadwp_io_num, io_signal[host].spiwp_out, false, false);
gpio_matrix_in(bus_config->quadwp_io_num, io_signal[host].spiwp_in, false); gpio_matrix_in(bus_config->quadwp_io_num, io_signal[host].spiwp_in, false);
} }
if (use_quad && bus_config->quadhd_io_num>0) { if (use_quad && bus_config->quadhd_io_num > 0) {
PIN_FUNC_SELECT(GPIO_PIN_MUX_REG[bus_config->quadhd_io_num], PIN_FUNC_GPIO); PIN_FUNC_SELECT(GPIO_PIN_MUX_REG[bus_config->quadhd_io_num], PIN_FUNC_GPIO);
gpio_set_direction(bus_config->quadhd_io_num, GPIO_MODE_INPUT_OUTPUT); gpio_set_direction(bus_config->quadhd_io_num, GPIO_MODE_INPUT_OUTPUT);
gpio_matrix_out(bus_config->quadhd_io_num, io_signal[host].spihd_out, false, false); gpio_matrix_out(bus_config->quadhd_io_num, io_signal[host].spihd_out, false, false);
gpio_matrix_in(bus_config->quadhd_io_num, io_signal[host].spihd_in, false); gpio_matrix_in(bus_config->quadhd_io_num, io_signal[host].spihd_in, false);
} }
if (bus_config->sclk_io_num>0) { if (bus_config->sclk_io_num > 0) {
PIN_FUNC_SELECT(GPIO_PIN_MUX_REG[bus_config->sclk_io_num], PIN_FUNC_GPIO); PIN_FUNC_SELECT(GPIO_PIN_MUX_REG[bus_config->sclk_io_num], PIN_FUNC_GPIO);
gpio_set_direction(bus_config->sclk_io_num, GPIO_MODE_INPUT_OUTPUT); gpio_set_direction(bus_config->sclk_io_num, GPIO_MODE_INPUT_OUTPUT);
gpio_matrix_out(bus_config->sclk_io_num, io_signal[host].spiclk_out, false, false); gpio_matrix_out(bus_config->sclk_io_num, io_signal[host].spiclk_out, false, false);
@ -255,9 +259,10 @@ esp_err_t spicommon_bus_initialize_io(spi_host_device_t host, const spi_bus_conf
//Find any pin with output muxed to ``func`` and reset it to GPIO //Find any pin with output muxed to ``func`` and reset it to GPIO
static void reset_func_to_gpio(int func) { static void reset_func_to_gpio(int func)
for (int x=0; x<GPIO_PIN_COUNT; x++) { {
if (GPIO_IS_VALID_GPIO(x) && (READ_PERI_REG(GPIO_FUNC0_OUT_SEL_CFG_REG+(x*4))&GPIO_FUNC0_OUT_SEL_M)==func) { for (int x = 0; x < GPIO_PIN_COUNT; x++) {
if (GPIO_IS_VALID_GPIO(x) && (READ_PERI_REG(GPIO_FUNC0_OUT_SEL_CFG_REG + (x * 4))&GPIO_FUNC0_OUT_SEL_M) == func) {
gpio_matrix_out(x, SIG_GPIO_OUT_IDX, false, false); gpio_matrix_out(x, SIG_GPIO_OUT_IDX, false, false);
} }
} }
@ -266,11 +271,11 @@ static void reset_func_to_gpio(int func) {
esp_err_t spicommon_bus_free_io(spi_host_device_t host) esp_err_t spicommon_bus_free_io(spi_host_device_t host)
{ {
if (REG_GET_FIELD(GPIO_PIN_MUX_REG[io_signal[host].spid_native], MCU_SEL)==1) PIN_FUNC_SELECT(GPIO_PIN_MUX_REG[io_signal[host].spid_native], PIN_FUNC_GPIO); if (REG_GET_FIELD(GPIO_PIN_MUX_REG[io_signal[host].spid_native], MCU_SEL) == 1) PIN_FUNC_SELECT(GPIO_PIN_MUX_REG[io_signal[host].spid_native], PIN_FUNC_GPIO);
if (REG_GET_FIELD(GPIO_PIN_MUX_REG[io_signal[host].spiq_native], MCU_SEL)==1) PIN_FUNC_SELECT(GPIO_PIN_MUX_REG[io_signal[host].spiq_native], PIN_FUNC_GPIO); if (REG_GET_FIELD(GPIO_PIN_MUX_REG[io_signal[host].spiq_native], MCU_SEL) == 1) PIN_FUNC_SELECT(GPIO_PIN_MUX_REG[io_signal[host].spiq_native], PIN_FUNC_GPIO);
if (REG_GET_FIELD(GPIO_PIN_MUX_REG[io_signal[host].spiclk_native], MCU_SEL)==1) PIN_FUNC_SELECT(GPIO_PIN_MUX_REG[io_signal[host].spiclk_native], PIN_FUNC_GPIO); if (REG_GET_FIELD(GPIO_PIN_MUX_REG[io_signal[host].spiclk_native], MCU_SEL) == 1) PIN_FUNC_SELECT(GPIO_PIN_MUX_REG[io_signal[host].spiclk_native], PIN_FUNC_GPIO);
if (REG_GET_FIELD(GPIO_PIN_MUX_REG[io_signal[host].spiwp_native], MCU_SEL)==1) PIN_FUNC_SELECT(GPIO_PIN_MUX_REG[io_signal[host].spiwp_native], PIN_FUNC_GPIO); if (REG_GET_FIELD(GPIO_PIN_MUX_REG[io_signal[host].spiwp_native], MCU_SEL) == 1) PIN_FUNC_SELECT(GPIO_PIN_MUX_REG[io_signal[host].spiwp_native], PIN_FUNC_GPIO);
if (REG_GET_FIELD(GPIO_PIN_MUX_REG[io_signal[host].spihd_native], MCU_SEL)==1) PIN_FUNC_SELECT(GPIO_PIN_MUX_REG[io_signal[host].spihd_native], PIN_FUNC_GPIO); if (REG_GET_FIELD(GPIO_PIN_MUX_REG[io_signal[host].spihd_native], MCU_SEL) == 1) PIN_FUNC_SELECT(GPIO_PIN_MUX_REG[io_signal[host].spihd_native], PIN_FUNC_GPIO);
reset_func_to_gpio(io_signal[host].spid_out); reset_func_to_gpio(io_signal[host].spid_out);
reset_func_to_gpio(io_signal[host].spiq_out); reset_func_to_gpio(io_signal[host].spiq_out);
reset_func_to_gpio(io_signal[host].spiclk_out); reset_func_to_gpio(io_signal[host].spiclk_out);
@ -279,50 +284,53 @@ esp_err_t spicommon_bus_free_io(spi_host_device_t host)
return ESP_OK; return ESP_OK;
} }
void spicommon_cs_initialize(spi_host_device_t host, int cs_io_num, int cs_num, int force_gpio_matrix) { void spicommon_cs_initialize(spi_host_device_t host, int cs_io_num, int cs_num, int force_gpio_matrix)
if (!force_gpio_matrix && cs_io_num == io_signal[host].spics0_native && cs_num==0) { {
if (!force_gpio_matrix && cs_io_num == io_signal[host].spics0_native && cs_num == 0) {
//The cs0s for all SPI peripherals map to pin mux source 1, so we use that instead of a define. //The cs0s for all SPI peripherals map to pin mux source 1, so we use that instead of a define.
PIN_FUNC_SELECT(GPIO_PIN_MUX_REG[cs_io_num], 1); PIN_FUNC_SELECT(GPIO_PIN_MUX_REG[cs_io_num], 1);
} else { } else {
//Use GPIO matrix //Use GPIO matrix
PIN_FUNC_SELECT(GPIO_PIN_MUX_REG[cs_io_num], PIN_FUNC_GPIO); PIN_FUNC_SELECT(GPIO_PIN_MUX_REG[cs_io_num], PIN_FUNC_GPIO);
gpio_matrix_out(cs_io_num, io_signal[host].spics_out[cs_num], false, false); gpio_matrix_out(cs_io_num, io_signal[host].spics_out[cs_num], false, false);
if (cs_num==0) gpio_matrix_in(cs_io_num, io_signal[host].spics_in, false); if (cs_num == 0) gpio_matrix_in(cs_io_num, io_signal[host].spics_in, false);
} }
} }
void spicommon_cs_free(spi_host_device_t host, int cs_io_num) { void spicommon_cs_free(spi_host_device_t host, int cs_io_num)
if (cs_io_num==0 && REG_GET_FIELD(GPIO_PIN_MUX_REG[io_signal[host].spics0_native], MCU_SEL)==1) { {
if (cs_io_num == 0 && REG_GET_FIELD(GPIO_PIN_MUX_REG[io_signal[host].spics0_native], MCU_SEL) == 1) {
PIN_FUNC_SELECT(GPIO_PIN_MUX_REG[io_signal[host].spics0_native], PIN_FUNC_GPIO); PIN_FUNC_SELECT(GPIO_PIN_MUX_REG[io_signal[host].spics0_native], PIN_FUNC_GPIO);
} }
reset_func_to_gpio(io_signal[host].spics_out[cs_io_num]); reset_func_to_gpio(io_signal[host].spics_out[cs_io_num]);
} }
//Set up a list of dma descriptors. dmadesc is an array of descriptors. Data is the buffer to point to. //Set up a list of dma descriptors. dmadesc is an array of descriptors. Data is the buffer to point to.
void spicommon_setup_dma_desc_links(lldesc_t *dmadesc, int len, const uint8_t *data, bool isrx) { void spicommon_setup_dma_desc_links(lldesc_t *dmadesc, int len, const uint8_t *data, bool isrx)
int n=0; {
int n = 0;
while (len) { while (len) {
int dmachunklen=len; int dmachunklen = len;
if (dmachunklen > SPI_MAX_DMA_LEN) dmachunklen=SPI_MAX_DMA_LEN; if (dmachunklen > SPI_MAX_DMA_LEN) dmachunklen = SPI_MAX_DMA_LEN;
if (isrx) { if (isrx) {
//Receive needs DMA length rounded to next 32-bit boundary //Receive needs DMA length rounded to next 32-bit boundary
dmadesc[n].size=(dmachunklen+3)&(~3); dmadesc[n].size = (dmachunklen + 3) & (~3);
dmadesc[n].length=(dmachunklen+3)&(~3); dmadesc[n].length = (dmachunklen + 3) & (~3);
} else { } else {
dmadesc[n].size=dmachunklen; dmadesc[n].size = dmachunklen;
dmadesc[n].length=dmachunklen; dmadesc[n].length = dmachunklen;
} }
dmadesc[n].buf=(uint8_t*)data; dmadesc[n].buf = (uint8_t *)data;
dmadesc[n].eof=0; dmadesc[n].eof = 0;
dmadesc[n].sosf=0; dmadesc[n].sosf = 0;
dmadesc[n].owner=1; dmadesc[n].owner = 1;
dmadesc[n].qe.stqe_next=&dmadesc[n+1]; dmadesc[n].qe.stqe_next = &dmadesc[n + 1];
len-=dmachunklen; len -= dmachunklen;
data+=dmachunklen; data += dmachunklen;
n++; n++;
} }
dmadesc[n-1].eof=1; //Mark last DMA desc as end of stream. dmadesc[n - 1].eof = 1; //Mark last DMA desc as end of stream.
dmadesc[n-1].qe.stqe_next=NULL; dmadesc[n - 1].qe.stqe_next = NULL;
} }
@ -331,28 +339,28 @@ Code for workaround for DMA issue in ESP32 v0/v1 silicon
*/ */
static volatile int dmaworkaround_channels_busy[2]={0,0}; static volatile int dmaworkaround_channels_busy[2] = {0, 0};
static dmaworkaround_cb_t dmaworkaround_cb; static dmaworkaround_cb_t dmaworkaround_cb;
static void *dmaworkaround_cb_arg; static void *dmaworkaround_cb_arg;
static portMUX_TYPE dmaworkaround_mux=portMUX_INITIALIZER_UNLOCKED; static portMUX_TYPE dmaworkaround_mux = portMUX_INITIALIZER_UNLOCKED;
static int dmaworkaround_waiting_for_chan=0; static int dmaworkaround_waiting_for_chan = 0;
bool IRAM_ATTR spicommon_dmaworkaround_req_reset(int dmachan, dmaworkaround_cb_t cb, void *arg) bool IRAM_ATTR spicommon_dmaworkaround_req_reset(int dmachan, dmaworkaround_cb_t cb, void *arg)
{ {
int otherchan=(dmachan==1)?2:1; int otherchan = (dmachan == 1) ? 2 : 1;
bool ret; bool ret;
portENTER_CRITICAL(&dmaworkaround_mux); portENTER_CRITICAL(&dmaworkaround_mux);
if (dmaworkaround_channels_busy[otherchan]) { if (dmaworkaround_channels_busy[otherchan]) {
//Other channel is busy. Call back when it's done. //Other channel is busy. Call back when it's done.
dmaworkaround_cb=cb; dmaworkaround_cb = cb;
dmaworkaround_cb_arg=arg; dmaworkaround_cb_arg = arg;
dmaworkaround_waiting_for_chan=otherchan; dmaworkaround_waiting_for_chan = otherchan;
ret=false; ret = false;
} else { } else {
//Reset DMA //Reset DMA
SET_PERI_REG_MASK(DPORT_PERIP_RST_EN_REG, DPORT_SPI_DMA_RST); SET_PERI_REG_MASK(DPORT_PERIP_RST_EN_REG, DPORT_SPI_DMA_RST);
CLEAR_PERI_REG_MASK(DPORT_PERIP_RST_EN_REG, DPORT_SPI_DMA_RST); CLEAR_PERI_REG_MASK(DPORT_PERIP_RST_EN_REG, DPORT_SPI_DMA_RST);
ret=true; ret = true;
} }
portEXIT_CRITICAL(&dmaworkaround_mux); portEXIT_CRITICAL(&dmaworkaround_mux);
return ret; return ret;
@ -360,17 +368,18 @@ bool IRAM_ATTR spicommon_dmaworkaround_req_reset(int dmachan, dmaworkaround_cb_t
bool IRAM_ATTR spicommon_dmaworkaround_reset_in_progress() bool IRAM_ATTR spicommon_dmaworkaround_reset_in_progress()
{ {
return (dmaworkaround_waiting_for_chan!=0); return (dmaworkaround_waiting_for_chan != 0);
} }
void IRAM_ATTR spicommon_dmaworkaround_idle(int dmachan) { void IRAM_ATTR spicommon_dmaworkaround_idle(int dmachan)
{
portENTER_CRITICAL(&dmaworkaround_mux); portENTER_CRITICAL(&dmaworkaround_mux);
dmaworkaround_channels_busy[dmachan]=0; dmaworkaround_channels_busy[dmachan] = 0;
if (dmaworkaround_waiting_for_chan == dmachan) { if (dmaworkaround_waiting_for_chan == dmachan) {
//Reset DMA //Reset DMA
SET_PERI_REG_MASK(DPORT_PERIP_RST_EN_REG, DPORT_SPI_DMA_RST); SET_PERI_REG_MASK(DPORT_PERIP_RST_EN_REG, DPORT_SPI_DMA_RST);
CLEAR_PERI_REG_MASK(DPORT_PERIP_RST_EN_REG, DPORT_SPI_DMA_RST); CLEAR_PERI_REG_MASK(DPORT_PERIP_RST_EN_REG, DPORT_SPI_DMA_RST);
dmaworkaround_waiting_for_chan=0; dmaworkaround_waiting_for_chan = 0;
//Call callback //Call callback
dmaworkaround_cb(dmaworkaround_cb_arg); dmaworkaround_cb(dmaworkaround_cb_arg);
@ -378,9 +387,10 @@ void IRAM_ATTR spicommon_dmaworkaround_idle(int dmachan) {
portEXIT_CRITICAL(&dmaworkaround_mux); portEXIT_CRITICAL(&dmaworkaround_mux);
} }
void IRAM_ATTR spicommon_dmaworkaround_transfer_active(int dmachan) { void IRAM_ATTR spicommon_dmaworkaround_transfer_active(int dmachan)
{
portENTER_CRITICAL(&dmaworkaround_mux); portENTER_CRITICAL(&dmaworkaround_mux);
dmaworkaround_channels_busy[dmachan]=1; dmaworkaround_channels_busy[dmachan] = 1;
portEXIT_CRITICAL(&dmaworkaround_mux); portEXIT_CRITICAL(&dmaworkaround_mux);
} }

View file

@ -71,100 +71,100 @@ esp_err_t spi_slave_initialize(spi_host_device_t host, const spi_bus_config_t *b
//We only support HSPI/VSPI, period. //We only support HSPI/VSPI, period.
SPI_CHECK(VALID_HOST(host), "invalid host", ESP_ERR_INVALID_ARG); SPI_CHECK(VALID_HOST(host), "invalid host", ESP_ERR_INVALID_ARG);
claimed=spicommon_periph_claim(host); claimed = spicommon_periph_claim(host);
SPI_CHECK(claimed, "host already in use", ESP_ERR_INVALID_STATE); SPI_CHECK(claimed, "host already in use", ESP_ERR_INVALID_STATE);
spihost[host]=malloc(sizeof(spi_slave_t)); spihost[host] = malloc(sizeof(spi_slave_t));
if (spihost[host]==NULL) goto nomem; if (spihost[host] == NULL) goto nomem;
memset(spihost[host], 0, sizeof(spi_slave_t)); memset(spihost[host], 0, sizeof(spi_slave_t));
memcpy(&spihost[host]->cfg, slave_config, sizeof(spi_slave_interface_config_t)); memcpy(&spihost[host]->cfg, slave_config, sizeof(spi_slave_interface_config_t));
spicommon_bus_initialize_io(host, bus_config, dma_chan, SPICOMMON_BUSFLAG_SLAVE, &native); spicommon_bus_initialize_io(host, bus_config, dma_chan, SPICOMMON_BUSFLAG_SLAVE, &native);
gpio_set_direction(slave_config->spics_io_num, GPIO_MODE_INPUT); gpio_set_direction(slave_config->spics_io_num, GPIO_MODE_INPUT);
spicommon_cs_initialize(host, slave_config->spics_io_num, 0, native == false); spicommon_cs_initialize(host, slave_config->spics_io_num, 0, native == false);
spihost[host]->no_gpio_matrix=native; spihost[host]->no_gpio_matrix = native;
spihost[host]->dma_chan=dma_chan; spihost[host]->dma_chan = dma_chan;
if (dma_chan!=0) { if (dma_chan != 0) {
//See how many dma descriptors we need and allocate them //See how many dma descriptors we need and allocate them
int dma_desc_ct=(bus_config->max_transfer_sz+SPI_MAX_DMA_LEN-1)/SPI_MAX_DMA_LEN; int dma_desc_ct = (bus_config->max_transfer_sz + SPI_MAX_DMA_LEN - 1) / SPI_MAX_DMA_LEN;
if (dma_desc_ct==0) dma_desc_ct=1; //default to 4k when max is not given if (dma_desc_ct == 0) dma_desc_ct = 1; //default to 4k when max is not given
spihost[host]->max_transfer_sz = dma_desc_ct*SPI_MAX_DMA_LEN; spihost[host]->max_transfer_sz = dma_desc_ct * SPI_MAX_DMA_LEN;
spihost[host]->dmadesc_tx=pvPortMallocCaps(sizeof(lldesc_t)*dma_desc_ct, MALLOC_CAP_DMA); spihost[host]->dmadesc_tx = pvPortMallocCaps(sizeof(lldesc_t) * dma_desc_ct, MALLOC_CAP_DMA);
spihost[host]->dmadesc_rx=pvPortMallocCaps(sizeof(lldesc_t)*dma_desc_ct, MALLOC_CAP_DMA); spihost[host]->dmadesc_rx = pvPortMallocCaps(sizeof(lldesc_t) * dma_desc_ct, MALLOC_CAP_DMA);
if (!spihost[host]->dmadesc_tx || !spihost[host]->dmadesc_rx) goto nomem; if (!spihost[host]->dmadesc_tx || !spihost[host]->dmadesc_rx) goto nomem;
} else { } else {
//We're limited to non-DMA transfers: the SPI work registers can hold 64 bytes at most. //We're limited to non-DMA transfers: the SPI work registers can hold 64 bytes at most.
spihost[host]->max_transfer_sz=16*4; spihost[host]->max_transfer_sz = 16 * 4;
} }
//Create queues //Create queues
spihost[host]->trans_queue=xQueueCreate(slave_config->queue_size, sizeof(spi_slave_transaction_t *)); spihost[host]->trans_queue = xQueueCreate(slave_config->queue_size, sizeof(spi_slave_transaction_t *));
spihost[host]->ret_queue=xQueueCreate(slave_config->queue_size, sizeof(spi_slave_transaction_t *)); spihost[host]->ret_queue = xQueueCreate(slave_config->queue_size, sizeof(spi_slave_transaction_t *));
if (!spihost[host]->trans_queue || !spihost[host]->ret_queue) goto nomem; if (!spihost[host]->trans_queue || !spihost[host]->ret_queue) goto nomem;
esp_intr_alloc(spicommon_irqsource_for_host(host), ESP_INTR_FLAG_INTRDISABLED, spi_intr, (void*)spihost[host], &spihost[host]->intr); esp_intr_alloc(spicommon_irqsource_for_host(host), ESP_INTR_FLAG_INTRDISABLED, spi_intr, (void *)spihost[host], &spihost[host]->intr);
spihost[host]->hw=spicommon_hw_for_host(host); spihost[host]->hw = spicommon_hw_for_host(host);
//Configure slave //Configure slave
spihost[host]->hw->clock.val=0; spihost[host]->hw->clock.val = 0;
spihost[host]->hw->user.val=0; spihost[host]->hw->user.val = 0;
spihost[host]->hw->ctrl.val=0; spihost[host]->hw->ctrl.val = 0;
spihost[host]->hw->slave.wr_rd_buf_en=1; //no sure if needed spihost[host]->hw->slave.wr_rd_buf_en = 1; //not sure if needed
spihost[host]->hw->user.doutdin=1; //we only support full duplex spihost[host]->hw->user.doutdin = 1; //we only support full duplex
spihost[host]->hw->user.sio=0; spihost[host]->hw->user.sio = 0;
spihost[host]->hw->slave.slave_mode=1; spihost[host]->hw->slave.slave_mode = 1;
spihost[host]->hw->dma_conf.val |= SPI_OUT_RST|SPI_IN_RST|SPI_AHBM_RST|SPI_AHBM_FIFO_RST; spihost[host]->hw->dma_conf.val |= SPI_OUT_RST | SPI_IN_RST | SPI_AHBM_RST | SPI_AHBM_FIFO_RST;
spihost[host]->hw->dma_out_link.start=0; spihost[host]->hw->dma_out_link.start = 0;
spihost[host]->hw->dma_in_link.start=0; spihost[host]->hw->dma_in_link.start = 0;
spihost[host]->hw->dma_conf.val &= ~(SPI_OUT_RST|SPI_IN_RST|SPI_AHBM_RST|SPI_AHBM_FIFO_RST); spihost[host]->hw->dma_conf.val &= ~(SPI_OUT_RST | SPI_IN_RST | SPI_AHBM_RST | SPI_AHBM_FIFO_RST);
spihost[host]->hw->dma_conf.out_data_burst_en=1; spihost[host]->hw->dma_conf.out_data_burst_en = 1;
spihost[host]->hw->slave.sync_reset=1; spihost[host]->hw->slave.sync_reset = 1;
spihost[host]->hw->slave.sync_reset=0; spihost[host]->hw->slave.sync_reset = 0;
bool nodelay=true; bool nodelay = true;
spihost[host]->hw->ctrl.rd_bit_order=(slave_config->flags & SPI_SLAVE_RXBIT_LSBFIRST)?1:0; spihost[host]->hw->ctrl.rd_bit_order = (slave_config->flags & SPI_SLAVE_RXBIT_LSBFIRST) ? 1 : 0;
spihost[host]->hw->ctrl.wr_bit_order=(slave_config->flags & SPI_SLAVE_TXBIT_LSBFIRST)?1:0; spihost[host]->hw->ctrl.wr_bit_order = (slave_config->flags & SPI_SLAVE_TXBIT_LSBFIRST) ? 1 : 0;
if (slave_config->mode==0) { if (slave_config->mode == 0) {
spihost[host]->hw->pin.ck_idle_edge=0; spihost[host]->hw->pin.ck_idle_edge = 0;
spihost[host]->hw->user.ck_i_edge=1; spihost[host]->hw->user.ck_i_edge = 1;
spihost[host]->hw->ctrl2.miso_delay_mode=nodelay?0:2; spihost[host]->hw->ctrl2.miso_delay_mode = nodelay ? 0 : 2;
} else if (slave_config->mode==1) { } else if (slave_config->mode == 1) {
spihost[host]->hw->pin.ck_idle_edge=0; spihost[host]->hw->pin.ck_idle_edge = 0;
spihost[host]->hw->user.ck_i_edge=0; spihost[host]->hw->user.ck_i_edge = 0;
spihost[host]->hw->ctrl2.miso_delay_mode=nodelay?0:1; spihost[host]->hw->ctrl2.miso_delay_mode = nodelay ? 0 : 1;
} else if (slave_config->mode==2) { } else if (slave_config->mode == 2) {
spihost[host]->hw->pin.ck_idle_edge=1; spihost[host]->hw->pin.ck_idle_edge = 1;
spihost[host]->hw->user.ck_i_edge=0; spihost[host]->hw->user.ck_i_edge = 0;
spihost[host]->hw->ctrl2.miso_delay_mode=nodelay?0:1; spihost[host]->hw->ctrl2.miso_delay_mode = nodelay ? 0 : 1;
} else if (slave_config->mode==3) { } else if (slave_config->mode == 3) {
spihost[host]->hw->pin.ck_idle_edge=1; spihost[host]->hw->pin.ck_idle_edge = 1;
spihost[host]->hw->user.ck_i_edge=1; spihost[host]->hw->user.ck_i_edge = 1;
spihost[host]->hw->ctrl2.miso_delay_mode=nodelay?0:2; spihost[host]->hw->ctrl2.miso_delay_mode = nodelay ? 0 : 2;
} }
//Reset DMA //Reset DMA
spihost[host]->hw->dma_conf.val|=SPI_OUT_RST|SPI_IN_RST|SPI_AHBM_RST|SPI_AHBM_FIFO_RST; spihost[host]->hw->dma_conf.val |= SPI_OUT_RST | SPI_IN_RST | SPI_AHBM_RST | SPI_AHBM_FIFO_RST;
spihost[host]->hw->dma_out_link.start=0; spihost[host]->hw->dma_out_link.start = 0;
spihost[host]->hw->dma_in_link.start=0; spihost[host]->hw->dma_in_link.start = 0;
spihost[host]->hw->dma_conf.val&=~(SPI_OUT_RST|SPI_IN_RST|SPI_AHBM_RST|SPI_AHBM_FIFO_RST); spihost[host]->hw->dma_conf.val &= ~(SPI_OUT_RST | SPI_IN_RST | SPI_AHBM_RST | SPI_AHBM_FIFO_RST);
//Disable unneeded ints //Disable unneeded ints
spihost[host]->hw->slave.rd_buf_done=0; spihost[host]->hw->slave.rd_buf_done = 0;
spihost[host]->hw->slave.wr_buf_done=0; spihost[host]->hw->slave.wr_buf_done = 0;
spihost[host]->hw->slave.rd_sta_done=0; spihost[host]->hw->slave.rd_sta_done = 0;
spihost[host]->hw->slave.wr_sta_done=0; spihost[host]->hw->slave.wr_sta_done = 0;
spihost[host]->hw->slave.rd_buf_inten=0; spihost[host]->hw->slave.rd_buf_inten = 0;
spihost[host]->hw->slave.wr_buf_inten=0; spihost[host]->hw->slave.wr_buf_inten = 0;
spihost[host]->hw->slave.rd_sta_inten=0; spihost[host]->hw->slave.rd_sta_inten = 0;
spihost[host]->hw->slave.wr_sta_inten=0; spihost[host]->hw->slave.wr_sta_inten = 0;
//Force a transaction done interrupt. This interrupt won't fire yet because we initialized the SPI interrupt as //Force a transaction done interrupt. This interrupt won't fire yet because we initialized the SPI interrupt as
//disabled. This way, we can just enable the SPI interrupt and the interrupt handler will kick in, handling //disabled. This way, we can just enable the SPI interrupt and the interrupt handler will kick in, handling
//any transactions that are queued. //any transactions that are queued.
spihost[host]->hw->slave.trans_inten=1; spihost[host]->hw->slave.trans_inten = 1;
spihost[host]->hw->slave.trans_done=1; spihost[host]->hw->slave.trans_done = 1;
return ESP_OK; return ESP_OK;
@ -176,7 +176,7 @@ nomem:
free(spihost[host]->dmadesc_rx); free(spihost[host]->dmadesc_rx);
} }
free(spihost[host]); free(spihost[host]);
spihost[host]=NULL; spihost[host] = NULL;
spicommon_periph_free(host); spicommon_periph_free(host);
return ESP_ERR_NO_MEM; return ESP_ERR_NO_MEM;
} }
@ -190,9 +190,9 @@ esp_err_t spi_slave_free(spi_host_device_t host)
free(spihost[host]->dmadesc_tx); free(spihost[host]->dmadesc_tx);
free(spihost[host]->dmadesc_rx); free(spihost[host]->dmadesc_rx);
free(spihost[host]); free(spihost[host]);
spihost[host]=NULL; spihost[host] = NULL;
spicommon_periph_free(host); spicommon_periph_free(host);
spihost[host]=NULL; spihost[host] = NULL;
return ESP_OK; return ESP_OK;
} }
@ -203,8 +203,8 @@ esp_err_t spi_slave_queue_trans(spi_host_device_t host, const spi_slave_transact
SPI_CHECK(VALID_HOST(host), "invalid host", ESP_ERR_INVALID_ARG); SPI_CHECK(VALID_HOST(host), "invalid host", ESP_ERR_INVALID_ARG);
SPI_CHECK(spihost[host], "host not slave", ESP_ERR_INVALID_ARG); SPI_CHECK(spihost[host], "host not slave", ESP_ERR_INVALID_ARG);
SPI_CHECK(trans_desc->length <= spihost[host]->max_transfer_sz*8, "data transfer > host maximum", ESP_ERR_INVALID_ARG); SPI_CHECK(trans_desc->length <= spihost[host]->max_transfer_sz * 8, "data transfer > host maximum", ESP_ERR_INVALID_ARG);
r=xQueueSend(spihost[host]->trans_queue, (void*)&trans_desc, ticks_to_wait); r = xQueueSend(spihost[host]->trans_queue, (void *)&trans_desc, ticks_to_wait);
if (!r) return ESP_ERR_TIMEOUT; if (!r) return ESP_ERR_TIMEOUT;
esp_intr_enable(spihost[host]->intr); esp_intr_enable(spihost[host]->intr);
return ESP_OK; return ESP_OK;
@ -216,7 +216,7 @@ esp_err_t spi_slave_get_trans_result(spi_host_device_t host, spi_slave_transacti
BaseType_t r; BaseType_t r;
SPI_CHECK(VALID_HOST(host), "invalid host", ESP_ERR_INVALID_ARG); SPI_CHECK(VALID_HOST(host), "invalid host", ESP_ERR_INVALID_ARG);
SPI_CHECK(spihost[host], "host not slave", ESP_ERR_INVALID_ARG); SPI_CHECK(spihost[host], "host not slave", ESP_ERR_INVALID_ARG);
r=xQueueReceive(spihost[host]->ret_queue, (void*)trans_desc, ticks_to_wait); r = xQueueReceive(spihost[host]->ret_queue, (void *)trans_desc, ticks_to_wait);
if (!r) return ESP_ERR_TIMEOUT; if (!r) return ESP_ERR_TIMEOUT;
return ESP_OK; return ESP_OK;
} }
@ -227,16 +227,17 @@ esp_err_t spi_slave_transmit(spi_host_device_t host, spi_slave_transaction_t *tr
esp_err_t ret; esp_err_t ret;
spi_slave_transaction_t *ret_trans; spi_slave_transaction_t *ret_trans;
//ToDo: check if any spi transfers in flight //ToDo: check if any spi transfers in flight
ret=spi_slave_queue_trans(host, trans_desc, ticks_to_wait); ret = spi_slave_queue_trans(host, trans_desc, ticks_to_wait);
if (ret!=ESP_OK) return ret; if (ret != ESP_OK) return ret;
ret=spi_slave_get_trans_result(host, &ret_trans, ticks_to_wait); ret = spi_slave_get_trans_result(host, &ret_trans, ticks_to_wait);
if (ret!=ESP_OK) return ret; if (ret != ESP_OK) return ret;
assert(ret_trans==trans_desc); assert(ret_trans == trans_desc);
return ESP_OK; return ESP_OK;
} }
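For reference, ``spi_slave_transmit`` above is the blocking convenience wrapper around the queue/get pair; a minimal usage sketch (illustrative buffers, length given in bits):

    spi_slave_transaction_t t = {
        .length = 128 * 8,
        .tx_buffer = sendbuf,
        .rx_buffer = recvbuf,
    };
    // Queues the transaction and blocks until the master has completed it.
    esp_err_t err = spi_slave_transmit(HSPI_HOST, &t, portMAX_DELAY);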
#ifdef DEBUG_SLAVE #ifdef DEBUG_SLAVE
static void dumpregs(spi_dev_t *hw) { static void dumpregs(spi_dev_t *hw)
{
ets_printf("***REG DUMP ***\n"); ets_printf("***REG DUMP ***\n");
ets_printf("mosi_dlen : %08X\n", hw->mosi_dlen.val); ets_printf("mosi_dlen : %08X\n", hw->mosi_dlen.val);
ets_printf("miso_dlen : %08X\n", hw->miso_dlen.val); ets_printf("miso_dlen : %08X\n", hw->miso_dlen.val);
@ -249,17 +250,18 @@ static void dumpregs(spi_dev_t *hw) {
} }
static void dumpll(lldesc_t *ll) { static void dumpll(lldesc_t *ll)
{
ets_printf("****LL DUMP****\n"); ets_printf("****LL DUMP****\n");
ets_printf("Size %d\n", ll->size); ets_printf("Size %d\n", ll->size);
ets_printf("Len: %d\n", ll->length); ets_printf("Len: %d\n", ll->length);
ets_printf("Owner: %s\n", ll->owner?"dma":"cpu"); ets_printf("Owner: %s\n", ll->owner ? "dma" : "cpu");
} }
#endif #endif
static void IRAM_ATTR spi_slave_restart_after_dmareset(void *arg) static void IRAM_ATTR spi_slave_restart_after_dmareset(void *arg)
{ {
spi_slave_t *host=(spi_slave_t*)arg; spi_slave_t *host = (spi_slave_t *)arg;
esp_intr_enable(host->intr); esp_intr_enable(host->intr);
} }
@ -269,9 +271,9 @@ static void IRAM_ATTR spi_slave_restart_after_dmareset(void *arg)
static void IRAM_ATTR spi_intr(void *arg) static void IRAM_ATTR spi_intr(void *arg)
{ {
BaseType_t r; BaseType_t r;
BaseType_t do_yield=pdFALSE; BaseType_t do_yield = pdFALSE;
spi_slave_transaction_t *trans=NULL; spi_slave_transaction_t *trans = NULL;
spi_slave_t *host=(spi_slave_t*)arg; spi_slave_t *host = (spi_slave_t *)arg;
#ifdef DEBUG_SLAVE #ifdef DEBUG_SLAVE
dumpregs(host->hw); dumpregs(host->hw);
@ -284,20 +286,20 @@ static void IRAM_ATTR spi_intr(void *arg)
if (host->cur_trans) { if (host->cur_trans) {
if (host->dma_chan == 0 && host->cur_trans->rx_buffer) { if (host->dma_chan == 0 && host->cur_trans->rx_buffer) {
//Copy result out //Copy result out
uint32_t *data=host->cur_trans->rx_buffer; uint32_t *data = host->cur_trans->rx_buffer;
for (int x=0; x<host->cur_trans->length; x+=32) { for (int x = 0; x < host->cur_trans->length; x += 32) {
uint32_t word; uint32_t word;
int len=host->cur_trans->length-x; int len = host->cur_trans->length - x;
if (len>32) len=32; if (len > 32) len = 32;
word=host->hw->data_buf[(x/32)]; word = host->hw->data_buf[(x / 32)];
memcpy(&data[x/32], &word, (len+7)/8); memcpy(&data[x / 32], &word, (len + 7) / 8);
} }
} else if (host->dma_chan != 0 && host->cur_trans->rx_buffer) { } else if (host->dma_chan != 0 && host->cur_trans->rx_buffer) {
int i; int i;
//In case CS goes high too soon, the transfer is aborted while the DMA channel still thinks it's going. This //In case CS goes high too soon, the transfer is aborted while the DMA channel still thinks it's going. This
//leads to issues later on, so in that case we need to reset the channel. The state can be detected because //leads to issues later on, so in that case we need to reset the channel. The state can be detected because
//the DMA system doesn't give back the offending descriptor; the owner is still set to DMA. //the DMA system doesn't give back the offending descriptor; the owner is still set to DMA.
for (i=0; host->dmadesc_rx[i].eof==0 && host->dmadesc_rx[i].owner==0; i++) ; for (i = 0; host->dmadesc_rx[i].eof == 0 && host->dmadesc_rx[i].owner == 0; i++) ;
if (host->dmadesc_rx[i].owner) { if (host->dmadesc_rx[i].owner) {
spicommon_dmaworkaround_req_reset(host->dma_chan, spi_slave_restart_after_dmareset, host); spicommon_dmaworkaround_req_reset(host->dma_chan, spi_slave_restart_after_dmareset, host);
} }
@ -306,9 +308,9 @@ static void IRAM_ATTR spi_intr(void *arg)
//Okay, transaction is done. //Okay, transaction is done.
//Return transaction descriptor. //Return transaction descriptor.
xQueueSendFromISR(host->ret_queue, &host->cur_trans, &do_yield); xQueueSendFromISR(host->ret_queue, &host->cur_trans, &do_yield);
host->cur_trans=NULL; host->cur_trans = NULL;
} }
if (host->dma_chan!=0) { if (host->dma_chan != 0) {
spicommon_dmaworkaround_idle(host->dma_chan); spicommon_dmaworkaround_idle(host->dma_chan);
if (spicommon_dmaworkaround_reset_in_progress()) { if (spicommon_dmaworkaround_reset_in_progress()) {
//We need to wait for the reset to complete. Disable int (will be re-enabled on reset callback) and exit isr. //We need to wait for the reset to complete. Disable int (will be re-enabled on reset callback) and exit isr.
@ -319,71 +321,71 @@ static void IRAM_ATTR spi_intr(void *arg)
} }
//Grab next transaction //Grab next transaction
r=xQueueReceiveFromISR(host->trans_queue, &trans, &do_yield); r = xQueueReceiveFromISR(host->trans_queue, &trans, &do_yield);
if (!r) { if (!r) {
//No packet waiting. Disable interrupt. //No packet waiting. Disable interrupt.
esp_intr_disable(host->intr); esp_intr_disable(host->intr);
} else { } else {
//We have a transaction. Send it. //We have a transaction. Send it.
host->hw->slave.trans_done=0; //clear int bit host->hw->slave.trans_done = 0; //clear int bit
host->cur_trans=trans; host->cur_trans = trans;
if (host->dma_chan != 0) { if (host->dma_chan != 0) {
spicommon_dmaworkaround_transfer_active(host->dma_chan); spicommon_dmaworkaround_transfer_active(host->dma_chan);
host->hw->dma_conf.val |= SPI_OUT_RST|SPI_IN_RST|SPI_AHBM_RST|SPI_AHBM_FIFO_RST; host->hw->dma_conf.val |= SPI_OUT_RST | SPI_IN_RST | SPI_AHBM_RST | SPI_AHBM_FIFO_RST;
host->hw->dma_out_link.start=0; host->hw->dma_out_link.start = 0;
host->hw->dma_in_link.start=0; host->hw->dma_in_link.start = 0;
host->hw->dma_conf.val &= ~(SPI_OUT_RST|SPI_IN_RST|SPI_AHBM_RST|SPI_AHBM_FIFO_RST); host->hw->dma_conf.val &= ~(SPI_OUT_RST | SPI_IN_RST | SPI_AHBM_RST | SPI_AHBM_FIFO_RST);
host->hw->dma_conf.out_data_burst_en=0; host->hw->dma_conf.out_data_burst_en = 0;
host->hw->dma_conf.indscr_burst_en=0; host->hw->dma_conf.indscr_burst_en = 0;
host->hw->dma_conf.outdscr_burst_en=0; host->hw->dma_conf.outdscr_burst_en = 0;
//Fill DMA descriptors //Fill DMA descriptors
if (trans->rx_buffer) { if (trans->rx_buffer) {
host->hw->user.usr_miso_highpart=0; host->hw->user.usr_miso_highpart = 0;
spicommon_setup_dma_desc_links(host->dmadesc_rx, ((trans->length+7)/8), trans->rx_buffer, true); spicommon_setup_dma_desc_links(host->dmadesc_rx, ((trans->length + 7) / 8), trans->rx_buffer, true);
host->hw->dma_in_link.addr=(int)(&host->dmadesc_rx[0]) & 0xFFFFF; host->hw->dma_in_link.addr = (int)(&host->dmadesc_rx[0]) & 0xFFFFF;
host->hw->dma_in_link.start=1; host->hw->dma_in_link.start = 1;
} }
if (trans->tx_buffer) { if (trans->tx_buffer) {
spicommon_setup_dma_desc_links(host->dmadesc_tx, (trans->length+7)/8, trans->tx_buffer, false); spicommon_setup_dma_desc_links(host->dmadesc_tx, (trans->length + 7) / 8, trans->tx_buffer, false);
host->hw->user.usr_mosi_highpart=0; host->hw->user.usr_mosi_highpart = 0;
host->hw->dma_out_link.addr=(int)(&host->dmadesc_tx[0]) & 0xFFFFF; host->hw->dma_out_link.addr = (int)(&host->dmadesc_tx[0]) & 0xFFFFF;
host->hw->dma_out_link.start=1; host->hw->dma_out_link.start = 1;
} }
host->hw->slave.sync_reset=1; host->hw->slave.sync_reset = 1;
host->hw->slave.sync_reset=0; host->hw->slave.sync_reset = 0;
} else { } else {
//No DMA. Turn off SPI and copy data to transmit buffers. //No DMA. Turn off SPI and copy data to transmit buffers.
host->hw->cmd.usr=0; host->hw->cmd.usr = 0;
host->hw->slave.sync_reset=1; host->hw->slave.sync_reset = 1;
host->hw->slave.sync_reset=0; host->hw->slave.sync_reset = 0;
host->hw->user.usr_miso_highpart=0; host->hw->user.usr_miso_highpart = 0;
host->hw->user.usr_mosi_highpart=0; host->hw->user.usr_mosi_highpart = 0;
if (trans->tx_buffer) { if (trans->tx_buffer) {
const uint32_t *data=host->cur_trans->tx_buffer; const uint32_t *data = host->cur_trans->tx_buffer;
for (int x=0; x<trans->length; x+=32) { for (int x = 0; x < trans->length; x += 32) {
uint32_t word; uint32_t word;
memcpy(&word, &data[x/32], 4); memcpy(&word, &data[x / 32], 4);
host->hw->data_buf[(x/32)]=word; host->hw->data_buf[(x / 32)] = word;
} }
} }
} }
host->hw->slv_rd_bit.slv_rdata_bit=0; host->hw->slv_rd_bit.slv_rdata_bit = 0;
host->hw->slv_wrbuf_dlen.bit_len=trans->length-1; host->hw->slv_wrbuf_dlen.bit_len = trans->length - 1;
host->hw->slv_rdbuf_dlen.bit_len=trans->length-1; host->hw->slv_rdbuf_dlen.bit_len = trans->length - 1;
host->hw->mosi_dlen.usr_mosi_dbitlen=trans->length-1; host->hw->mosi_dlen.usr_mosi_dbitlen = trans->length - 1;
host->hw->miso_dlen.usr_miso_dbitlen=trans->length-1; host->hw->miso_dlen.usr_miso_dbitlen = trans->length - 1;
host->hw->user.usr_mosi=(trans->tx_buffer==NULL)?0:1; host->hw->user.usr_mosi = (trans->tx_buffer == NULL) ? 0 : 1;
host->hw->user.usr_miso=(trans->rx_buffer==NULL)?0:1; host->hw->user.usr_miso = (trans->rx_buffer == NULL) ? 0 : 1;
//Kick off transfer //Kick off transfer
host->hw->cmd.usr=1; host->hw->cmd.usr = 1;
if (host->cfg.post_setup_cb) host->cfg.post_setup_cb(trans); if (host->cfg.post_setup_cb) host->cfg.post_setup_cb(trans);
} }
if (do_yield) portYIELD_FROM_ISR(); if (do_yield) portYIELD_FROM_ISR();