fix(spi_master): fix SPI master, now able to transfer more than 32 bytes to/from buffers in non-DMA-accessible memory.

Allocate internal DMA-capable memory to hold the data when the caller's buffer is not suitable.
Use DMA whenever it is enabled, regardless of transfer length.

TW#12226, github#551
michael 2017-07-17 11:37:32 +08:00
parent 8f44efc619
commit 227b011f6a
3 changed files with 210 additions and 60 deletions
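For illustration only (not part of the commit): with this change, a transaction whose tx_buffer lives in flash, and is therefore not DMA-capable, can be sent on a DMA-enabled host; the driver copies the data into internal DMA-capable memory first. The handle name and buffer below are assumptions.

#include "driver/spi_master.h"

//const data is placed in flash (DROM), which the SPI DMA cannot read directly.
static const uint8_t table_in_flash[64] = {0};

//'spi' is a device handle already added to a DMA-enabled host.
static esp_err_t send_flash_table(spi_device_handle_t spi)
{
    spi_transaction_t t = {
        .length = sizeof(table_in_flash) * 8,   //transaction length is given in bits
        .tx_buffer = table_in_flash,            //now copied into a temporary DMA-capable buffer
    };
    return spi_device_transmit(spi, &t);        //synchronous, so 't' may live on the stack
}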

spi_master.h

@@ -67,21 +67,21 @@ typedef struct {
#define SPI_TRANS_USE_TXDATA (1<<3) ///< Transmit tx_data member of spi_transaction_t instead of data at tx_buffer. Do not set tx_buffer when using this.
/**
* This structure describes one SPI transaction
* This structure describes one SPI transaction. The descriptor should not be modified until the transaction finishes.
*/
struct spi_transaction_t {
uint32_t flags; ///< Bitwise OR of SPI_TRANS_* flags
uint16_t command; ///< Command data. Specific length was given when device was added to the bus.
uint64_t address; ///< Address. Specific length was given when device was added to the bus.
size_t length; ///< Total data length, in bits
size_t rxlength; ///< Total data length received, if different from length. (0 defaults this to the value of ``length``)
size_t rxlength; ///< Total data length received, should not be greater than ``length`` in full-duplex mode. (0 defaults this to the value of ``length``)
void *user; ///< User-defined variable. Can be used to store eg transaction ID.
union {
const void *tx_buffer; ///< Pointer to transmit buffer, or NULL for no MOSI phase
uint8_t tx_data[4]; ///< If SPI_TRANS_USE_TXDATA is set, data set here is sent directly from this variable.
};
union {
void *rx_buffer; ///< Pointer to receive buffer, or NULL for no MISO phase
void *rx_buffer; ///< Pointer to receive buffer, or NULL for no MISO phase. Written in 4-byte units if DMA is used.
uint8_t rx_data[4]; ///< If SPI_TRANS_USE_RXDATA is set, data is received directly into this variable
};
};
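As a usage note for the unions above (a sketch, not taken from this commit): transfers of up to 32 bits can embed the data directly in the descriptor via tx_data/rx_data and the corresponding flags, instead of pointing at external buffers. The device handle is an assumption.

//Sketch: clock one byte out and capture the byte that arrives during the same 8 clocks.
static uint8_t exchange_byte(spi_device_handle_t spi, uint8_t out)
{
    spi_transaction_t t = {
        .flags = SPI_TRANS_USE_TXDATA | SPI_TRANS_USE_RXDATA,
        .length = 8,             //8 bits clocked out...
        .rxlength = 8,           //...and 8 bits captured (0 would default to 'length')
        .tx_data = { out },      //sent straight from the descriptor
    };
    ESP_ERROR_CHECK(spi_device_transmit(spi, &t));
    return t.rx_data[0];         //received straight into the descriptor
}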
@@ -182,8 +182,9 @@ esp_err_t spi_device_queue_trans(spi_device_handle_t handle, spi_transaction_t *
* re-use the buffers.
*
* @param handle Device handle obtained using spi_bus_add_device
* @param trans_desc Pointer to variable able to contain a pointer to the description of the
* transaction that is executed
* @param trans_desc Pointer to a variable that will receive a pointer to the descriptor of the executed
transaction. The descriptor should not be modified until it is returned by
spi_device_get_trans_result.
* @param ticks_to_wait Ticks to wait until there's a returned item; use portMAX_DELAY to never time
out.
* @return
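A sketch of the intended queue/get pairing described above (descriptor contents and the handle are placeholders): each descriptor handed to spi_device_queue_trans must stay allocated and unmodified until spi_device_get_trans_result hands it back.

static void pipeline_three_transactions(spi_device_handle_t spi)
{
    static spi_transaction_t trans[3];   //must stay valid until returned by get_trans_result
    for (int i = 0; i < 3; i++) {
        //... fill trans[i].length and the tx/rx buffers here ...
        ESP_ERROR_CHECK(spi_device_queue_trans(spi, &trans[i], portMAX_DELAY));
    }
    for (int i = 0; i < 3; i++) {
        spi_transaction_t *done;
        ESP_ERROR_CHECK(spi_device_get_trans_result(spi, &done, portMAX_DELAY));
        //'done' points at one of trans[0..2]; only now may that descriptor be reused.
    }
}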

spi_master.c

@@ -64,11 +64,20 @@ typedef struct spi_device_t spi_device_t;
#define NO_CS 3 //Number of CS pins per SPI host
/// struct to hold private transaction data (like tx and rx buffer for DMA).
typedef struct {
spi_transaction_t *trans;
uint32_t *buffer_to_send; //points to tx_data if SPI_TRANS_USE_TXDATA is set; otherwise, if the original buffer isn't in DMA-capable memory, this holds the address of a temporary DMA-capable buffer;
//otherwise it points to the original buffer, or is NULL if no buffer is assigned.
uint32_t *buffer_to_rcv; // similar to buffer_to_send
} spi_trans_priv;
typedef struct {
spi_device_t *device[NO_CS];
intr_handle_t intr;
spi_dev_t *hw;
spi_transaction_t *cur_trans;
spi_trans_priv cur_trans_buf;
int cur_cs;
lldesc_t *dmadesc_tx;
lldesc_t *dmadesc_rx;
@@ -130,6 +139,8 @@ esp_err_t spi_bus_initialize(spi_host_device_t host, const spi_bus_config_t *bus
esp_intr_alloc(spicommon_irqsource_for_host(host), ESP_INTR_FLAG_INTRDISABLED, spi_intr, (void*)spihost[host], &spihost[host]->intr);
spihost[host]->hw=spicommon_hw_for_host(host);
spihost[host]->cur_cs = NO_CS;
//Reset DMA
spihost[host]->hw->dma_conf.val|=SPI_OUT_RST|SPI_IN_RST|SPI_AHBM_RST|SPI_AHBM_FIFO_RST;
spihost[host]->hw->dma_out_link.start=0;
@@ -216,8 +227,8 @@ esp_err_t spi_bus_add_device(spi_host_device_t host, spi_device_interface_config
spihost[host]->device[freecs]=dev;
//Allocate queues, set defaults
dev->trans_queue=xQueueCreate(dev_config->queue_size, sizeof(spi_transaction_t *));
dev->ret_queue=xQueueCreate(dev_config->queue_size, sizeof(spi_transaction_t *));
dev->trans_queue=xQueueCreate(dev_config->queue_size, sizeof(spi_trans_priv));
dev->ret_queue=xQueueCreate(dev_config->queue_size, sizeof(spi_trans_priv));
if (!dev->trans_queue || !dev->ret_queue) goto nomem;
if (dev_config->duty_cycle_pos==0) dev_config->duty_cycle_pos=128;
dev->host=spihost[host];
@@ -259,7 +270,7 @@ esp_err_t spi_bus_remove_device(spi_device_handle_t handle)
//These checks aren't exhaustive; another thread could sneak in a transaction in between. These are only here to
//catch design errors and aren't meant to be triggered during normal operation.
SPI_CHECK(uxQueueMessagesWaiting(handle->trans_queue)==0, "Have unfinished transactions", ESP_ERR_INVALID_STATE);
SPI_CHECK(handle->host->cur_trans==0 || handle->host->device[handle->host->cur_cs]!=handle, "Have unfinished transactions", ESP_ERR_INVALID_STATE);
SPI_CHECK(handle->host->cur_cs == NO_CS || handle->host->device[handle->host->cur_cs]!=handle, "Have unfinished transactions", ESP_ERR_INVALID_STATE);
SPI_CHECK(uxQueueMessagesWaiting(handle->ret_queue)==0, "Have unfinished transactions", ESP_ERR_INVALID_STATE);
//Kill queues
@@ -343,43 +354,43 @@ static void IRAM_ATTR spi_intr(void *arg)
int prevCs=-1;
BaseType_t r;
BaseType_t do_yield=pdFALSE;
spi_trans_priv *trans_buf=NULL;
spi_transaction_t *trans=NULL;
spi_host_t *host=(spi_host_t*)arg;
//Ignore all but the trans_done int.
if (!host->hw->slave.trans_done) return;
if (host->cur_trans) {
/*------------ deal with the in-flight transaction -----------------*/
if (host->cur_cs != NO_CS) {
spi_transaction_t *cur_trans = host->cur_trans_buf.trans;
//Okay, transaction is done.
if ((host->cur_trans->rx_buffer || (host->cur_trans->flags & SPI_TRANS_USE_RXDATA)) && host->dma_chan == 0) {
if (host->cur_trans_buf.buffer_to_rcv && host->dma_chan == 0 ) {
//Need to copy from SPI regs to result buffer.
uint32_t *data;
if (host->cur_trans->flags & SPI_TRANS_USE_RXDATA) {
data=(uint32_t*)&host->cur_trans->rx_data[0];
} else {
data=(uint32_t*)host->cur_trans->rx_buffer;
}
for (int x=0; x < host->cur_trans->rxlength; x+=32) {
for (int x=0; x < cur_trans->rxlength; x+=32) {
//Do a memcpy to get around possible alignment issues in rx_buffer
uint32_t word=host->hw->data_buf[x/32];
int len=host->cur_trans->rxlength-x;
int len=cur_trans->rxlength-x;
if (len>32) len=32;
memcpy(&data[x/32], &word, (len+7)/8);
memcpy(&host->cur_trans_buf.buffer_to_rcv[x/32], &word, (len+7)/8);
}
}
//Call post-transaction callback, if any
if (host->device[host->cur_cs]->cfg.post_cb) host->device[host->cur_cs]->cfg.post_cb(host->cur_trans);
if (host->device[host->cur_cs]->cfg.post_cb) host->device[host->cur_cs]->cfg.post_cb(cur_trans);
//Return transaction descriptor.
xQueueSendFromISR(host->device[host->cur_cs]->ret_queue, &host->cur_trans, &do_yield);
host->cur_trans=NULL;
xQueueSendFromISR(host->device[host->cur_cs]->ret_queue, &host->cur_trans_buf, &do_yield);
prevCs=host->cur_cs;
host->cur_cs = NO_CS;
}
//Tell common code DMA workaround that our DMA channel is idle. If needed, the code will do a DMA reset.
if (host->dma_chan) spicommon_dmaworkaround_idle(host->dma_chan);
/*------------ new transaction starts here ------------------*/
//ToDo: This is a stupidly simple low-cs-first priority scheme. Make this configurable somehow. - JD
for (i=0; i<NO_CS; i++) {
if (host->device[i]) {
r=xQueueReceiveFromISR(host->device[i]->trans_queue, &trans, &do_yield);
r=xQueueReceiveFromISR(host->device[i]->trans_queue, &host->cur_trans_buf, &do_yield);
trans_buf = &host->cur_trans_buf;
//Stop looking if we have a transaction to send.
if (r) break;
}
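To make the bit-length arithmetic in the copy loop above concrete, here is the same pattern as a standalone sketch (a hypothetical helper, not part of the driver): rxlength is in bits, one 32-bit work register is read per iteration, and the tail is rounded up to whole bytes.

#include <string.h>
#include <stdint.h>
#include <stddef.h>

//e.g. len_bits = 72 reads registers 0..2 and copies 4 + 4 + 1 bytes.
static void copy_from_work_regs(uint8_t *dst, const volatile uint32_t *regs, size_t len_bits)
{
    for (size_t x = 0; x < len_bits; x += 32) {
        uint32_t word = regs[x / 32];                 //read one whole work register
        size_t bits = len_bits - x;
        if (bits > 32) bits = 32;
        memcpy(dst + x / 8, &word, (bits + 7) / 8);   //memcpy avoids alignment traps
    }
}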
@@ -391,16 +402,11 @@ static void IRAM_ATTR spi_intr(void *arg)
host->hw->slave.trans_done=0; //clear int bit
//We have a transaction. Send it.
spi_device_t *dev=host->device[i];
host->cur_trans=trans;
trans = trans_buf->trans;
host->cur_cs=i;
//We should be done with the transmission.
assert(host->hw->cmd.usr == 0);
//Default rxlength to be the same as length, if not filled in.
if (trans->rxlength==0) {
trans->rxlength=trans->length;
}
//Reconfigure according to device settings, but only if we change CSses.
if (i!=prevCs) {
//Assumes a hardcoded 80MHz Fapb for now. ToDo: figure out something better once we have
@@ -498,19 +504,13 @@ static void IRAM_ATTR spi_intr(void *arg)
//Fill DMA descriptors
if (trans->rx_buffer || (trans->flags & SPI_TRANS_USE_RXDATA)) {
uint32_t *data;
if (trans->flags & SPI_TRANS_USE_RXDATA) {
data=(uint32_t *)&trans->rx_data[0];
} else {
data=trans->rx_buffer;
}
if (trans_buf->buffer_to_rcv) {
host->hw->user.usr_miso_highpart=0;
if (host->dma_chan == 0) {
//No need to setup anything; we'll copy the result out of the work registers directly later.
} else {
spicommon_dmaworkaround_transfer_active(host->dma_chan); //mark channel as active
spicommon_setup_dma_desc_links(host->dmadesc_rx, ((trans->rxlength+7)/8), (uint8_t*)data, true);
spicommon_setup_dma_desc_links(host->dmadesc_rx, ((trans->rxlength+7)/8), (uint8_t*)trans_buf->buffer_to_rcv, true);
host->hw->dma_in_link.addr=(int)(&host->dmadesc_rx[0]) & 0xFFFFF;
host->hw->dma_in_link.start=1;
}
@@ -519,31 +519,26 @@ static void IRAM_ATTR spi_intr(void *arg)
host->hw->user.usr_miso=0;
}
if (trans->tx_buffer || (trans->flags & SPI_TRANS_USE_TXDATA)) {
uint32_t *data;
if (trans->flags & SPI_TRANS_USE_TXDATA) {
data=(uint32_t *)&trans->tx_data[0];
} else {
data=(uint32_t *)trans->tx_buffer;
}
if (trans_buf->buffer_to_send) {
if (host->dma_chan == 0) {
//Need to copy data to registers manually
for (int x=0; x < trans->length; x+=32) {
//Use memcpy to get around alignment issues for txdata
uint32_t word;
memcpy(&word, &data[x/32], 4);
memcpy(&word, &trans_buf->buffer_to_send[x/32], 4);
host->hw->data_buf[(x/32)+8]=word;
}
host->hw->user.usr_mosi_highpart=1;
} else {
spicommon_dmaworkaround_transfer_active(host->dma_chan); //mark channel as active
spicommon_setup_dma_desc_links(host->dmadesc_tx, (trans->length+7)/8, (uint8_t*)data, false);
spicommon_setup_dma_desc_links(host->dmadesc_tx, (trans->length+7)/8, (uint8_t*)trans_buf->buffer_to_send, false);
host->hw->user.usr_mosi_highpart=0;
host->hw->dma_out_link.addr=(int)(&host->dmadesc_tx[0]) & 0xFFFFF;
host->hw->dma_out_link.start=1;
host->hw->user.usr_mosi_highpart=0;
}
}
host->hw->mosi_dlen.usr_mosi_dbitlen=trans->length-1;
host->hw->miso_dlen.usr_miso_dbitlen=trans->rxlength-1;
@@ -554,8 +549,8 @@ static void IRAM_ATTR spi_intr(void *arg)
} else {
host->hw->addr=trans->address & 0xffffffff;
}
host->hw->user.usr_mosi=(trans->tx_buffer!=NULL || (trans->flags & SPI_TRANS_USE_TXDATA))?1:0;
host->hw->user.usr_miso=(trans->rx_buffer!=NULL || (trans->flags & SPI_TRANS_USE_RXDATA))?1:0;
host->hw->user.usr_mosi=(trans_buf->buffer_to_send)?1:0;
host->hw->user.usr_miso=(trans_buf->buffer_to_rcv)?1:0;
//Call pre-transmission callback, if any
if (dev->cfg.pre_cb) dev->cfg.pre_cb(trans);
@@ -570,17 +565,61 @@ esp_err_t spi_device_queue_trans(spi_device_handle_t handle, spi_transaction_t *
{
BaseType_t r;
SPI_CHECK(handle!=NULL, "invalid dev handle", ESP_ERR_INVALID_ARG);
SPI_CHECK((trans_desc->flags & SPI_TRANS_USE_RXDATA)==0 ||trans_desc->rxlength <= 32, "rxdata transfer > 32 bits", ESP_ERR_INVALID_ARG);
SPI_CHECK((trans_desc->flags & SPI_TRANS_USE_TXDATA)==0 ||trans_desc->length <= 32, "txdata transfer > 32 bits", ESP_ERR_INVALID_ARG);
SPI_CHECK((trans_desc->flags & SPI_TRANS_USE_RXDATA)==0 ||trans_desc->rxlength <= 32, "rxdata transfer > 32 bits without configured DMA", ESP_ERR_INVALID_ARG);
SPI_CHECK((trans_desc->flags & SPI_TRANS_USE_TXDATA)==0 ||trans_desc->length <= 32, "txdata transfer > 32 bits without configured DMA", ESP_ERR_INVALID_ARG);
SPI_CHECK(!((trans_desc->flags & (SPI_TRANS_MODE_DIO|SPI_TRANS_MODE_QIO)) && (handle->cfg.flags & SPI_DEVICE_3WIRE)), "incompatible iface params", ESP_ERR_INVALID_ARG);
SPI_CHECK(!((trans_desc->flags & (SPI_TRANS_MODE_DIO|SPI_TRANS_MODE_QIO)) && (!(handle->cfg.flags & SPI_DEVICE_HALFDUPLEX))), "incompatible iface params", ESP_ERR_INVALID_ARG);
SPI_CHECK(trans_desc->length <= handle->host->max_transfer_sz*8, "txdata transfer > host maximum", ESP_ERR_INVALID_ARG);
SPI_CHECK(trans_desc->rxlength <= handle->host->max_transfer_sz*8, "rxdata transfer > host maximum", ESP_ERR_INVALID_ARG);
SPI_CHECK(handle->host->dma_chan == 0 || (trans_desc->flags & SPI_TRANS_USE_TXDATA) ||
trans_desc->tx_buffer==NULL || esp_ptr_dma_capable(trans_desc->tx_buffer), "txdata not in DMA-capable memory", ESP_ERR_INVALID_ARG);
SPI_CHECK(handle->host->dma_chan == 0 || (trans_desc->flags & SPI_TRANS_USE_RXDATA) ||
trans_desc->rx_buffer==NULL || esp_ptr_dma_capable(trans_desc->rx_buffer), "rxdata not in DMA-capable memory", ESP_ERR_INVALID_ARG);
r=xQueueSend(handle->trans_queue, (void*)&trans_desc, ticks_to_wait);
//Default rxlength to be the same as length, if not filled in.
// Setting rxlength to length is fine even when rx_buffer is NULL.
if (trans_desc->rxlength==0) {
trans_desc->rxlength=trans_desc->length;
}
spi_trans_priv trans_buf;
memset( &trans_buf, 0, sizeof(spi_trans_priv) );
trans_buf.trans = trans_desc;
// rx memory assign
if ( trans_desc->flags & SPI_TRANS_USE_RXDATA ) {
trans_buf.buffer_to_rcv = (uint32_t*)&trans_desc->rx_data[0];
} else {
//if neither SPI_TRANS_USE_RXDATA nor rx_buffer is used, buffer_to_rcv is set to NULL
trans_buf.buffer_to_rcv = trans_desc->rx_buffer;
}
if ( trans_buf.buffer_to_rcv && handle->host->dma_chan && !esp_ptr_dma_capable( trans_buf.buffer_to_rcv )) {
//if the rx buffer in the descriptor is not DMA-capable, allocate a DMA-capable one
trans_buf.buffer_to_rcv = heap_caps_malloc((trans_desc->rxlength+7)/8, MALLOC_CAP_DMA);
if ( trans_buf.buffer_to_rcv==NULL ) return ESP_ERR_NO_MEM;
}
const uint32_t *txdata;
// tx memory assign
if ( trans_desc->flags & SPI_TRANS_USE_TXDATA ) {
txdata = (uint32_t*)&trans_desc->tx_data[0];
} else {
//if neither SPI_TRANS_USE_TXDATA nor tx_buffer is used, txdata is set to NULL
txdata = trans_desc->tx_buffer ;
}
if ( txdata && handle->host->dma_chan && !esp_ptr_dma_capable( txdata )) {
//if the tx buffer in the descriptor is not DMA-capable, allocate a DMA-capable one and copy the data into it
trans_buf.buffer_to_send = heap_caps_malloc((trans_desc->length+7)/8, MALLOC_CAP_DMA);
if ( trans_buf.buffer_to_send==NULL ) {
// free malloc-ed buffer (if needed) before return.
if ( (void*)trans_buf.buffer_to_rcv != trans_desc->rx_buffer && (void*)trans_buf.buffer_to_rcv != &trans_desc->rx_data[0] ) {
free( trans_buf.buffer_to_rcv );
}
return ESP_ERR_NO_MEM;
}
memcpy( trans_buf.buffer_to_send, txdata, (trans_desc->length+7)/8 );
} else {
// otherwise use the original buffer (casting away const) or NULL
trans_buf.buffer_to_send = (uint32_t*)txdata;
}
r=xQueueSend(handle->trans_queue, (void*)&trans_buf, ticks_to_wait);
if (!r) return ESP_ERR_TIMEOUT;
esp_intr_enable(handle->host->intr);
return ESP_OK;
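Condensed, the rule that the rx and tx blocks above implement can be read as the following sketch (hypothetical helper name, reusing the same heap_caps_malloc/esp_ptr_dma_capable calls the surrounding file already pulls in; this is not the driver's actual code). The tx path copies the contents, the rx path only allocates.

#include <string.h>
#include <stdbool.h>

//Return 'buf' itself when it can be used directly, otherwise a freshly
//allocated DMA-capable buffer of (len_bits+7)/8 bytes. The caller frees
//the result only if it differs from 'buf'; NULL means out of memory.
static void *dma_usable_buffer(const void *buf, size_t len_bits, bool dma_in_use, bool copy_contents)
{
    if (buf == NULL || !dma_in_use || esp_ptr_dma_capable(buf)) {
        return (void *)buf;
    }
    void *tmp = heap_caps_malloc((len_bits + 7) / 8, MALLOC_CAP_DMA);
    if (tmp != NULL && copy_contents) {
        memcpy(tmp, buf, (len_bits + 7) / 8);   //tx: preload the data; rx: leave uninitialized
    }
    return tmp;
}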
@@ -589,9 +628,33 @@ esp_err_t spi_device_queue_trans(spi_device_handle_t handle, spi_transaction_t *
esp_err_t spi_device_get_trans_result(spi_device_handle_t handle, spi_transaction_t **trans_desc, TickType_t ticks_to_wait)
{
BaseType_t r;
spi_trans_priv trans_buf;
SPI_CHECK(handle!=NULL, "invalid dev handle", ESP_ERR_INVALID_ARG);
r=xQueueReceive(handle->ret_queue, (void*)trans_desc, ticks_to_wait);
if (!r) return ESP_ERR_TIMEOUT;
r=xQueueReceive(handle->ret_queue, (void*)&trans_buf, ticks_to_wait);
if (!r) {
// The temporary rx/tx DMA buffers are freed only when the result is received from the queue (i.e. the transaction has finished).
// On timeout, just wait and retry.
// Until then, every in-flight transaction that needed one keeps holding internal memory as its DMA buffer.
return ESP_ERR_TIMEOUT;
}
(*trans_desc) = trans_buf.trans;
if ( (void*)trans_buf.buffer_to_send != &(*trans_desc)->tx_data[0] && trans_buf.buffer_to_send != (*trans_desc)->tx_buffer ) {
free( trans_buf.buffer_to_send );
}
//copy data from the temporary DMA-capable buffer back to the original buffer and free the temporary one.
if ( (void*)trans_buf.buffer_to_rcv != &(*trans_desc)->rx_data[0] && trans_buf.buffer_to_rcv != (*trans_desc)->rx_buffer ) {
if ( (*trans_desc)->flags & SPI_TRANS_USE_RXDATA ) {
memcpy( (uint8_t*)&(*trans_desc)->rx_data[0], trans_buf.buffer_to_rcv, ((*trans_desc)->rxlength+7)/8 );
} else {
memcpy( (*trans_desc)->rx_buffer, trans_buf.buffer_to_rcv, ((*trans_desc)->rxlength+7)/8 );
}
free( trans_buf.buffer_to_rcv );
}
return ESP_OK;
}
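As the comment above implies, a caller that hits ESP_ERR_TIMEOUT should keep polling rather than abandon the transaction, because the temporary DMA buffers stay allocated until the result is actually received. A sketch (handle name assumed, same source file context):

static spi_transaction_t *collect_one_result(spi_device_handle_t spi)
{
    spi_transaction_t *rtrans = NULL;
    esp_err_t err;
    do {
        err = spi_device_get_trans_result(spi, &rtrans, pdMS_TO_TICKS(100));
    } while (err == ESP_ERR_TIMEOUT);   //buffers are released only once this succeeds
    ESP_ERROR_CHECK(err);
    return rtrans;
}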

test_spi_master.c

@@ -317,6 +317,92 @@ TEST_CASE("SPI Master no response when switch from host1 (HSPI) to host2 (VSPI)"
assert(spi_device_transmit(spi, &transaction) == ESP_OK);
// test case succeeds when this is printed.
printf("after second xmit\n");
}
IRAM_ATTR static uint32_t data_iram[320];
DRAM_ATTR static uint32_t data_dram[320];
//const forces placement in flash (DROM), i.e. outside DMA-capable memory.
static const uint32_t data_drom[320] = {0};
#define PIN_NUM_MISO 25
#define PIN_NUM_MOSI 23
#define PIN_NUM_CLK 19
#define PIN_NUM_CS 22
#define PIN_NUM_DC 21
#define PIN_NUM_RST 18
#define PIN_NUM_BCKL 5
TEST_CASE("SPI Master DMA test, TX and RX in different regions", "[spi]")
{
uint32_t data_rxdram[320];
esp_err_t ret;
spi_device_handle_t spi;
spi_bus_config_t buscfg={
.miso_io_num=PIN_NUM_MISO,
.mosi_io_num=PIN_NUM_MOSI,
.sclk_io_num=PIN_NUM_CLK,
.quadwp_io_num=-1,
.quadhd_io_num=-1
};
spi_device_interface_config_t devcfg={
.clock_speed_hz=10000000, //Clock out at 10 MHz
.mode=0, //SPI mode 0
.spics_io_num=PIN_NUM_CS, //CS pin
.queue_size=7, //We want to be able to queue 7 transactions at a time
.pre_cb=NULL, //No pre-transfer callback is needed for this test
};
//Initialize the SPI bus
ret=spi_bus_initialize(HSPI_HOST, &buscfg, 1);
assert(ret==ESP_OK);
//Attach the device to the SPI bus
ret=spi_bus_add_device(HSPI_HOST, &devcfg, &spi);
assert(ret==ESP_OK);
static spi_transaction_t trans[6];
int x;
printf("iram: %p, dram: %p, drom: %p\n", data_iram, data_dram, data_drom);
memset(trans, 0, 6*sizeof(spi_transaction_t));
trans[0].length = 320*8;
trans[0].tx_buffer = data_iram;
trans[0].rx_buffer = data_rxdram;
trans[1].length = 320*8;
trans[1].tx_buffer = data_dram;
trans[1].rx_buffer = data_rxdram;
trans[2].length = 320*8;
trans[2].tx_buffer = data_drom;
trans[2].rx_buffer = data_rxdram;
trans[3].length = 320*8;
trans[3].tx_buffer = data_drom;
trans[3].rx_buffer = data_iram;
trans[4].length = 320*8;
trans[4].rxlength = 8*4;
trans[4].tx_buffer = data_drom;
trans[4].flags = SPI_TRANS_USE_RXDATA;
trans[5].length = 8*4;
trans[5].flags = SPI_TRANS_USE_RXDATA | SPI_TRANS_USE_TXDATA;
//Queue all transactions.
for (x=0; x<6; x++) {
ret=spi_device_queue_trans(spi,&trans[x], portMAX_DELAY);
assert(ret==ESP_OK);
}
for (x=0; x<6; x++) {
spi_transaction_t* ptr;
ret=spi_device_get_trans_result(spi,&ptr, portMAX_DELAY);
assert(ret==ESP_OK);
assert(ptr == trans+x);
}
}