author		Szilveszter Ordog <slipszi@gmail.com>	2008-02-06 04:38:12 -0500
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2008-02-06 13:41:10 -0500
commit		154443c72f47169ebcb3a7befbff0e934c49bff3 (patch)
tree		c0567d5aa3c46ce4056da07ddacead626f933c73	/drivers/spi/atmel_spi.c
parent		1eed29df472a33bba013d5a2ea2f9e32f4414397 (diff)
atmel_spi: chain DMA transfers
Add support for chained transfers in the atmel_spi driver, letting the
DMA controller switch to the next buffer pair without CPU intervention.
This reduced I/O latencies by about 2% in one bulk I/O test. It should
also help work around several interrelated errata affecting chipselect 0
on at91rm9200 chips.

Almost all of the changes are in the reworked atmel_spi_next_xfer()
function. That's now called with the driver in one of three states:

 1. It isn't transferring anything (in which case the first transfer
    of the current message is going to be sent);
 2. It has finished transferring a non-chainable transfer (in which
    case it will go to the next transfer in the message);
 3. It has finished transferring a chained transfer (in which case the
    next transfer is already queued).

After that it will queue the next transfer if it can be chained.

Signed-off-by: Szilveszter Ordog <slipszi@gmail.com>
Acked-by: Haavard Skinnemoen <hskinnemoen@atmel.com>
Signed-off-by: David Brownell <dbrownell@users.sourceforge.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
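For illustration, here is a minimal sketch of the kind of client transfer
pattern the chaining benefits. The helper chainable_read() and its
parameters are hypothetical, invented for this example; because neither
transfer sets delay_usecs or cs_change, both satisfy the patch's
atmel_spi_xfer_can_be_chained() test, so the driver can preload the second
buffer pair into the PDC "next" registers:

#include <linux/spi/spi.h>

/* Hypothetical client helper, for illustration only: a command write
 * followed by a bulk read, with no inter-transfer delay or chip-select
 * toggle, so the transfers qualify for PDC chaining. */
static int chainable_read(struct spi_device *spi, const u8 *cmd,
			  size_t cmd_len, u8 *buf, size_t buf_len)
{
	struct spi_transfer	t[2] = {
		{ .tx_buf = cmd, .len = cmd_len, },
		{ .rx_buf = buf, .len = buf_len, },
	};
	struct spi_message	m;

	spi_message_init(&m);
	spi_message_add_tail(&t[0], &m);
	spi_message_add_tail(&t[1], &m);
	return spi_sync(spi, &m);
}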
Diffstat (limited to 'drivers/spi/atmel_spi.c')
-rw-r--r--	drivers/spi/atmel_spi.c	146
1 file changed, 101 insertions(+), 45 deletions(-)
diff --git a/drivers/spi/atmel_spi.c b/drivers/spi/atmel_spi.c
index b09d33678dd8..545519a063b8 100644
--- a/drivers/spi/atmel_spi.c
+++ b/drivers/spi/atmel_spi.c
@@ -51,7 +51,9 @@ struct atmel_spi {
 	u8			stopping;
 	struct list_head	queue;
 	struct spi_transfer	*current_transfer;
-	unsigned long		remaining_bytes;
+	unsigned long		current_remaining_bytes;
+	struct spi_transfer	*next_transfer;
+	unsigned long		next_remaining_bytes;
 
 	void			*buffer;
 	dma_addr_t		buffer_dma;
@@ -121,6 +123,48 @@ static void cs_deactivate(struct atmel_spi *as, struct spi_device *spi)
 		gpio_set_value(gpio, !active);
 }
 
+static inline int atmel_spi_xfer_is_last(struct spi_message *msg,
+					struct spi_transfer *xfer)
+{
+	return msg->transfers.prev == &xfer->transfer_list;
+}
+
+static inline int atmel_spi_xfer_can_be_chained(struct spi_transfer *xfer)
+{
+	return xfer->delay_usecs == 0 && !xfer->cs_change;
+}
+
+static void atmel_spi_next_xfer_data(struct spi_master *master,
+				struct spi_transfer *xfer,
+				dma_addr_t *tx_dma,
+				dma_addr_t *rx_dma,
+				u32 *plen)
+{
+	struct atmel_spi	*as = spi_master_get_devdata(master);
+	u32			len = *plen;
+
+	/* use scratch buffer only when rx or tx data is unspecified */
+	if (xfer->rx_buf)
+		*rx_dma = xfer->rx_dma + xfer->len - len;
+	else {
+		*rx_dma = as->buffer_dma;
+		if (len > BUFFER_SIZE)
+			len = BUFFER_SIZE;
+	}
+	if (xfer->tx_buf)
+		*tx_dma = xfer->tx_dma + xfer->len - len;
+	else {
+		*tx_dma = as->buffer_dma;
+		if (len > BUFFER_SIZE)
+			len = BUFFER_SIZE;
+		memset(as->buffer, 0, len);
+		dma_sync_single_for_device(&as->pdev->dev,
+				as->buffer_dma, len, DMA_TO_DEVICE);
+	}
+
+	*plen = len;
+}
+
 /*
  * Submit next transfer for DMA.
  * lock is held, spi irq is blocked
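The two helpers added above encode the chaining rule: a transfer's
successor may be preloaded only if the transfer is not the last in its
message and asks for no post-transfer delay or chip-select change. A
minimal sketch of how they combine (atmel_spi_can_preload_next() is a
hypothetical name, not part of the patch, which open-codes this test
inside atmel_spi_next_xfer()):

/* Hypothetical illustration of the chaining test that
 * atmel_spi_next_xfer() performs inline in the next hunk. */
static int atmel_spi_can_preload_next(struct spi_message *msg,
				      struct spi_transfer *xfer)
{
	return !atmel_spi_xfer_is_last(msg, xfer) &&
		atmel_spi_xfer_can_be_chained(xfer);
}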
@@ -130,53 +174,68 @@ static void atmel_spi_next_xfer(struct spi_master *master,
 {
 	struct atmel_spi	*as = spi_master_get_devdata(master);
 	struct spi_transfer	*xfer;
-	u32			len;
+	u32			len, remaining, total;
 	dma_addr_t		tx_dma, rx_dma;
 
-	xfer = as->current_transfer;
-	if (!xfer || as->remaining_bytes == 0) {
-		if (xfer)
-			xfer = list_entry(xfer->transfer_list.next,
-					struct spi_transfer, transfer_list);
-		else
-			xfer = list_entry(msg->transfers.next,
-					struct spi_transfer, transfer_list);
-		as->remaining_bytes = xfer->len;
-		as->current_transfer = xfer;
-	}
+	if (!as->current_transfer)
+		xfer = list_entry(msg->transfers.next,
+				struct spi_transfer, transfer_list);
+	else if (!as->next_transfer)
+		xfer = list_entry(as->current_transfer->transfer_list.next,
+				struct spi_transfer, transfer_list);
+	else
+		xfer = NULL;
 
-	len = as->remaining_bytes;
+	if (xfer) {
+		len = xfer->len;
+		atmel_spi_next_xfer_data(master, xfer, &tx_dma, &rx_dma, &len);
+		remaining = xfer->len - len;
 
-	tx_dma = xfer->tx_dma + xfer->len - len;
-	rx_dma = xfer->rx_dma + xfer->len - len;
+		spi_writel(as, RPR, rx_dma);
+		spi_writel(as, TPR, tx_dma);
 
-	/* use scratch buffer only when rx or tx data is unspecified */
-	if (!xfer->rx_buf) {
-		rx_dma = as->buffer_dma;
-		if (len > BUFFER_SIZE)
-			len = BUFFER_SIZE;
-	}
-	if (!xfer->tx_buf) {
-		tx_dma = as->buffer_dma;
-		if (len > BUFFER_SIZE)
-			len = BUFFER_SIZE;
-		memset(as->buffer, 0, len);
-		dma_sync_single_for_device(&as->pdev->dev,
-				as->buffer_dma, len, DMA_TO_DEVICE);
+		if (msg->spi->bits_per_word > 8)
+			len >>= 1;
+		spi_writel(as, RCR, len);
+		spi_writel(as, TCR, len);
+	} else {
+		xfer = as->next_transfer;
+		remaining = as->next_remaining_bytes;
 	}
 
-	spi_writel(as, RPR, rx_dma);
-	spi_writel(as, TPR, tx_dma);
+	as->current_transfer = xfer;
+	as->current_remaining_bytes = remaining;
 
-	as->remaining_bytes -= len;
-	if (msg->spi->bits_per_word > 8)
-		len >>= 1;
+	if (remaining > 0)
+		len = remaining;
+	else if (!atmel_spi_xfer_is_last(msg, xfer) &&
+			atmel_spi_xfer_can_be_chained(xfer)) {
+		xfer = list_entry(xfer->transfer_list.next,
+				struct spi_transfer, transfer_list);
+		len = xfer->len;
+	} else
+		xfer = NULL;
 
-	/* REVISIT: when xfer->delay_usecs == 0, the PDC "next transfer"
-	 * mechanism might help avoid the IRQ latency between transfers
-	 * (and improve the nCS0 errata handling on at91rm9200 chips)
-	 *
-	 * We're also waiting for ENDRX before we start the next
+	as->next_transfer = xfer;
+
+	if (xfer) {
+		total = len;
+		atmel_spi_next_xfer_data(master, xfer, &tx_dma, &rx_dma, &len);
+		as->next_remaining_bytes = total - len;
+
+		spi_writel(as, RNPR, rx_dma);
+		spi_writel(as, TNPR, tx_dma);
+
+		if (msg->spi->bits_per_word > 8)
+			len >>= 1;
+		spi_writel(as, RNCR, len);
+		spi_writel(as, TNCR, len);
+	} else {
+		spi_writel(as, RNCR, 0);
+		spi_writel(as, TNCR, 0);
+	}
+
+	/* REVISIT: We're waiting for ENDRX before we start the next
 	 * transfer because we need to handle some difficult timing
 	 * issues otherwise. If we wait for ENDTX in one transfer and
 	 * then starts waiting for ENDRX in the next, it's difficult
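The rework above leans on the PDC auto-reload behaviour: when a current
counter (RCR/TCR) counts down to zero, the controller loads the "next"
pointer/counter pair (RNPR/RNCR, TNPR/TNCR) into the current registers and
keeps transferring without CPU intervention. A condensed sketch of the
register protocol, using the driver's spi_writel() accessor (the cur_* and
next_* names are placeholder variables for this sketch, not driver fields):

	/* Condensed, illustrative register sequence; not literal driver code. */
	spi_writel(as, RPR, cur_rx_dma);	/* current buffer pair */
	spi_writel(as, TPR, cur_tx_dma);
	spi_writel(as, RCR, cur_len);
	spi_writel(as, TCR, cur_len);

	spi_writel(as, RNPR, next_rx_dma);	/* preloaded "next" pair; the PDC */
	spi_writel(as, TNPR, next_tx_dma);	/* swaps it in when RCR/TCR hit 0 */
	spi_writel(as, RNCR, next_len);
	spi_writel(as, TNCR, next_len);

	spi_writel(as, PTCR, SPI_BIT(TXTEN) | SPI_BIT(RXTEN));	/* start DMA */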
@@ -186,8 +245,6 @@ static void atmel_spi_next_xfer(struct spi_master *master,
 	 *
 	 * It should be doable, though. Just not now...
 	 */
-	spi_writel(as, TNCR, 0);
-	spi_writel(as, RNCR, 0);
 	spi_writel(as, IER, SPI_BIT(ENDRX) | SPI_BIT(OVRES));
 
 	dev_dbg(&msg->spi->dev,
@@ -195,8 +252,6 @@ static void atmel_spi_next_xfer(struct spi_master *master,
 		xfer, xfer->len, xfer->tx_buf, xfer->tx_dma,
 		xfer->rx_buf, xfer->rx_dma, spi_readl(as, IMR));
 
-	spi_writel(as, RCR, len);
-	spi_writel(as, TCR, len);
 	spi_writel(as, PTCR, SPI_BIT(TXTEN) | SPI_BIT(RXTEN));
 }
 
@@ -294,6 +349,7 @@ atmel_spi_msg_done(struct spi_master *master, struct atmel_spi *as,
 	spin_lock(&as->lock);
 
 	as->current_transfer = NULL;
+	as->next_transfer = NULL;
 
 	/* continue if needed */
 	if (list_empty(&as->queue) || as->stopping)
@@ -377,7 +433,7 @@ atmel_spi_interrupt(int irq, void *dev_id)
 
 		spi_writel(as, IDR, pending);
 
-		if (as->remaining_bytes == 0) {
+		if (as->current_remaining_bytes == 0) {
 			msg->actual_length += xfer->len;
 
 			if (!msg->is_dma_mapped)
@@ -387,7 +443,7 @@ atmel_spi_interrupt(int irq, void *dev_id)
 			if (xfer->delay_usecs)
 				udelay(xfer->delay_usecs);
 
-			if (msg->transfers.prev == &xfer->transfer_list) {
+			if (atmel_spi_xfer_is_last(msg, xfer)) {
 				/* report completed message */
 				atmel_spi_msg_done(master, as, msg, 0,
 						xfer->cs_change);