author		H Hartley Sweeten <hsweeten@visionengravers.com>	2017-08-08 16:51:31 -0400
committer	Mark Brown <broonie@kernel.org>	2017-08-09 12:53:14 -0400
commit		d9a017713d909697f528a3f6569d5deb7477cea1 (patch)
tree		35597ecbb4508e953d61e96dad149ce9761bcf72
parent		c7a909cf25c6e1198f5a261d2af0503985871e1a (diff)
spi: spi-ep93xx: use the default master transfer queueing mechanism
Update this driver to use the default implementation of
transfer_one_message().

Signed-off-by: H Hartley Sweeten <hsweeten@visionengravers.com>
Reviewed-by: Andy Shevchenko <andy.shevchenko@gmail.com>
Signed-off-by: Chris Packham <chris.packham@alliedtelesis.co.nz>
Signed-off-by: Mark Brown <broonie@kernel.org>
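Background for reviewers: when a driver supplies transfer_one() (plus an
optional prepare_message()) instead of transfer_one_message(), the SPI
core's generic message pump in drivers/spi/spi.c asserts chip select,
walks the message's transfer list, and calls transfer_one() once per
transfer. A positive return value means the transfer completes
asynchronously, and the core blocks on an internal completion until the
driver calls spi_finalize_current_transfer() from its IRQ handler or DMA
callback, exactly as this patch does. A rough, simplified sketch of that
core loop (not the verbatim kernel code; cs_change handling, per-transfer
delays, timeouts and statistics are omitted):

/*
 * Simplified sketch of the SPI core's default transfer_one_message()
 * (see drivers/spi/spi.c). Illustrative only; the real function also
 * handles cs_change, delay_usecs, timeouts and error unwinding.
 */
static int default_transfer_one_message(struct spi_master *master,
					struct spi_message *msg)
{
	struct spi_transfer *xfer;
	int ret = 0;

	/* The core owns chip select; drivers no longer toggle cs_gpio. */
	spi_set_cs(msg->spi, true);

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		reinit_completion(&master->xfer_completion);

		ret = master->transfer_one(master, msg->spi, xfer);
		if (ret < 0)
			break;

		/*
		 * A positive return means the transfer is still in
		 * flight; spi_finalize_current_transfer() completes
		 * master->xfer_completion from IRQ/DMA context.
		 */
		if (ret > 0)
			wait_for_completion(&master->xfer_completion);

		msg->actual_length += xfer->len;
	}

	spi_set_cs(msg->spi, false);
	msg->status = ret < 0 ? ret : 0;
	spi_finalize_current_message(master);
	return msg->status;
}

This is why the patch can delete ep93xx_spi_cs_control(), the driver's
private completion, and the hand-rolled per-message loop: the core
already provides all three.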
-rw-r--r--	drivers/spi/spi-ep93xx.c	322
1 file changed, 108 insertions(+), 214 deletions(-)
diff --git a/drivers/spi/spi-ep93xx.c b/drivers/spi/spi-ep93xx.c
index cf7d8175bf79..e5cc07357746 100644
--- a/drivers/spi/spi-ep93xx.c
+++ b/drivers/spi/spi-ep93xx.c
@@ -73,7 +73,6 @@
  * @clk: clock for the controller
  * @mmio: pointer to ioremap()'d registers
  * @sspdr_phys: physical address of the SSPDR register
- * @wait: wait here until given transfer is completed
  * @tx: current byte in transfer to transmit
  * @rx: current byte in transfer to receive
  * @fifo_level: how full is FIFO (%0..%SPI_FIFO_SIZE - %1). Receiving one
@@ -91,7 +90,6 @@ struct ep93xx_spi {
 	struct clk			*clk;
 	void __iomem			*mmio;
 	unsigned long			sspdr_phys;
-	struct completion		wait;
 	size_t				tx;
 	size_t				rx;
 	size_t				fifo_level;
@@ -123,8 +121,7 @@ static int ep93xx_spi_calc_divisors(struct spi_master *master,
 
 	/*
 	 * Make sure that max value is between values supported by the
-	 * controller. Note that minimum value is already checked in
-	 * ep93xx_spi_transfer_one_message().
+	 * controller.
 	 */
 	rate = clamp(rate, master->min_speed_hz, master->max_speed_hz);
 
@@ -149,15 +146,6 @@ static int ep93xx_spi_calc_divisors(struct spi_master *master,
 	return -EINVAL;
 }
 
-static void ep93xx_spi_cs_control(struct spi_device *spi, bool enable)
-{
-	if (spi->mode & SPI_CS_HIGH)
-		enable = !enable;
-
-	if (gpio_is_valid(spi->cs_gpio))
-		gpio_set_value(spi->cs_gpio, !enable);
-}
-
 static int ep93xx_spi_chip_setup(struct spi_master *master,
 				 struct spi_device *spi,
 				 struct spi_transfer *xfer)
@@ -188,34 +176,38 @@ static int ep93xx_spi_chip_setup(struct spi_master *master,
 	return 0;
 }
 
-static void ep93xx_do_write(struct ep93xx_spi *espi, struct spi_transfer *t)
+static void ep93xx_do_write(struct spi_master *master)
 {
+	struct ep93xx_spi *espi = spi_master_get_devdata(master);
+	struct spi_transfer *xfer = master->cur_msg->state;
 	u32 val = 0;
 
-	if (t->bits_per_word > 8) {
-		if (t->tx_buf)
-			val = ((u16 *)t->tx_buf)[espi->tx];
+	if (xfer->bits_per_word > 8) {
+		if (xfer->tx_buf)
+			val = ((u16 *)xfer->tx_buf)[espi->tx];
 		espi->tx += 2;
 	} else {
-		if (t->tx_buf)
-			val = ((u8 *)t->tx_buf)[espi->tx];
+		if (xfer->tx_buf)
+			val = ((u8 *)xfer->tx_buf)[espi->tx];
 		espi->tx += 1;
 	}
 	writel(val, espi->mmio + SSPDR);
 }
 
-static void ep93xx_do_read(struct ep93xx_spi *espi, struct spi_transfer *t)
+static void ep93xx_do_read(struct spi_master *master)
 {
+	struct ep93xx_spi *espi = spi_master_get_devdata(master);
+	struct spi_transfer *xfer = master->cur_msg->state;
 	u32 val;
 
 	val = readl(espi->mmio + SSPDR);
-	if (t->bits_per_word > 8) {
-		if (t->rx_buf)
-			((u16 *)t->rx_buf)[espi->rx] = val;
+	if (xfer->bits_per_word > 8) {
+		if (xfer->rx_buf)
+			((u16 *)xfer->rx_buf)[espi->rx] = val;
 		espi->rx += 2;
 	} else {
-		if (t->rx_buf)
-			((u8 *)t->rx_buf)[espi->rx] = val;
+		if (xfer->rx_buf)
+			((u8 *)xfer->rx_buf)[espi->rx] = val;
 		espi->rx += 1;
 	}
 }
@@ -234,45 +226,26 @@ static void ep93xx_do_read(struct ep93xx_spi *espi, struct spi_transfer *t)
 static int ep93xx_spi_read_write(struct spi_master *master)
 {
 	struct ep93xx_spi *espi = spi_master_get_devdata(master);
-	struct spi_transfer *t = master->cur_msg->state;
+	struct spi_transfer *xfer = master->cur_msg->state;
 
 	/* read as long as RX FIFO has frames in it */
 	while ((readl(espi->mmio + SSPSR) & SSPSR_RNE)) {
-		ep93xx_do_read(espi, t);
+		ep93xx_do_read(master);
 		espi->fifo_level--;
 	}
 
 	/* write as long as TX FIFO has room */
-	while (espi->fifo_level < SPI_FIFO_SIZE && espi->tx < t->len) {
-		ep93xx_do_write(espi, t);
+	while (espi->fifo_level < SPI_FIFO_SIZE && espi->tx < xfer->len) {
+		ep93xx_do_write(master);
 		espi->fifo_level++;
 	}
 
-	if (espi->rx == t->len)
+	if (espi->rx == xfer->len)
 		return 0;
 
 	return -EINPROGRESS;
 }
 
-static void ep93xx_spi_pio_transfer(struct spi_master *master)
-{
-	struct ep93xx_spi *espi = spi_master_get_devdata(master);
-
-	/*
-	 * Now everything is set up for the current transfer. We prime the TX
-	 * FIFO, enable interrupts, and wait for the transfer to complete.
-	 */
-	if (ep93xx_spi_read_write(master)) {
-		u32 val;
-
-		val = readl(espi->mmio + SSPCR1);
-		val |= (SSPCR1_RORIE | SSPCR1_TIE | SSPCR1_RIE);
-		writel(val, espi->mmio + SSPCR1);
-
-		wait_for_completion(&espi->wait);
-	}
-}
-
 /**
  * ep93xx_spi_dma_prepare() - prepares a DMA transfer
  * @master: SPI master
@@ -287,7 +260,7 @@ ep93xx_spi_dma_prepare(struct spi_master *master,
 		      enum dma_transfer_direction dir)
 {
 	struct ep93xx_spi *espi = spi_master_get_devdata(master);
-	struct spi_transfer *t = master->cur_msg->state;
+	struct spi_transfer *xfer = master->cur_msg->state;
 	struct dma_async_tx_descriptor *txd;
 	enum dma_slave_buswidth buswidth;
 	struct dma_slave_config conf;
@@ -295,10 +268,10 @@ ep93xx_spi_dma_prepare(struct spi_master *master,
 	struct sg_table *sgt;
 	struct dma_chan *chan;
 	const void *buf, *pbuf;
-	size_t len = t->len;
+	size_t len = xfer->len;
 	int i, ret, nents;
 
-	if (t->bits_per_word > 8)
+	if (xfer->bits_per_word > 8)
 		buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
 	else
 		buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE;
@@ -308,14 +281,14 @@ ep93xx_spi_dma_prepare(struct spi_master *master,
 
 	if (dir == DMA_DEV_TO_MEM) {
 		chan = espi->dma_rx;
-		buf = t->rx_buf;
+		buf = xfer->rx_buf;
 		sgt = &espi->rx_sgt;
 
 		conf.src_addr = espi->sspdr_phys;
 		conf.src_addr_width = buswidth;
 	} else {
 		chan = espi->dma_tx;
-		buf = t->tx_buf;
+		buf = xfer->tx_buf;
 		sgt = &espi->tx_sgt;
 
 		conf.dst_addr = espi->sspdr_phys;
@@ -406,10 +379,15 @@ static void ep93xx_spi_dma_finish(struct spi_master *master,
 
 static void ep93xx_spi_dma_callback(void *callback_param)
 {
-	complete(callback_param);
+	struct spi_master *master = callback_param;
+
+	ep93xx_spi_dma_finish(master, DMA_MEM_TO_DEV);
+	ep93xx_spi_dma_finish(master, DMA_DEV_TO_MEM);
+
+	spi_finalize_current_transfer(master);
 }
 
-static void ep93xx_spi_dma_transfer(struct spi_master *master)
+static int ep93xx_spi_dma_transfer(struct spi_master *master)
 {
 	struct ep93xx_spi *espi = spi_master_get_devdata(master);
 	struct dma_async_tx_descriptor *rxd, *txd;
@@ -417,177 +395,29 @@ static void ep93xx_spi_dma_transfer(struct spi_master *master)
 	rxd = ep93xx_spi_dma_prepare(master, DMA_DEV_TO_MEM);
 	if (IS_ERR(rxd)) {
 		dev_err(&master->dev, "DMA RX failed: %ld\n", PTR_ERR(rxd));
-		master->cur_msg->status = PTR_ERR(rxd);
-		return;
+		return PTR_ERR(rxd);
 	}
 
 	txd = ep93xx_spi_dma_prepare(master, DMA_MEM_TO_DEV);
 	if (IS_ERR(txd)) {
 		ep93xx_spi_dma_finish(master, DMA_DEV_TO_MEM);
 		dev_err(&master->dev, "DMA TX failed: %ld\n", PTR_ERR(txd));
-		master->cur_msg->status = PTR_ERR(txd);
-		return;
+		return PTR_ERR(txd);
 	}
 
 	/* We are ready when RX is done */
 	rxd->callback = ep93xx_spi_dma_callback;
-	rxd->callback_param = &espi->wait;
+	rxd->callback_param = master;
 
-	/* Now submit both descriptors and wait while they finish */
+	/* Now submit both descriptors and start DMA */
 	dmaengine_submit(rxd);
 	dmaengine_submit(txd);
 
 	dma_async_issue_pending(espi->dma_rx);
 	dma_async_issue_pending(espi->dma_tx);
 
-	wait_for_completion(&espi->wait);
-
-	ep93xx_spi_dma_finish(master, DMA_MEM_TO_DEV);
-	ep93xx_spi_dma_finish(master, DMA_DEV_TO_MEM);
-}
-
-/**
- * ep93xx_spi_process_transfer() - processes one SPI transfer
- * @master: SPI master
- * @msg: current message
- * @t: transfer to process
- *
- * This function processes one SPI transfer given in @t. Function waits until
- * transfer is complete (may sleep) and updates @msg->status based on whether
- * transfer was successfully processed or not.
- */
-static void ep93xx_spi_process_transfer(struct spi_master *master,
-					struct spi_message *msg,
-					struct spi_transfer *t)
-{
-	struct ep93xx_spi *espi = spi_master_get_devdata(master);
-	int err;
-
-	msg->state = t;
-
-	err = ep93xx_spi_chip_setup(master, msg->spi, t);
-	if (err) {
-		dev_err(&master->dev,
-			"failed to setup chip for transfer\n");
-		msg->status = err;
-		return;
-	}
-
-	espi->rx = 0;
-	espi->tx = 0;
-
-	/*
-	 * There is no point of setting up DMA for the transfers which will
-	 * fit into the FIFO and can be transferred with a single interrupt.
-	 * So in these cases we will be using PIO and don't bother for DMA.
-	 */
-	if (espi->dma_rx && t->len > SPI_FIFO_SIZE)
-		ep93xx_spi_dma_transfer(master);
-	else
-		ep93xx_spi_pio_transfer(master);
-
-	/*
-	 * In case of error during transmit, we bail out from processing
-	 * the message.
-	 */
-	if (msg->status)
-		return;
-
-	msg->actual_length += t->len;
-
-	/*
-	 * After this transfer is finished, perform any possible
-	 * post-transfer actions requested by the protocol driver.
-	 */
-	if (t->delay_usecs) {
-		set_current_state(TASK_UNINTERRUPTIBLE);
-		schedule_timeout(usecs_to_jiffies(t->delay_usecs));
-	}
-	if (t->cs_change) {
-		if (!list_is_last(&t->transfer_list, &msg->transfers)) {
-			/*
-			 * In case protocol driver is asking us to drop the
-			 * chipselect briefly, we let the scheduler to handle
-			 * any "delay" here.
-			 */
-			ep93xx_spi_cs_control(msg->spi, false);
-			cond_resched();
-			ep93xx_spi_cs_control(msg->spi, true);
-		}
-	}
-}
-
-/*
- * ep93xx_spi_process_message() - process one SPI message
- * @master: SPI master
- * @msg: message to process
- *
- * This function processes a single SPI message. We go through all transfers in
- * the message and pass them to ep93xx_spi_process_transfer(). Chipselect is
- * asserted during the whole message (unless per transfer cs_change is set).
- *
- * @msg->status contains %0 in case of success or negative error code in case of
- * failure.
- */
-static void ep93xx_spi_process_message(struct spi_master *master,
-				       struct spi_message *msg)
-{
-	struct ep93xx_spi *espi = spi_master_get_devdata(master);
-	unsigned long timeout;
-	struct spi_transfer *t;
-
-	/*
-	 * Just to be sure: flush any data from RX FIFO.
-	 */
-	timeout = jiffies + msecs_to_jiffies(SPI_TIMEOUT);
-	while (readl(espi->mmio + SSPSR) & SSPSR_RNE) {
-		if (time_after(jiffies, timeout)) {
-			dev_warn(&master->dev,
-				 "timeout while flushing RX FIFO\n");
-			msg->status = -ETIMEDOUT;
-			return;
-		}
-		readl(espi->mmio + SSPDR);
-	}
-
-	/*
-	 * We explicitly handle FIFO level. This way we don't have to check TX
-	 * FIFO status using %SSPSR_TNF bit which may cause RX FIFO overruns.
-	 */
-	espi->fifo_level = 0;
-
-	/*
-	 * Assert the chipselect.
-	 */
-	ep93xx_spi_cs_control(msg->spi, true);
-
-	list_for_each_entry(t, &msg->transfers, transfer_list) {
-		ep93xx_spi_process_transfer(master, msg, t);
-		if (msg->status)
-			break;
-	}
-
-	/*
-	 * Now the whole message is transferred (or failed for some reason). We
-	 * deselect the device and disable the SPI controller.
-	 */
-	ep93xx_spi_cs_control(msg->spi, false);
-}
-
-static int ep93xx_spi_transfer_one_message(struct spi_master *master,
-					   struct spi_message *msg)
-{
-	struct ep93xx_spi *espi = spi_master_get_devdata(master);
-
-	msg->state = NULL;
-	msg->status = 0;
-	msg->actual_length = 0;
-
-	ep93xx_spi_process_message(master, msg);
-
-	spi_finalize_current_message(master);
-
-	return 0;
+	/* signal that we need to wait for completion */
+	return 1;
 }
 
 static irqreturn_t ep93xx_spi_interrupt(int irq, void *dev_id)
@@ -630,11 +460,76 @@ static irqreturn_t ep93xx_spi_interrupt(int irq, void *dev_id)
 	val &= ~(SSPCR1_RORIE | SSPCR1_TIE | SSPCR1_RIE);
 	writel(val, espi->mmio + SSPCR1);
 
-	complete(&espi->wait);
+	spi_finalize_current_transfer(master);
 
 	return IRQ_HANDLED;
 }
 
+static int ep93xx_spi_transfer_one(struct spi_master *master,
+				   struct spi_device *spi,
+				   struct spi_transfer *xfer)
+{
+	struct ep93xx_spi *espi = spi_master_get_devdata(master);
+	u32 val;
+	int ret;
+
+	ret = ep93xx_spi_chip_setup(master, spi, xfer);
+	if (ret) {
+		dev_err(&master->dev, "failed to setup chip for transfer\n");
+		return ret;
+	}
+
+	master->cur_msg->state = xfer;
+	espi->rx = 0;
+	espi->tx = 0;
+
+	/*
+	 * There is no point of setting up DMA for the transfers which will
+	 * fit into the FIFO and can be transferred with a single interrupt.
+	 * So in these cases we will be using PIO and don't bother for DMA.
+	 */
+	if (espi->dma_rx && xfer->len > SPI_FIFO_SIZE)
+		return ep93xx_spi_dma_transfer(master);
+
+	/* Using PIO so prime the TX FIFO and enable interrupts */
+	ep93xx_spi_read_write(master);
+
+	val = readl(espi->mmio + SSPCR1);
+	val |= (SSPCR1_RORIE | SSPCR1_TIE | SSPCR1_RIE);
+	writel(val, espi->mmio + SSPCR1);
+
+	/* signal that we need to wait for completion */
+	return 1;
+}
+
+static int ep93xx_spi_prepare_message(struct spi_master *master,
+				      struct spi_message *msg)
+{
+	struct ep93xx_spi *espi = spi_master_get_devdata(master);
+	unsigned long timeout;
+
+	/*
+	 * Just to be sure: flush any data from RX FIFO.
+	 */
+	timeout = jiffies + msecs_to_jiffies(SPI_TIMEOUT);
+	while (readl(espi->mmio + SSPSR) & SSPSR_RNE) {
+		if (time_after(jiffies, timeout)) {
+			dev_warn(&master->dev,
+				 "timeout while flushing RX FIFO\n");
+			return -ETIMEDOUT;
+		}
+		readl(espi->mmio + SSPDR);
+	}
+
+	/*
+	 * We explicitly handle FIFO level. This way we don't have to check TX
+	 * FIFO status using %SSPSR_TNF bit which may cause RX FIFO overruns.
+	 */
+	espi->fifo_level = 0;
+
+	return 0;
+}
+
 static int ep93xx_spi_prepare_hardware(struct spi_master *master)
 {
 	struct ep93xx_spi *espi = spi_master_get_devdata(master);
@@ -769,7 +664,8 @@ static int ep93xx_spi_probe(struct platform_device *pdev)
 
 	master->prepare_transfer_hardware = ep93xx_spi_prepare_hardware;
 	master->unprepare_transfer_hardware = ep93xx_spi_unprepare_hardware;
-	master->transfer_one_message = ep93xx_spi_transfer_one_message;
+	master->prepare_message = ep93xx_spi_prepare_message;
+	master->transfer_one = ep93xx_spi_transfer_one;
 	master->bus_num = pdev->id;
 	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
 	master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16);
@@ -810,8 +706,6 @@ static int ep93xx_spi_probe(struct platform_device *pdev)
 		goto fail_release_master;
 	}
 
-	init_completion(&espi->wait);
-
 	/*
 	 * Calculate maximum and minimum supported clock rates
 	 * for the controller.