author		Linus Walleij <linus.walleij@linaro.org>	2012-02-22 04:05:38 -0500
committer	Grant Likely <grant.likely@secretlab.ca>	2012-03-07 21:19:48 -0500
commit		ffbbdd21329f3e15eeca6df2d4bc11c04d9d91c0 (patch)
tree		964ce05f044aa6917b4a1ed58ed055ed2e899dcc	/drivers/spi/spi-pl022.c
parent		0b2182ddac4b8837bbba996d03b7b28f4346db0a (diff)
spi: create a message queueing infrastructure
This rips the message queue in the PL022 driver out and pushes it into (optional) common infrastructure. Drivers that want to use the message pumping thread will need to define the new per-message transfer methods and leave the deprecated transfer() method as NULL.

Most of the design is described in the documentation changes that are included in this patch.

Since there is a queue that needs to be stopped when the system is suspending/resuming, two new calls are implemented for the device drivers to call in their suspend()/resume() functions: spi_master_suspend() and spi_master_resume().

ChangeLog v1->v2:
- Remove the Kconfig entry and do not make the queue support optional at all; instead be more aggressive and have it as part of the compulsory infrastructure.
- If the .transfer() method is implemented, just print a small deprecation notice and do not start the transfer pump.
- Fix a bitrotted comment.

ChangeLog v2->v3:
- Fix up a problematic sequence courtesy of Chris Blair.
- Stop rather than destroy the queue on suspend() courtesy of Chris Blair.

Signed-off-by: Chris Blair <chris.blair@stericsson.com>
Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
Tested-by: Mark Brown <broonie@opensource.wolfsonmicro.com>
Reviewed-by: Mark Brown <broonie@opensource.wolfsonmicro.com>
Signed-off-by: Grant Likely <grant.likely@secretlab.ca>
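As a rough illustration of the infrastructure this commit describes (not part of the patch itself), here is a minimal sketch of how a controller driver hooks into the new message queue. The foo_* names are placeholders; the callbacks and helpers used (transfer_one_message, prepare_transfer_hardware, unprepare_transfer_hardware, the rt flag, spi_finalize_current_message()) are the ones introduced by this change.

/* Hypothetical driver sketch -- foo_* names are placeholders. */
#include <linux/module.h>
#include <linux/spi/spi.h>

/* Called from the core's message pump, one spi_message at a time. */
static int foo_transfer_one_message(struct spi_master *master,
				    struct spi_message *msg)
{
	struct spi_transfer *xfer;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		/* push xfer->tx_buf / fill xfer->rx_buf on real hardware */
		msg->actual_length += xfer->len;
	}

	msg->status = 0;
	/* hand the message back so the core can pump the next one */
	spi_finalize_current_message(master);
	return 0;
}

/* Optional hooks bracketing a burst of traffic (e.g. for runtime PM). */
static int foo_prepare_transfer_hardware(struct spi_master *master)
{
	return 0;
}

static int foo_unprepare_transfer_hardware(struct spi_master *master)
{
	return 0;
}

static void foo_setup_master(struct spi_master *master)
{
	/* leave master->transfer NULL; the common queue is used instead */
	master->transfer_one_message = foo_transfer_one_message;
	master->prepare_transfer_hardware = foo_prepare_transfer_hardware;
	master->unprepare_transfer_hardware = foo_unprepare_transfer_hardware;
	master->rt = false;	/* set true for a realtime-priority pump */
}

The core allocates the queue and the (optionally realtime) pump kthread itself, so the driver only reports each finished message back via spi_finalize_current_message().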
Diffstat (limited to 'drivers/spi/spi-pl022.c')
-rw-r--r--	drivers/spi/spi-pl022.c	303
1 file changed, 52 insertions, 251 deletions
diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c
index 81847c9a7586..ec17a7af7e28 100644
--- a/drivers/spi/spi-pl022.c
+++ b/drivers/spi/spi-pl022.c
@@ -29,7 +29,6 @@
 #include <linux/errno.h>
 #include <linux/interrupt.h>
 #include <linux/spi/spi.h>
-#include <linux/kthread.h>
 #include <linux/delay.h>
 #include <linux/clk.h>
 #include <linux/err.h>
@@ -41,7 +40,6 @@
 #include <linux/dma-mapping.h>
 #include <linux/scatterlist.h>
 #include <linux/pm_runtime.h>
-#include <linux/sched.h>
 
 /*
  * This macro is used to define some register default values.
@@ -367,15 +365,7 @@ struct pl022 {
 	struct clk *clk;
 	struct spi_master *master;
 	struct pl022_ssp_controller *master_info;
-	/* Driver message pump */
-	struct kthread_worker kworker;
-	struct task_struct *kworker_task;
-	struct kthread_work pump_messages;
-	spinlock_t queue_lock;
-	struct list_head queue;
-	bool busy;
-	bool running;
-	/* Message transfer pump */
+	/* Message per-transfer pump */
 	struct tasklet_struct pump_transfers;
 	struct spi_message *cur_msg;
 	struct spi_transfer *cur_transfer;
@@ -397,6 +387,7 @@ struct pl022 {
 	struct sg_table sgt_rx;
 	struct sg_table sgt_tx;
 	char *dummypage;
+	bool dma_running;
 #endif
 };
 
@@ -451,8 +442,6 @@ static void null_cs_control(u32 command)
 static void giveback(struct pl022 *pl022)
 {
 	struct spi_transfer *last_transfer;
-	unsigned long flags;
-	struct spi_message *msg;
 	pl022->next_msg_cs_active = false;
 
 	last_transfer = list_entry(pl022->cur_msg->transfers.prev,
@@ -480,15 +469,8 @@ static void giveback(struct pl022 *pl022)
 		 * sent the current message could be unloaded, which
 		 * could invalidate the cs_control() callback...
 		 */
-
 		/* get a pointer to the next message, if any */
-		spin_lock_irqsave(&pl022->queue_lock, flags);
-		if (list_empty(&pl022->queue))
-			next_msg = NULL;
-		else
-			next_msg = list_entry(pl022->queue.next,
-					struct spi_message, queue);
-		spin_unlock_irqrestore(&pl022->queue_lock, flags);
+		next_msg = spi_get_next_queued_message(pl022->master);
 
 		/*
 		 * see if the next and current messages point
@@ -500,19 +482,13 @@ static void giveback(struct pl022 *pl022)
 			pl022->cur_chip->cs_control(SSP_CHIP_DESELECT);
 		else
 			pl022->next_msg_cs_active = true;
+
 	}
 
-	spin_lock_irqsave(&pl022->queue_lock, flags);
-	msg = pl022->cur_msg;
 	pl022->cur_msg = NULL;
 	pl022->cur_transfer = NULL;
 	pl022->cur_chip = NULL;
-	queue_kthread_work(&pl022->kworker, &pl022->pump_messages);
-	spin_unlock_irqrestore(&pl022->queue_lock, flags);
-
-	msg->state = NULL;
-	if (msg->complete)
-		msg->complete(msg->context);
+	spi_finalize_current_message(pl022->master);
 }
 
 /**
@@ -1066,6 +1042,7 @@ static int configure_dma(struct pl022 *pl022)
 	dmaengine_submit(txdesc);
 	dma_async_issue_pending(rxchan);
 	dma_async_issue_pending(txchan);
+	pl022->dma_running = true;
 
 	return 0;
 
@@ -1144,11 +1121,12 @@ static void terminate_dma(struct pl022 *pl022)
 	dmaengine_terminate_all(rxchan);
 	dmaengine_terminate_all(txchan);
 	unmap_free_dma_scatter(pl022);
+	pl022->dma_running = false;
 }
 
 static void pl022_dma_remove(struct pl022 *pl022)
 {
-	if (pl022->busy)
+	if (pl022->dma_running)
 		terminate_dma(pl022);
 	if (pl022->dma_tx_channel)
 		dma_release_channel(pl022->dma_tx_channel);
@@ -1496,73 +1474,20 @@ out:
 	return;
 }
 
-/**
- * pump_messages - kthread work function which processes spi message queue
- * @work: pointer to kthread work struct contained in the pl022 private struct
- *
- * This function checks if there is any spi message in the queue that
- * needs processing and delegate control to appropriate function
- * do_polling_transfer()/do_interrupt_dma_transfer()
- * based on the kind of the transfer
- *
- */
-static void pump_messages(struct kthread_work *work)
+static int pl022_transfer_one_message(struct spi_master *master,
+				      struct spi_message *msg)
 {
-	struct pl022 *pl022 =
-		container_of(work, struct pl022, pump_messages);
-	unsigned long flags;
-	bool was_busy = false;
-
-	/* Lock queue and check for queue work */
-	spin_lock_irqsave(&pl022->queue_lock, flags);
-	if (list_empty(&pl022->queue) || !pl022->running) {
-		if (pl022->busy) {
-			/* nothing more to do - disable spi/ssp and power off */
-			writew((readw(SSP_CR1(pl022->virtbase)) &
-				(~SSP_CR1_MASK_SSE)), SSP_CR1(pl022->virtbase));
-
-			if (pl022->master_info->autosuspend_delay > 0) {
-				pm_runtime_mark_last_busy(&pl022->adev->dev);
-				pm_runtime_put_autosuspend(&pl022->adev->dev);
-			} else {
-				pm_runtime_put(&pl022->adev->dev);
-			}
-		}
-		pl022->busy = false;
-		spin_unlock_irqrestore(&pl022->queue_lock, flags);
-		return;
-	}
-
-	/* Make sure we are not already running a message */
-	if (pl022->cur_msg) {
-		spin_unlock_irqrestore(&pl022->queue_lock, flags);
-		return;
-	}
-	/* Extract head of queue */
-	pl022->cur_msg =
-		list_entry(pl022->queue.next, struct spi_message, queue);
-
-	list_del_init(&pl022->cur_msg->queue);
-	if (pl022->busy)
-		was_busy = true;
-	else
-		pl022->busy = true;
-	spin_unlock_irqrestore(&pl022->queue_lock, flags);
+	struct pl022 *pl022 = spi_master_get_devdata(master);
 
 	/* Initial message state */
-	pl022->cur_msg->state = STATE_START;
-	pl022->cur_transfer = list_entry(pl022->cur_msg->transfers.next,
-					 struct spi_transfer, transfer_list);
+	pl022->cur_msg = msg;
+	msg->state = STATE_START;
+
+	pl022->cur_transfer = list_entry(msg->transfers.next,
+					 struct spi_transfer, transfer_list);
 
 	/* Setup the SPI using the per chip configuration */
-	pl022->cur_chip = spi_get_ctldata(pl022->cur_msg->spi);
-	if (!was_busy)
-		/*
-		 * We enable the core voltage and clocks here, then the clocks
-		 * and core will be disabled when this thread is run again
-		 * and there is no more work to be done.
-		 */
-		pm_runtime_get_sync(&pl022->adev->dev);
+	pl022->cur_chip = spi_get_ctldata(msg->spi);
 
 	restore_state(pl022);
 	flush(pl022);
@@ -1571,119 +1496,37 @@ static void pump_messages(struct kthread_work *work)
 		do_polling_transfer(pl022);
 	else
 		do_interrupt_dma_transfer(pl022);
-}
-
-static int __init init_queue(struct pl022 *pl022)
-{
-	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
-
-	INIT_LIST_HEAD(&pl022->queue);
-	spin_lock_init(&pl022->queue_lock);
-
-	pl022->running = false;
-	pl022->busy = false;
-
-	tasklet_init(&pl022->pump_transfers, pump_transfers,
-		     (unsigned long)pl022);
-
-	init_kthread_worker(&pl022->kworker);
-	pl022->kworker_task = kthread_run(kthread_worker_fn,
-					  &pl022->kworker,
-					  dev_name(pl022->master->dev.parent));
-	if (IS_ERR(pl022->kworker_task)) {
-		dev_err(&pl022->adev->dev,
-			"failed to create message pump task\n");
-		return -ENOMEM;
-	}
-	init_kthread_work(&pl022->pump_messages, pump_messages);
-
-	/*
-	 * Board config will indicate if this controller should run the
-	 * message pump with high (realtime) priority to reduce the transfer
-	 * latency on the bus by minimising the delay between a transfer
-	 * request and the scheduling of the message pump thread. Without this
-	 * setting the message pump thread will remain at default priority.
-	 */
-	if (pl022->master_info->rt) {
-		dev_info(&pl022->adev->dev,
-			"will run message pump with realtime priority\n");
-		sched_setscheduler(pl022->kworker_task, SCHED_FIFO, &param);
-	}
 
 	return 0;
 }
 
-static int start_queue(struct pl022 *pl022)
+static int pl022_prepare_transfer_hardware(struct spi_master *master)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(&pl022->queue_lock, flags);
-
-	if (pl022->running || pl022->busy) {
-		spin_unlock_irqrestore(&pl022->queue_lock, flags);
-		return -EBUSY;
-	}
-
-	pl022->running = true;
-	pl022->cur_msg = NULL;
-	pl022->cur_transfer = NULL;
-	pl022->cur_chip = NULL;
-	pl022->next_msg_cs_active = false;
-	spin_unlock_irqrestore(&pl022->queue_lock, flags);
-
-	queue_kthread_work(&pl022->kworker, &pl022->pump_messages);
+	struct pl022 *pl022 = spi_master_get_devdata(master);
 
+	/*
+	 * Just make sure we have all we need to run the transfer by syncing
+	 * with the runtime PM framework.
+	 */
+	pm_runtime_get_sync(&pl022->adev->dev);
 	return 0;
 }
 
-static int stop_queue(struct pl022 *pl022)
+static int pl022_unprepare_transfer_hardware(struct spi_master *master)
 {
-	unsigned long flags;
-	unsigned limit = 500;
-	int status = 0;
+	struct pl022 *pl022 = spi_master_get_devdata(master);
 
-	spin_lock_irqsave(&pl022->queue_lock, flags);
+	/* nothing more to do - disable spi/ssp and power off */
+	writew((readw(SSP_CR1(pl022->virtbase)) &
+		(~SSP_CR1_MASK_SSE)), SSP_CR1(pl022->virtbase));
 
-	/* This is a bit lame, but is optimized for the common execution path.
-	 * A wait_queue on the pl022->busy could be used, but then the common
-	 * execution path (pump_messages) would be required to call wake_up or
-	 * friends on every SPI message. Do this instead */
-	while ((!list_empty(&pl022->queue) || pl022->busy) && limit--) {
-		spin_unlock_irqrestore(&pl022->queue_lock, flags);
-		msleep(10);
-		spin_lock_irqsave(&pl022->queue_lock, flags);
+	if (pl022->master_info->autosuspend_delay > 0) {
+		pm_runtime_mark_last_busy(&pl022->adev->dev);
+		pm_runtime_put_autosuspend(&pl022->adev->dev);
+	} else {
+		pm_runtime_put(&pl022->adev->dev);
 	}
 
-	if (!list_empty(&pl022->queue) || pl022->busy)
-		status = -EBUSY;
-	else
-		pl022->running = false;
-
-	spin_unlock_irqrestore(&pl022->queue_lock, flags);
-
-	return status;
-}
-
-static int destroy_queue(struct pl022 *pl022)
-{
-	int status;
-
-	status = stop_queue(pl022);
-
-	/*
-	 * We are unloading the module or failing to load (only two calls
-	 * to this routine), and neither call can handle a return value.
-	 * However, flush_kthread_worker will block until all work is done.
-	 * If the reason that stop_queue timed out is that the work will never
-	 * finish, then it does no good to call flush/stop thread, so
-	 * return anyway.
-	 */
-	if (status != 0)
-		return status;
-
-	flush_kthread_worker(&pl022->kworker);
-	kthread_stop(pl022->kworker_task);
-
 	return 0;
 }
 
@@ -1803,38 +1646,6 @@ static int verify_controller_parameters(struct pl022 *pl022,
 	return 0;
 }
 
-/**
- * pl022_transfer - transfer function registered to SPI master framework
- * @spi: spi device which is requesting transfer
- * @msg: spi message which is to handled is queued to driver queue
- *
- * This function is registered to the SPI framework for this SPI master
- * controller. It will queue the spi_message in the queue of driver if
- * the queue is not stopped and return.
- */
-static int pl022_transfer(struct spi_device *spi, struct spi_message *msg)
-{
-	struct pl022 *pl022 = spi_master_get_devdata(spi->master);
-	unsigned long flags;
-
-	spin_lock_irqsave(&pl022->queue_lock, flags);
-
-	if (!pl022->running) {
-		spin_unlock_irqrestore(&pl022->queue_lock, flags);
-		return -ESHUTDOWN;
-	}
-	msg->actual_length = 0;
-	msg->status = -EINPROGRESS;
-	msg->state = STATE_START;
-
-	list_add_tail(&msg->queue, &pl022->queue);
-	if (pl022->running && !pl022->busy)
-		queue_kthread_work(&pl022->kworker, &pl022->pump_messages);
-
-	spin_unlock_irqrestore(&pl022->queue_lock, flags);
-	return 0;
-}
-
 static inline u32 spi_rate(u32 rate, u16 cpsdvsr, u16 scr)
 {
 	return rate / (cpsdvsr * (1 + scr));
@@ -2197,7 +2008,10 @@ pl022_probe(struct amba_device *adev, const struct amba_id *id)
 	master->num_chipselect = platform_info->num_chipselect;
 	master->cleanup = pl022_cleanup;
 	master->setup = pl022_setup;
-	master->transfer = pl022_transfer;
+	master->prepare_transfer_hardware = pl022_prepare_transfer_hardware;
+	master->transfer_one_message = pl022_transfer_one_message;
+	master->unprepare_transfer_hardware = pl022_unprepare_transfer_hardware;
+	master->rt = platform_info->rt;
 
 	/*
 	 * Supports mode 0-3, loopback, and active low CS. Transfers are
@@ -2241,6 +2055,10 @@ pl022_probe(struct amba_device *adev, const struct amba_id *id)
 		goto err_no_clk_en;
 	}
 
+	/* Initialize transfer pump */
+	tasklet_init(&pl022->pump_transfers, pump_transfers,
+		     (unsigned long)pl022);
+
 	/* Disable SSP */
 	writew((readw(SSP_CR1(pl022->virtbase)) & (~SSP_CR1_MASK_SSE)),
 	       SSP_CR1(pl022->virtbase));
@@ -2260,17 +2078,6 @@ pl022_probe(struct amba_device *adev, const struct amba_id *id)
 		platform_info->enable_dma = 0;
 	}
 
-	/* Initialize and start queue */
-	status = init_queue(pl022);
-	if (status != 0) {
-		dev_err(&adev->dev, "probe - problem initializing queue\n");
-		goto err_init_queue;
-	}
-	status = start_queue(pl022);
-	if (status != 0) {
-		dev_err(&adev->dev, "probe - problem starting queue\n");
-		goto err_start_queue;
-	}
 	/* Register with the SPI framework */
 	amba_set_drvdata(adev, pl022);
 	status = spi_register_master(master);
@@ -2296,9 +2103,6 @@ pl022_probe(struct amba_device *adev, const struct amba_id *id)
 	return 0;
 
  err_spi_register:
- err_start_queue:
- err_init_queue:
-	destroy_queue(pl022);
 	if (platform_info->enable_dma)
 		pl022_dma_remove(pl022);
 
@@ -2334,9 +2138,6 @@ pl022_remove(struct amba_device *adev)
 	 */
 	pm_runtime_get_noresume(&adev->dev);
 
-	/* Remove the queue */
-	if (destroy_queue(pl022) != 0)
-		dev_err(&adev->dev, "queue remove failed\n");
 	load_ssp_default_config(pl022);
 	if (pl022->master_info->enable_dma)
 		pl022_dma_remove(pl022);
@@ -2358,12 +2159,12 @@ pl022_remove(struct amba_device *adev)
 static int pl022_suspend(struct device *dev)
 {
 	struct pl022 *pl022 = dev_get_drvdata(dev);
-	int status = 0;
+	int ret;
 
-	status = stop_queue(pl022);
-	if (status) {
-		dev_warn(dev, "suspend cannot stop queue\n");
-		return status;
+	ret = spi_master_suspend(pl022->master);
+	if (ret) {
+		dev_warn(dev, "cannot suspend master\n");
+		return ret;
 	}
 
 	dev_dbg(dev, "suspended\n");
@@ -2373,16 +2174,16 @@ static int pl022_suspend(struct device *dev)
 static int pl022_resume(struct device *dev)
 {
 	struct pl022 *pl022 = dev_get_drvdata(dev);
-	int status = 0;
+	int ret;
 
 	/* Start the queue running */
-	status = start_queue(pl022);
-	if (status)
-		dev_err(dev, "problem starting queue (%d)\n", status);
+	ret = spi_master_resume(pl022->master);
+	if (ret)
+		dev_err(dev, "problem starting queue (%d)\n", ret);
 	else
 		dev_dbg(dev, "resumed\n");
 
-	return status;
+	return ret;
 }
 #endif /* CONFIG_PM */
 
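For suspend/resume, the pattern in the two hunks above can be sketched generically. This is a hypothetical hookup, assuming the driver stored its spi_master (rather than a private struct, as pl022 does) as driver data; spi_master_suspend() and spi_master_resume() are the helpers added by this commit.

/* Hypothetical PM sketch -- assumes the spi_master was stored as drvdata. */
#include <linux/pm.h>
#include <linux/spi/spi.h>

static int foo_suspend(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);

	/* stop the core's message queue before powering the block down */
	return spi_master_suspend(master);
}

static int foo_resume(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);

	/* restart the queue; pending messages start flowing again */
	return spi_master_resume(master);
}

static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

In pl022 itself the drvdata is the driver's private struct, so the calls above take pl022->master instead, exactly as the final hunks show.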