 Documentation/spi/spi-summary |  58
 drivers/spi/spi-pl022.c       | 303
 drivers/spi/spi.c             | 339
 include/linux/spi/spi.h       |  51
 4 files changed, 487 insertions(+), 264 deletions(-)

diff --git a/Documentation/spi/spi-summary b/Documentation/spi/spi-summary
index 4884cb33845d..7312ec14dd89 100644
--- a/Documentation/spi/spi-summary
+++ b/Documentation/spi/spi-summary
@@ -1,7 +1,7 @@
 Overview of Linux kernel SPI support
 ====================================
 
-21-May-2007
+02-Feb-2012
 
 What is SPI?
 ------------
@@ -483,9 +483,9 @@ also initialize its own internal state. (See below about bus numbering
 and those methods.)
 
 After you initialize the spi_master, then use spi_register_master() to
-publish it to the rest of the system. At that time, device nodes for
-the controller and any predeclared spi devices will be made available,
-and the driver model core will take care of binding them to drivers.
+publish it to the rest of the system. At that time, device nodes for the
+controller and any predeclared spi devices will be made available, and
+the driver model core will take care of binding them to drivers.
 
 If you need to remove your SPI controller driver, spi_unregister_master()
 will reverse the effect of spi_register_master().
@@ -521,21 +521,53 @@ SPI MASTER METHODS
     ** When you code setup(), ASSUME that the controller
     ** is actively processing transfers for another device.
 
-    master->transfer(struct spi_device *spi, struct spi_message *message)
-	This must not sleep. Its responsibility is arrange that the
-	transfer happens and its complete() callback is issued. The two
-	will normally happen later, after other transfers complete, and
-	if the controller is idle it will need to be kickstarted.
-
     master->cleanup(struct spi_device *spi)
 	Your controller driver may use spi_device.controller_state to hold
 	state it dynamically associates with that device. If you do that,
 	be sure to provide the cleanup() method to free that state.
 
+    master->prepare_transfer_hardware(struct spi_master *master)
+	This will be called by the queue mechanism to signal to the driver
+	that a message is coming in soon, so the subsystem requests the
+	driver to prepare the transfer hardware by issuing this call.
+	This may sleep.
+
+    master->unprepare_transfer_hardware(struct spi_master *master)
+	This will be called by the queue mechanism to signal to the driver
+	that there are no more messages pending in the queue and it may
+	relax the hardware (e.g. by power management calls). This may sleep.
+
+    master->transfer_one_message(struct spi_master *master,
+				 struct spi_message *mesg)
+	The subsystem calls the driver to transfer a single message while
+	queuing transfers that arrive in the meantime. When the driver is
+	finished with this message, it must call
+	spi_finalize_current_message() so the subsystem can issue the next
+	transfer. This may sleep.
+
+    DEPRECATED METHODS
+
+    master->transfer(struct spi_device *spi, struct spi_message *message)
+	This must not sleep. Its responsibility is to arrange that the
+	transfer happens and its complete() callback is issued. The two
+	will normally happen later, after other transfers complete, and
+	if the controller is idle it will need to be kickstarted. This
+	method is not used on queued controllers and must be NULL if
+	transfer_one_message() and (un)prepare_transfer_hardware() are
+	implemented.
+
 
 SPI MESSAGE QUEUE
 
-The bulk of the driver will be managing the I/O queue fed by transfer().
+If you are happy with the standard queueing mechanism provided by the
+SPI subsystem, just implement the queued methods specified above. Using
+the message queue has the upside of centralizing a lot of code and
+providing pure process-context execution of methods. The message queue
+can also be elevated to realtime priority on high-priority SPI traffic.
+
+Unless the queueing mechanism in the SPI subsystem is selected, the bulk
+of the driver will be managing the I/O queue fed by the now deprecated
+function transfer().
 
 That queue could be purely conceptual. For example, a driver used only
 for low-frequency sensor access might be fine using synchronous PIO.
@@ -561,4 +593,6 @@ Stephen Street
 Mark Underwood
 Andrew Victor
 Vitaly Wool
-
+Grant Likely
+Mark Brown
+Linus Walleij
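
A note on the queued API documented above: a controller driver converted
to the core queue only fills in the three new hooks and leaves
master->transfer NULL so the core installs its own queued transfer
function. A minimal sketch, assuming a hypothetical foo driver (none of
the foo_* names are from this patch):

    static int foo_prepare_transfer_hardware(struct spi_master *master)
    {
            /* A message is about to arrive: power/clock up; may sleep */
            return 0;
    }

    static int foo_transfer_one_message(struct spi_master *master,
                                        struct spi_message *msg)
    {
            struct spi_transfer *xfer;

            /* Perform the I/O for each transfer in this message */
            list_for_each_entry(xfer, &msg->transfers, transfer_list)
                    msg->actual_length += xfer->len; /* stand-in for real I/O */

            msg->status = 0;
            /* Hand the message back so the core can issue the next one */
            spi_finalize_current_message(master);
            return 0;
    }

    static int foo_unprepare_transfer_hardware(struct spi_master *master)
    {
            /* Queue has drained: relax the hardware; may sleep */
            return 0;
    }

    /* ...in probe(), instead of setting master->transfer: */
    master->prepare_transfer_hardware = foo_prepare_transfer_hardware;
    master->transfer_one_message = foo_transfer_one_message;
    master->unprepare_transfer_hardware = foo_unprepare_transfer_hardware;
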
diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c
index 81847c9a7586..ec17a7af7e28 100644
--- a/drivers/spi/spi-pl022.c
+++ b/drivers/spi/spi-pl022.c
@@ -29,7 +29,6 @@
 #include <linux/errno.h>
 #include <linux/interrupt.h>
 #include <linux/spi/spi.h>
-#include <linux/kthread.h>
 #include <linux/delay.h>
 #include <linux/clk.h>
 #include <linux/err.h>
@@ -41,7 +40,6 @@
 #include <linux/dma-mapping.h>
 #include <linux/scatterlist.h>
 #include <linux/pm_runtime.h>
-#include <linux/sched.h>
 
 /*
  * This macro is used to define some register default values.
@@ -367,15 +365,7 @@ struct pl022 {
 	struct clk			*clk;
 	struct spi_master		*master;
 	struct pl022_ssp_controller	*master_info;
-	/* Driver message pump */
-	struct kthread_worker		kworker;
-	struct task_struct		*kworker_task;
-	struct kthread_work		pump_messages;
-	spinlock_t			queue_lock;
-	struct list_head		queue;
-	bool				busy;
-	bool				running;
-	/* Message transfer pump */
+	/* Message per-transfer pump */
 	struct tasklet_struct		pump_transfers;
 	struct spi_message		*cur_msg;
 	struct spi_transfer		*cur_transfer;
@@ -397,6 +387,7 @@ struct pl022 {
 	struct sg_table			sgt_rx;
 	struct sg_table			sgt_tx;
 	char				*dummypage;
+	bool				dma_running;
 #endif
 };
 
@@ -451,8 +442,6 @@ static void null_cs_control(u32 command)
 static void giveback(struct pl022 *pl022)
 {
 	struct spi_transfer *last_transfer;
-	unsigned long flags;
-	struct spi_message *msg;
 	pl022->next_msg_cs_active = false;
 
 	last_transfer = list_entry(pl022->cur_msg->transfers.prev,
@@ -480,15 +469,8 @@ static void giveback(struct pl022 *pl022)
 	 * sent the current message could be unloaded, which
 	 * could invalidate the cs_control() callback...
 	 */
-
 	/* get a pointer to the next message, if any */
-	spin_lock_irqsave(&pl022->queue_lock, flags);
-	if (list_empty(&pl022->queue))
-		next_msg = NULL;
-	else
-		next_msg = list_entry(pl022->queue.next,
-				struct spi_message, queue);
-	spin_unlock_irqrestore(&pl022->queue_lock, flags);
+	next_msg = spi_get_next_queued_message(pl022->master);
 
 	/*
 	 * see if the next and current messages point
@@ -500,19 +482,13 @@ static void giveback(struct pl022 *pl022)
 			pl022->cur_chip->cs_control(SSP_CHIP_DESELECT);
 		else
 			pl022->next_msg_cs_active = true;
+
 	}
 
-	spin_lock_irqsave(&pl022->queue_lock, flags);
-	msg = pl022->cur_msg;
 	pl022->cur_msg = NULL;
 	pl022->cur_transfer = NULL;
 	pl022->cur_chip = NULL;
-	queue_kthread_work(&pl022->kworker, &pl022->pump_messages);
-	spin_unlock_irqrestore(&pl022->queue_lock, flags);
-
-	msg->state = NULL;
-	if (msg->complete)
-		msg->complete(msg->context);
+	spi_finalize_current_message(pl022->master);
 }
 
 /**
@@ -1066,6 +1042,7 @@ static int configure_dma(struct pl022 *pl022)
 	dmaengine_submit(txdesc);
 	dma_async_issue_pending(rxchan);
 	dma_async_issue_pending(txchan);
+	pl022->dma_running = true;
 
 	return 0;
 
@@ -1144,11 +1121,12 @@ static void terminate_dma(struct pl022 *pl022)
 	dmaengine_terminate_all(rxchan);
 	dmaengine_terminate_all(txchan);
 	unmap_free_dma_scatter(pl022);
+	pl022->dma_running = false;
 }
 
 static void pl022_dma_remove(struct pl022 *pl022)
 {
-	if (pl022->busy)
+	if (pl022->dma_running)
 		terminate_dma(pl022);
 	if (pl022->dma_tx_channel)
 		dma_release_channel(pl022->dma_tx_channel);
@@ -1496,73 +1474,20 @@ out:
 	return;
 }
 
-/**
- * pump_messages - kthread work function which processes spi message queue
- * @work: pointer to kthread work struct contained in the pl022 private struct
- *
- * This function checks if there is any spi message in the queue that
- * needs processing and delegate control to appropriate function
- * do_polling_transfer()/do_interrupt_dma_transfer()
- * based on the kind of the transfer
- *
- */
-static void pump_messages(struct kthread_work *work)
+static int pl022_transfer_one_message(struct spi_master *master,
+				      struct spi_message *msg)
 {
-	struct pl022 *pl022 =
-		container_of(work, struct pl022, pump_messages);
-	unsigned long flags;
-	bool was_busy = false;
-
-	/* Lock queue and check for queue work */
-	spin_lock_irqsave(&pl022->queue_lock, flags);
-	if (list_empty(&pl022->queue) || !pl022->running) {
-		if (pl022->busy) {
-			/* nothing more to do - disable spi/ssp and power off */
-			writew((readw(SSP_CR1(pl022->virtbase)) &
-				(~SSP_CR1_MASK_SSE)), SSP_CR1(pl022->virtbase));
-
-			if (pl022->master_info->autosuspend_delay > 0) {
-				pm_runtime_mark_last_busy(&pl022->adev->dev);
-				pm_runtime_put_autosuspend(&pl022->adev->dev);
-			} else {
-				pm_runtime_put(&pl022->adev->dev);
-			}
-		}
-		pl022->busy = false;
-		spin_unlock_irqrestore(&pl022->queue_lock, flags);
-		return;
-	}
-
-	/* Make sure we are not already running a message */
-	if (pl022->cur_msg) {
-		spin_unlock_irqrestore(&pl022->queue_lock, flags);
-		return;
-	}
-	/* Extract head of queue */
-	pl022->cur_msg =
-		list_entry(pl022->queue.next, struct spi_message, queue);
-
-	list_del_init(&pl022->cur_msg->queue);
-	if (pl022->busy)
-		was_busy = true;
-	else
-		pl022->busy = true;
-	spin_unlock_irqrestore(&pl022->queue_lock, flags);
+	struct pl022 *pl022 = spi_master_get_devdata(master);
 
 	/* Initial message state */
-	pl022->cur_msg->state = STATE_START;
-	pl022->cur_transfer = list_entry(pl022->cur_msg->transfers.next,
-					 struct spi_transfer, transfer_list);
+	pl022->cur_msg = msg;
+	msg->state = STATE_START;
+
+	pl022->cur_transfer = list_entry(msg->transfers.next,
+					 struct spi_transfer, transfer_list);
 
 	/* Setup the SPI using the per chip configuration */
-	pl022->cur_chip = spi_get_ctldata(pl022->cur_msg->spi);
-	if (!was_busy)
-		/*
-		 * We enable the core voltage and clocks here, then the clocks
-		 * and core will be disabled when this thread is run again
-		 * and there is no more work to be done.
-		 */
-		pm_runtime_get_sync(&pl022->adev->dev);
+	pl022->cur_chip = spi_get_ctldata(msg->spi);
 
 	restore_state(pl022);
 	flush(pl022);
@@ -1571,119 +1496,37 @@ static void pump_messages(struct kthread_work *work)
 		do_polling_transfer(pl022);
 	else
 		do_interrupt_dma_transfer(pl022);
-}
-
-static int __init init_queue(struct pl022 *pl022)
-{
-	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
-
-	INIT_LIST_HEAD(&pl022->queue);
-	spin_lock_init(&pl022->queue_lock);
-
-	pl022->running = false;
-	pl022->busy = false;
-
-	tasklet_init(&pl022->pump_transfers, pump_transfers,
-			(unsigned long)pl022);
-
-	init_kthread_worker(&pl022->kworker);
-	pl022->kworker_task = kthread_run(kthread_worker_fn,
-					&pl022->kworker,
-					dev_name(pl022->master->dev.parent));
-	if (IS_ERR(pl022->kworker_task)) {
-		dev_err(&pl022->adev->dev,
-			"failed to create message pump task\n");
-		return -ENOMEM;
-	}
-	init_kthread_work(&pl022->pump_messages, pump_messages);
-
-	/*
-	 * Board config will indicate if this controller should run the
-	 * message pump with high (realtime) priority to reduce the transfer
-	 * latency on the bus by minimising the delay between a transfer
-	 * request and the scheduling of the message pump thread. Without this
-	 * setting the message pump thread will remain at default priority.
-	 */
-	if (pl022->master_info->rt) {
-		dev_info(&pl022->adev->dev,
-			"will run message pump with realtime priority\n");
-		sched_setscheduler(pl022->kworker_task, SCHED_FIFO, &param);
-	}
 
 	return 0;
 }
 
-static int start_queue(struct pl022 *pl022)
+static int pl022_prepare_transfer_hardware(struct spi_master *master)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(&pl022->queue_lock, flags);
-
-	if (pl022->running || pl022->busy) {
-		spin_unlock_irqrestore(&pl022->queue_lock, flags);
-		return -EBUSY;
-	}
-
-	pl022->running = true;
-	pl022->cur_msg = NULL;
-	pl022->cur_transfer = NULL;
-	pl022->cur_chip = NULL;
-	pl022->next_msg_cs_active = false;
-	spin_unlock_irqrestore(&pl022->queue_lock, flags);
-
-	queue_kthread_work(&pl022->kworker, &pl022->pump_messages);
+	struct pl022 *pl022 = spi_master_get_devdata(master);
 
+	/*
+	 * Just make sure we have all we need to run the transfer by syncing
+	 * with the runtime PM framework.
+	 */
+	pm_runtime_get_sync(&pl022->adev->dev);
 	return 0;
 }
 
-static int stop_queue(struct pl022 *pl022)
+static int pl022_unprepare_transfer_hardware(struct spi_master *master)
 {
-	unsigned long flags;
-	unsigned limit = 500;
-	int status = 0;
+	struct pl022 *pl022 = spi_master_get_devdata(master);
 
-	spin_lock_irqsave(&pl022->queue_lock, flags);
+	/* nothing more to do - disable spi/ssp and power off */
+	writew((readw(SSP_CR1(pl022->virtbase)) &
+		(~SSP_CR1_MASK_SSE)), SSP_CR1(pl022->virtbase));
 
-	/* This is a bit lame, but is optimized for the common execution path.
-	 * A wait_queue on the pl022->busy could be used, but then the common
-	 * execution path (pump_messages) would be required to call wake_up or
-	 * friends on every SPI message. Do this instead */
-	while ((!list_empty(&pl022->queue) || pl022->busy) && limit--) {
-		spin_unlock_irqrestore(&pl022->queue_lock, flags);
-		msleep(10);
-		spin_lock_irqsave(&pl022->queue_lock, flags);
+	if (pl022->master_info->autosuspend_delay > 0) {
+		pm_runtime_mark_last_busy(&pl022->adev->dev);
+		pm_runtime_put_autosuspend(&pl022->adev->dev);
+	} else {
+		pm_runtime_put(&pl022->adev->dev);
 	}
 
-	if (!list_empty(&pl022->queue) || pl022->busy)
-		status = -EBUSY;
-	else
-		pl022->running = false;
-
-	spin_unlock_irqrestore(&pl022->queue_lock, flags);
-
-	return status;
-}
-
-static int destroy_queue(struct pl022 *pl022)
-{
-	int status;
-
-	status = stop_queue(pl022);
-
-	/*
-	 * We are unloading the module or failing to load (only two calls
-	 * to this routine), and neither call can handle a return value.
-	 * However, flush_kthread_worker will block until all work is done.
-	 * If the reason that stop_queue timed out is that the work will never
-	 * finish, then it does no good to call flush/stop thread, so
-	 * return anyway.
-	 */
-	if (status != 0)
-		return status;
-
-	flush_kthread_worker(&pl022->kworker);
-	kthread_stop(pl022->kworker_task);
-
 	return 0;
 }
 
@@ -1803,38 +1646,6 @@ static int verify_controller_parameters(struct pl022 *pl022,
 	return 0;
 }
 
-/**
- * pl022_transfer - transfer function registered to SPI master framework
- * @spi: spi device which is requesting transfer
- * @msg: spi message which is to handled is queued to driver queue
- *
- * This function is registered to the SPI framework for this SPI master
- * controller. It will queue the spi_message in the queue of driver if
- * the queue is not stopped and return.
- */
-static int pl022_transfer(struct spi_device *spi, struct spi_message *msg)
-{
-	struct pl022 *pl022 = spi_master_get_devdata(spi->master);
-	unsigned long flags;
-
-	spin_lock_irqsave(&pl022->queue_lock, flags);
-
-	if (!pl022->running) {
-		spin_unlock_irqrestore(&pl022->queue_lock, flags);
-		return -ESHUTDOWN;
-	}
-	msg->actual_length = 0;
-	msg->status = -EINPROGRESS;
-	msg->state = STATE_START;
-
-	list_add_tail(&msg->queue, &pl022->queue);
-	if (pl022->running && !pl022->busy)
-		queue_kthread_work(&pl022->kworker, &pl022->pump_messages);
-
-	spin_unlock_irqrestore(&pl022->queue_lock, flags);
-	return 0;
-}
-
 static inline u32 spi_rate(u32 rate, u16 cpsdvsr, u16 scr)
 {
 	return rate / (cpsdvsr * (1 + scr));
@@ -2197,7 +2008,10 @@ pl022_probe(struct amba_device *adev, const struct amba_id *id)
 	master->num_chipselect = platform_info->num_chipselect;
 	master->cleanup = pl022_cleanup;
 	master->setup = pl022_setup;
-	master->transfer = pl022_transfer;
+	master->prepare_transfer_hardware = pl022_prepare_transfer_hardware;
+	master->transfer_one_message = pl022_transfer_one_message;
+	master->unprepare_transfer_hardware = pl022_unprepare_transfer_hardware;
+	master->rt = platform_info->rt;
 
 	/*
 	 * Supports mode 0-3, loopback, and active low CS. Transfers are
@@ -2241,6 +2055,10 @@ pl022_probe(struct amba_device *adev, const struct amba_id *id)
 		goto err_no_clk_en;
 	}
 
+	/* Initialize transfer pump */
+	tasklet_init(&pl022->pump_transfers, pump_transfers,
+		     (unsigned long)pl022);
+
 	/* Disable SSP */
 	writew((readw(SSP_CR1(pl022->virtbase)) & (~SSP_CR1_MASK_SSE)),
 	       SSP_CR1(pl022->virtbase));
@@ -2260,17 +2078,6 @@ pl022_probe(struct amba_device *adev, const struct amba_id *id)
 		platform_info->enable_dma = 0;
 	}
 
-	/* Initialize and start queue */
-	status = init_queue(pl022);
-	if (status != 0) {
-		dev_err(&adev->dev, "probe - problem initializing queue\n");
-		goto err_init_queue;
-	}
-	status = start_queue(pl022);
-	if (status != 0) {
-		dev_err(&adev->dev, "probe - problem starting queue\n");
-		goto err_start_queue;
-	}
 	/* Register with the SPI framework */
 	amba_set_drvdata(adev, pl022);
 	status = spi_register_master(master);
@@ -2296,9 +2103,6 @@ pl022_probe(struct amba_device *adev, const struct amba_id *id)
 	return 0;
 
  err_spi_register:
- err_start_queue:
- err_init_queue:
-	destroy_queue(pl022);
 	if (platform_info->enable_dma)
 		pl022_dma_remove(pl022);
 
@@ -2334,9 +2138,6 @@ pl022_remove(struct amba_device *adev)
 	 */
 	pm_runtime_get_noresume(&adev->dev);
 
-	/* Remove the queue */
-	if (destroy_queue(pl022) != 0)
-		dev_err(&adev->dev, "queue remove failed\n");
 	load_ssp_default_config(pl022);
 	if (pl022->master_info->enable_dma)
 		pl022_dma_remove(pl022);
@@ -2358,12 +2159,12 @@ pl022_remove(struct amba_device *adev)
 static int pl022_suspend(struct device *dev)
 {
 	struct pl022 *pl022 = dev_get_drvdata(dev);
-	int status = 0;
+	int ret;
 
-	status = stop_queue(pl022);
-	if (status) {
-		dev_warn(dev, "suspend cannot stop queue\n");
-		return status;
+	ret = spi_master_suspend(pl022->master);
+	if (ret) {
+		dev_warn(dev, "cannot suspend master\n");
+		return ret;
 	}
 
 	dev_dbg(dev, "suspended\n");
@@ -2373,16 +2174,16 @@ static int pl022_suspend(struct device *dev)
 static int pl022_resume(struct device *dev)
 {
 	struct pl022 *pl022 = dev_get_drvdata(dev);
-	int status = 0;
+	int ret;
 
 	/* Start the queue running */
-	status = start_queue(pl022);
-	if (status)
-		dev_err(dev, "problem starting queue (%d)\n", status);
+	ret = spi_master_resume(pl022->master);
+	if (ret)
+		dev_err(dev, "problem starting queue (%d)\n", ret);
 	else
 		dev_dbg(dev, "resumed\n");
 
-	return status;
+	return ret;
 }
 #endif	/* CONFIG_PM */
 
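One detail of the pl022 conversion worth calling out: giveback() no
longer walks a private queue; it peeks at the core's queue through
spi_get_next_queued_message() to decide whether chip select can stay
asserted between back-to-back messages to the same device. A condensed
sketch of that pattern (keep_cs and cs_deassert() are hypothetical, not
from the driver):

    struct spi_message *next_msg;

    /* Peek without dequeuing; returns NULL when the queue is empty */
    next_msg = spi_get_next_queued_message(master);

    if (next_msg && next_msg->spi == cur_msg->spi)
            keep_cs = true;                 /* hypothetical flag */
    else
            cs_deassert(cur_msg->spi);      /* hypothetical helper */
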
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index b2ccdea30cb9..5ae1e84d9037 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -30,6 +30,9 @@
 #include <linux/of_spi.h>
 #include <linux/pm_runtime.h>
 #include <linux/export.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/kthread.h>
 
 static void spidev_release(struct device *dev)
 {
@@ -507,6 +510,293 @@ spi_register_board_info(struct spi_board_info const *info, unsigned n)
 
 /*-------------------------------------------------------------------------*/
 
+/**
+ * spi_pump_messages - kthread work function which processes spi message queue
+ * @work: pointer to kthread work struct contained in the master struct
+ *
+ * This function checks if there is any spi message in the queue that
+ * needs processing and if so calls out to the driver to initialize hardware
+ * and transfer each message.
+ *
+ */
+static void spi_pump_messages(struct kthread_work *work)
+{
+	struct spi_master *master =
+		container_of(work, struct spi_master, pump_messages);
+	unsigned long flags;
+	bool was_busy = false;
+	int ret;
+
+	/* Lock queue and check for queue work */
+	spin_lock_irqsave(&master->queue_lock, flags);
+	if (list_empty(&master->queue) || !master->running) {
+		if (master->busy) {
+			ret = master->unprepare_transfer_hardware(master);
+			if (ret) {
+				dev_err(&master->dev,
+					"failed to unprepare transfer hardware\n");
+				/* do not return with the queue lock held */
+				spin_unlock_irqrestore(&master->queue_lock,
+						       flags);
+				return;
+			}
+		}
+		master->busy = false;
+		spin_unlock_irqrestore(&master->queue_lock, flags);
+		return;
+	}
+
+	/* Make sure we are not already running a message */
+	if (master->cur_msg) {
+		spin_unlock_irqrestore(&master->queue_lock, flags);
+		return;
+	}
+	/* Extract head of queue */
+	master->cur_msg =
+		list_entry(master->queue.next, struct spi_message, queue);
+
+	list_del_init(&master->cur_msg->queue);
+	if (master->busy)
+		was_busy = true;
+	else
+		master->busy = true;
+	spin_unlock_irqrestore(&master->queue_lock, flags);
+
+	if (!was_busy) {
+		ret = master->prepare_transfer_hardware(master);
+		if (ret) {
+			dev_err(&master->dev,
+				"failed to prepare transfer hardware\n");
+			return;
+		}
+	}
+
+	ret = master->transfer_one_message(master, master->cur_msg);
+	if (ret) {
+		dev_err(&master->dev,
+			"failed to transfer one message from queue\n");
+		return;
+	}
+}
+
+static int spi_init_queue(struct spi_master *master)
+{
+	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
+
+	INIT_LIST_HEAD(&master->queue);
+	spin_lock_init(&master->queue_lock);
+
+	master->running = false;
+	master->busy = false;
+
+	init_kthread_worker(&master->kworker);
+	master->kworker_task = kthread_run(kthread_worker_fn,
+					   &master->kworker,
+					   dev_name(&master->dev));
+	if (IS_ERR(master->kworker_task)) {
+		dev_err(&master->dev, "failed to create message pump task\n");
+		return -ENOMEM;
+	}
+	init_kthread_work(&master->pump_messages, spi_pump_messages);
+
+	/*
+	 * Master config will indicate if this controller should run the
+	 * message pump with high (realtime) priority to reduce the transfer
+	 * latency on the bus by minimising the delay between a transfer
+	 * request and the scheduling of the message pump thread. Without this
+	 * setting the message pump thread will remain at default priority.
+	 */
+	if (master->rt) {
+		dev_info(&master->dev,
+			 "will run message pump with realtime priority\n");
+		sched_setscheduler(master->kworker_task, SCHED_FIFO, &param);
+	}
+
+	return 0;
+}
+
+/**
+ * spi_get_next_queued_message() - called by driver to check for queued
+ * messages
+ * @master: the master to check for queued messages
+ *
+ * If there are more messages in the queue, the next message is returned from
+ * this call.
+ */
+struct spi_message *spi_get_next_queued_message(struct spi_master *master)
+{
+	struct spi_message *next;
+	unsigned long flags;
+
+	/* get a pointer to the next message, if any */
+	spin_lock_irqsave(&master->queue_lock, flags);
+	if (list_empty(&master->queue))
+		next = NULL;
+	else
+		next = list_entry(master->queue.next,
+				  struct spi_message, queue);
+	spin_unlock_irqrestore(&master->queue_lock, flags);
+
+	return next;
+}
+EXPORT_SYMBOL_GPL(spi_get_next_queued_message);
+
+/**
+ * spi_finalize_current_message() - the current message is complete
+ * @master: the master to return the message to
+ *
+ * Called by the driver to notify the core that the message in the front of the
+ * queue is complete and can be removed from the queue.
+ */
+void spi_finalize_current_message(struct spi_master *master)
+{
+	struct spi_message *mesg;
+	unsigned long flags;
+
+	spin_lock_irqsave(&master->queue_lock, flags);
+	mesg = master->cur_msg;
+	master->cur_msg = NULL;
+
+	queue_kthread_work(&master->kworker, &master->pump_messages);
+	spin_unlock_irqrestore(&master->queue_lock, flags);
+
+	mesg->state = NULL;
+	if (mesg->complete)
+		mesg->complete(mesg->context);
+}
+EXPORT_SYMBOL_GPL(spi_finalize_current_message);
+
+static int spi_start_queue(struct spi_master *master)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&master->queue_lock, flags);
+
+	if (master->running || master->busy) {
+		spin_unlock_irqrestore(&master->queue_lock, flags);
+		return -EBUSY;
+	}
+
+	master->running = true;
+	master->cur_msg = NULL;
+	spin_unlock_irqrestore(&master->queue_lock, flags);
+
+	queue_kthread_work(&master->kworker, &master->pump_messages);
+
+	return 0;
+}
+
+static int spi_stop_queue(struct spi_master *master)
+{
+	unsigned long flags;
+	unsigned limit = 500;
+	int ret = 0;
+
+	spin_lock_irqsave(&master->queue_lock, flags);
+
+	/*
+	 * This is a bit lame, but is optimized for the common execution path.
+	 * A wait_queue on the master->busy could be used, but then the common
+	 * execution path (pump_messages) would be required to call wake_up or
+	 * friends on every SPI message. Do this instead.
+	 */
+	while ((!list_empty(&master->queue) || master->busy) && limit--) {
+		spin_unlock_irqrestore(&master->queue_lock, flags);
+		msleep(10);
+		spin_lock_irqsave(&master->queue_lock, flags);
+	}
+
+	if (!list_empty(&master->queue) || master->busy)
+		ret = -EBUSY;
+	else
+		master->running = false;
+
+	spin_unlock_irqrestore(&master->queue_lock, flags);
+
+	if (ret) {
+		dev_warn(&master->dev,
+			 "could not stop message queue\n");
+		return ret;
+	}
+	return ret;
+}
+
+static int spi_destroy_queue(struct spi_master *master)
+{
+	int ret;
+
+	ret = spi_stop_queue(master);
+
+	/*
+	 * flush_kthread_worker will block until all work is done.
+	 * If the reason that stop_queue timed out is that the work will never
+	 * finish, then it does no good to call flush/stop thread, so
+	 * return anyway.
+	 */
+	if (ret) {
+		dev_err(&master->dev, "problem destroying queue\n");
+		return ret;
+	}
+
+	flush_kthread_worker(&master->kworker);
+	kthread_stop(master->kworker_task);
+
+	return 0;
+}
+
+/**
+ * spi_queued_transfer - transfer function for queued transfers
+ * @spi: spi device which is requesting transfer
+ * @msg: spi message which is to be queued to the driver queue
+ */
+static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
+{
+	struct spi_master *master = spi->master;
+	unsigned long flags;
+
+	spin_lock_irqsave(&master->queue_lock, flags);
+
+	if (!master->running) {
+		spin_unlock_irqrestore(&master->queue_lock, flags);
+		return -ESHUTDOWN;
+	}
+	msg->actual_length = 0;
+	msg->status = -EINPROGRESS;
+
+	list_add_tail(&msg->queue, &master->queue);
+	if (master->running && !master->busy)
+		queue_kthread_work(&master->kworker, &master->pump_messages);
+
+	spin_unlock_irqrestore(&master->queue_lock, flags);
+	return 0;
+}
+
+static int spi_master_initialize_queue(struct spi_master *master)
+{
+	int ret;
+
+	master->queued = true;
+	master->transfer = spi_queued_transfer;
+
+	/* Initialize and start queue */
+	ret = spi_init_queue(master);
+	if (ret) {
+		dev_err(&master->dev, "problem initializing queue\n");
+		goto err_init_queue;
+	}
+	ret = spi_start_queue(master);
+	if (ret) {
+		dev_err(&master->dev, "problem starting queue\n");
+		goto err_start_queue;
+	}
+
+	return 0;
+
+err_start_queue:
+err_init_queue:
+	spi_destroy_queue(master);
+	return ret;
+}
+
+/*-------------------------------------------------------------------------*/
+
 static void spi_master_release(struct device *dev)
 {
 	struct spi_master *master;
@@ -522,6 +812,7 @@ static struct class spi_master_class = {
 };
 
 
+
 /**
  * spi_alloc_master - allocate SPI master controller
  * @dev: the controller, possibly using the platform_bus
@@ -621,6 +912,17 @@ int spi_register_master(struct spi_master *master)
 	dev_dbg(dev, "registered master %s%s\n", dev_name(&master->dev),
 			dynamic ? " (dynamic)" : "");
 
+	/* If we're using a queued driver, start the queue */
+	if (master->transfer)
+		dev_info(dev, "master is unqueued, this is deprecated\n");
+	else {
+		status = spi_master_initialize_queue(master);
+		if (status) {
+			device_unregister(&master->dev);
+			goto done;
+		}
+	}
+
 	mutex_lock(&board_lock);
 	list_add_tail(&master->list, &spi_master_list);
 	list_for_each_entry(bi, &board_list, list)
@@ -636,7 +938,6 @@ done:
 }
 EXPORT_SYMBOL_GPL(spi_register_master);
 
-
 static int __unregister(struct device *dev, void *null)
 {
 	spi_unregister_device(to_spi_device(dev));
@@ -657,6 +958,11 @@ void spi_unregister_master(struct spi_master *master)
 {
 	int dummy;
 
+	if (master->queued) {
+		if (spi_destroy_queue(master))
+			dev_err(&master->dev, "queue remove failed\n");
+	}
+
 	mutex_lock(&board_lock);
 	list_del(&master->list);
 	mutex_unlock(&board_lock);
@@ -666,6 +972,37 @@ void spi_unregister_master(struct spi_master *master)
 }
 EXPORT_SYMBOL_GPL(spi_unregister_master);
 
+int spi_master_suspend(struct spi_master *master)
+{
+	int ret;
+
+	/* Basically no-ops for non-queued masters */
+	if (!master->queued)
+		return 0;
+
+	ret = spi_stop_queue(master);
+	if (ret)
+		dev_err(&master->dev, "queue stop failed\n");
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(spi_master_suspend);
+
+int spi_master_resume(struct spi_master *master)
+{
+	int ret;
+
+	if (!master->queued)
+		return 0;
+
+	ret = spi_start_queue(master);
+	if (ret)
+		dev_err(&master->dev, "queue restart failed\n");
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(spi_master_resume);
+
 static int __spi_master_match(struct device *dev, void *data)
 {
 	struct spi_master *m;
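
From the protocol-driver side nothing changes with the core queue:
spi_async() and spi_sync() now land in spi_queued_transfer() above, and
the actual I/O runs in the pump kthread. A minimal usage sketch
(example_write() is a hypothetical caller, not part of this patch):

    static int example_write(struct spi_device *spi, const u8 *buf,
                             size_t len)
    {
            struct spi_transfer t = {
                    .tx_buf = buf,
                    .len    = len,
            };
            struct spi_message m;

            spi_message_init(&m);
            spi_message_add_tail(&t, &m);
            /* Enqueues the message and wakes the pump kthread */
            return spi_sync(spi, &m);
    }
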
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index 176fce9cc6b1..f9e30a5b3543 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -22,6 +22,7 @@
 #include <linux/device.h>
 #include <linux/mod_devicetable.h>
 #include <linux/slab.h>
+#include <linux/kthread.h>
 
 /*
  * INTERFACES between SPI master-side drivers and SPI infrastructure.
@@ -235,6 +236,27 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
  *	the device whose settings are being modified.
  * @transfer: adds a message to the controller's transfer queue.
  * @cleanup: frees controller-specific state
+ * @queued: whether this master is providing an internal message queue
+ * @kworker: thread struct for message pump
+ * @kworker_task: pointer to task for message pump kworker thread
+ * @pump_messages: work struct for scheduling work to the message pump
+ * @queue_lock: spinlock to synchronise access to message queue
+ * @queue: message queue
+ * @cur_msg: the currently in-flight message
+ * @busy: message pump is busy
+ * @running: message pump is running
+ * @rt: whether this queue is set to run as a realtime task
+ * @prepare_transfer_hardware: a message will soon arrive from the queue
+ *	so the subsystem requests the driver to prepare the transfer hardware
+ *	by issuing this call
+ * @transfer_one_message: the subsystem calls the driver to transfer a single
+ *	message while queuing transfers that arrive in the meantime. When the
+ *	driver is finished with this message, it must call
+ *	spi_finalize_current_message() so the subsystem can issue the next
+ *	transfer
+ * @unprepare_transfer_hardware: there are currently no more messages on the
+ *	queue so the subsystem notifies the driver that it may relax the
+ *	hardware by issuing this call
  *
  * Each SPI master controller can communicate with one or more @spi_device
  * children. These make a small bus, sharing MOSI, MISO and SCK signals
@@ -318,6 +340,28 @@ struct spi_master {
 
 	/* called on release() to free memory provided by spi_master */
 	void			(*cleanup)(struct spi_device *spi);
+
+	/*
+	 * These hooks are for drivers that want to use the generic
+	 * master transfer queueing mechanism. If these are used, the
+	 * transfer() function above must NOT be specified by the driver.
+	 * Over time we expect SPI drivers to be phased over to this API.
+	 */
+	bool				queued;
+	struct kthread_worker		kworker;
+	struct task_struct		*kworker_task;
+	struct kthread_work		pump_messages;
+	spinlock_t			queue_lock;
+	struct list_head		queue;
+	struct spi_message		*cur_msg;
+	bool				busy;
+	bool				running;
+	bool				rt;
+
+	int (*prepare_transfer_hardware)(struct spi_master *master);
+	int (*transfer_one_message)(struct spi_master *master,
+				    struct spi_message *mesg);
+	int (*unprepare_transfer_hardware)(struct spi_master *master);
 };
 
 static inline void *spi_master_get_devdata(struct spi_master *master)
@@ -343,6 +387,13 @@ static inline void spi_master_put(struct spi_master *master)
 	put_device(&master->dev);
 }
 
+/* PM calls that need to be issued by the driver */
+extern int spi_master_suspend(struct spi_master *master);
+extern int spi_master_resume(struct spi_master *master);
+
+/* Calls the driver makes to interact with the message queue */
+extern struct spi_message *spi_get_next_queued_message(struct spi_master *master);
+extern void spi_finalize_current_message(struct spi_master *master);
 
 /* the spi driver core manages memory for the spi_master classdev */
 extern struct spi_master *
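
The new rt flag in struct spi_master is how board-level latency
requirements reach the core: pl022 copies it from platform data in
probe(), and spi_init_queue() then runs the pump with SCHED_FIFO at
MAX_RT_PRIO - 1. A hypothetical pl022 board file might request this as
follows (field values are illustrative only):

    static struct pl022_ssp_controller ssp0_platform_data = {
            .bus_id         = 0,
            .num_chipselect = 1,
            .enable_dma     = 0,
            .rt             = true, /* elevate the message pump priority */
    };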