-rw-r--r--  drivers/spi/spi.c        | 225
-rw-r--r--  include/linux/spi/spi.h  |  12
2 files changed, 204 insertions, 33 deletions
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index b3a1f9259b62..fdde7061ef58 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -527,6 +527,10 @@ int spi_register_master(struct spi_master *master)
 		dynamic = 1;
 	}
 
+	spin_lock_init(&master->bus_lock_spinlock);
+	mutex_init(&master->bus_lock_mutex);
+	master->bus_lock_flag = 0;
+
 	/* register the device, then userspace will see it.
 	 * registration fails if the bus ID is in use.
 	 */
@@ -666,6 +670,35 @@ int spi_setup(struct spi_device *spi)
 }
 EXPORT_SYMBOL_GPL(spi_setup);
 
+static int __spi_async(struct spi_device *spi, struct spi_message *message)
+{
+	struct spi_master *master = spi->master;
+
+	/* Half-duplex links include original MicroWire, and ones with
+	 * only one data pin like SPI_3WIRE (switches direction) or where
+	 * either MOSI or MISO is missing.  They can also be caused by
+	 * software limitations.
+	 */
+	if ((master->flags & SPI_MASTER_HALF_DUPLEX)
+			|| (spi->mode & SPI_3WIRE)) {
+		struct spi_transfer *xfer;
+		unsigned flags = master->flags;
+
+		list_for_each_entry(xfer, &message->transfers, transfer_list) {
+			if (xfer->rx_buf && xfer->tx_buf)
+				return -EINVAL;
+			if ((flags & SPI_MASTER_NO_TX) && xfer->tx_buf)
+				return -EINVAL;
+			if ((flags & SPI_MASTER_NO_RX) && xfer->rx_buf)
+				return -EINVAL;
+		}
+	}
+
+	message->spi = spi;
+	message->status = -EINPROGRESS;
+	return master->transfer(spi, message);
+}
+
 /**
  * spi_async - asynchronous SPI transfer
  * @spi: device with which data will be exchanged
@@ -698,33 +731,68 @@ EXPORT_SYMBOL_GPL(spi_setup);
 int spi_async(struct spi_device *spi, struct spi_message *message)
 {
 	struct spi_master *master = spi->master;
+	int ret;
+	unsigned long flags;
 
-	/* Half-duplex links include original MicroWire, and ones with
-	 * only one data pin like SPI_3WIRE (switches direction) or where
-	 * either MOSI or MISO is missing.  They can also be caused by
-	 * software limitations.
-	 */
-	if ((master->flags & SPI_MASTER_HALF_DUPLEX)
-			|| (spi->mode & SPI_3WIRE)) {
-		struct spi_transfer *xfer;
-		unsigned flags = master->flags;
+	spin_lock_irqsave(&master->bus_lock_spinlock, flags);
 
-		list_for_each_entry(xfer, &message->transfers, transfer_list) {
-			if (xfer->rx_buf && xfer->tx_buf)
-				return -EINVAL;
-			if ((flags & SPI_MASTER_NO_TX) && xfer->tx_buf)
-				return -EINVAL;
-			if ((flags & SPI_MASTER_NO_RX) && xfer->rx_buf)
-				return -EINVAL;
-		}
-	}
+	if (master->bus_lock_flag)
+		ret = -EBUSY;
+	else
+		ret = __spi_async(spi, message);
 
-	message->spi = spi;
-	message->status = -EINPROGRESS;
-	return master->transfer(spi, message);
+	spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);
+
+	return ret;
 }
 EXPORT_SYMBOL_GPL(spi_async);
 
+/**
+ * spi_async_locked - version of spi_async with exclusive bus usage
+ * @spi: device with which data will be exchanged
+ * @message: describes the data transfers, including completion callback
+ * Context: any (irqs may be blocked, etc)
+ *
+ * This call may be used in_irq and other contexts which can't sleep,
+ * as well as from task contexts which can sleep.
+ *
+ * The completion callback is invoked in a context which can't sleep.
+ * Before that invocation, the value of message->status is undefined.
+ * When the callback is issued, message->status holds either zero (to
+ * indicate complete success) or a negative error code.  After that
+ * callback returns, the driver which issued the transfer request may
+ * deallocate the associated memory; it's no longer in use by any SPI
+ * core or controller driver code.
+ *
+ * Note that although all messages to a spi_device are handled in
+ * FIFO order, messages may go to different devices in other orders.
+ * Some device might be higher priority, or have various "hard" access
+ * time requirements, for example.
+ *
+ * On detection of any fault during the transfer, processing of
+ * the entire message is aborted, and the device is deselected.
+ * Until returning from the associated message completion callback,
+ * no other spi_message queued to that device will be processed.
+ * (This rule applies equally to all the synchronous transfer calls,
+ * which are wrappers around this core asynchronous primitive.)
+ */
+int spi_async_locked(struct spi_device *spi, struct spi_message *message)
+{
+	struct spi_master *master = spi->master;
+	int ret;
+	unsigned long flags;
+
+	spin_lock_irqsave(&master->bus_lock_spinlock, flags);
+
+	ret = __spi_async(spi, message);
+
+	spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);
+
+	return ret;
+
+}
+EXPORT_SYMBOL_GPL(spi_async_locked);
+
 
 /*-------------------------------------------------------------------------*/
 
@@ -738,6 +806,32 @@ static void spi_complete(void *arg)
 	complete(arg);
 }
 
+static int __spi_sync(struct spi_device *spi, struct spi_message *message,
+		      int bus_locked)
+{
+	DECLARE_COMPLETION_ONSTACK(done);
+	int status;
+	struct spi_master *master = spi->master;
+
+	message->complete = spi_complete;
+	message->context = &done;
+
+	if (!bus_locked)
+		mutex_lock(&master->bus_lock_mutex);
+
+	status = spi_async_locked(spi, message);
+
+	if (!bus_locked)
+		mutex_unlock(&master->bus_lock_mutex);
+
+	if (status == 0) {
+		wait_for_completion(&done);
+		status = message->status;
+	}
+	message->context = NULL;
+	return status;
+}
+
 /**
  * spi_sync - blocking/synchronous SPI data transfers
  * @spi: device with which data will be exchanged
@@ -761,21 +855,86 @@ static void spi_complete(void *arg)
  */
 int spi_sync(struct spi_device *spi, struct spi_message *message)
 {
-	DECLARE_COMPLETION_ONSTACK(done);
-	int status;
-
-	message->complete = spi_complete;
-	message->context = &done;
-	status = spi_async(spi, message);
-	if (status == 0) {
-		wait_for_completion(&done);
-		status = message->status;
-	}
-	message->context = NULL;
-	return status;
+	return __spi_sync(spi, message, 0);
 }
 EXPORT_SYMBOL_GPL(spi_sync);
 
+/**
+ * spi_sync_locked - version of spi_sync with exclusive bus usage
+ * @spi: device with which data will be exchanged
+ * @message: describes the data transfers
+ * Context: can sleep
+ *
+ * This call may only be used from a context that may sleep.  The sleep
+ * is non-interruptible, and has no timeout.  Low-overhead controller
+ * drivers may DMA directly into and out of the message buffers.
+ *
+ * This call should be used by drivers that require exclusive access to the
+ * SPI bus.  It has to be preceded by a spi_bus_lock call.  The SPI bus must
+ * be released by a spi_bus_unlock call when the exclusive access is over.
+ *
+ * It returns zero on success, else a negative error code.
+ */
+int spi_sync_locked(struct spi_device *spi, struct spi_message *message)
+{
+	return __spi_sync(spi, message, 1);
+}
+EXPORT_SYMBOL_GPL(spi_sync_locked);
+
+/**
+ * spi_bus_lock - obtain a lock for exclusive SPI bus usage
+ * @master: SPI bus master that should be locked for exclusive bus access
+ * Context: can sleep
+ *
+ * This call may only be used from a context that may sleep.  The sleep
+ * is non-interruptible, and has no timeout.
+ *
+ * This call should be used by drivers that require exclusive access to the
+ * SPI bus.  The SPI bus must be released by a spi_bus_unlock call when the
+ * exclusive access is over.  Data transfer must be done by spi_sync_locked
+ * and spi_async_locked calls when the SPI bus lock is held.
+ *
+ * It returns zero on success, else a negative error code.
+ */
+int spi_bus_lock(struct spi_master *master)
+{
+	unsigned long flags;
+
+	mutex_lock(&master->bus_lock_mutex);
+
+	spin_lock_irqsave(&master->bus_lock_spinlock, flags);
+	master->bus_lock_flag = 1;
+	spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);
+
+	/* mutex remains locked until spi_bus_unlock is called */
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(spi_bus_lock);
+
+/**
+ * spi_bus_unlock - release the lock for exclusive SPI bus usage
+ * @master: SPI bus master that was locked for exclusive bus access
+ * Context: can sleep
+ *
+ * This call may only be used from a context that may sleep.  The sleep
+ * is non-interruptible, and has no timeout.
+ *
+ * This call releases an SPI bus lock previously obtained by an spi_bus_lock
+ * call.
+ *
+ * It returns zero on success, else a negative error code.
+ */
+int spi_bus_unlock(struct spi_master *master)
+{
+	master->bus_lock_flag = 0;
+
+	mutex_unlock(&master->bus_lock_mutex);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(spi_bus_unlock);
+
 /* portable code must never pass more than 32 bytes */
 #define	SPI_BUFSIZ	max(32,SMP_CACHE_BYTES)
 
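The calling convention introduced above is: spi_bus_lock() once, any number of spi_sync_locked() or spi_async_locked() transfers, then spi_bus_unlock(). As a minimal sketch of the synchronous path, the hypothetical client helper below performs a read-modify-write of one device register with no other client's message interleaved on the bus. The mydev_* names, the 0x80 read opcode and the 2-byte framing are illustrative assumptions, not part of this patch, and a full-duplex controller is assumed (on a half-duplex master a transfer with both tx_buf and rx_buf set is rejected with -EINVAL).

#include <linux/spi/spi.h>
#include <linux/string.h>

/* Hypothetical client helper: atomically read, modify and write back one
 * register of "mydev".  Real code would use DMA-safe buffers rather than
 * the stack; kept on the stack here only for brevity.
 */
static int mydev_update_reg(struct spi_device *spi, u8 reg, u8 mask, u8 val)
{
	struct spi_master *master = spi->master;
	struct spi_transfer xfer;
	struct spi_message msg;
	u8 tx[2], rx[2];
	int ret;

	/* Claim the bus: until spi_bus_unlock(), other clients' spi_sync()
	 * calls block on bus_lock_mutex and their spi_async() calls fail
	 * with -EBUSY.
	 */
	spi_bus_lock(master);

	/* Read the current register value.  Plain spi_sync() would deadlock
	 * here, since this thread already holds bus_lock_mutex.
	 */
	memset(&xfer, 0, sizeof(xfer));
	tx[0] = reg | 0x80;		/* assumed "read register" opcode */
	tx[1] = 0;
	xfer.tx_buf = tx;
	xfer.rx_buf = rx;
	xfer.len = 2;
	spi_message_init(&msg);
	spi_message_add_tail(&xfer, &msg);
	ret = spi_sync_locked(spi, &msg);
	if (ret)
		goto out;

	/* Write back the modified value; no other message can have been
	 * queued on this bus between the two transfers.
	 */
	tx[0] = reg;			/* assumed "write register" opcode */
	tx[1] = (rx[1] & ~mask) | (val & mask);
	xfer.rx_buf = NULL;
	spi_message_init(&msg);
	spi_message_add_tail(&xfer, &msg);
	ret = spi_sync_locked(spi, &msg);
out:
	spi_bus_unlock(master);
	return ret;
}

Note that spi_sync() and helpers layered on it (such as spi_write_then_read()) take bus_lock_mutex internally, so calling them between spi_bus_lock() and spi_bus_unlock() from the same thread would deadlock; only the *_locked variants may be used while the bus is held.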
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index af56071b06f9..ae0a5286f558 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -262,6 +262,13 @@ struct spi_master {
 #define SPI_MASTER_NO_RX	BIT(1)		/* can't do buffer read */
 #define SPI_MASTER_NO_TX	BIT(2)		/* can't do buffer write */
 
+	/* lock and mutex for SPI bus locking */
+	spinlock_t		bus_lock_spinlock;
+	struct mutex		bus_lock_mutex;
+
+	/* flag indicating that the SPI bus is locked for exclusive use */
+	bool			bus_lock_flag;
+
 	/* Setup mode and clock, etc (spi driver may call many times).
 	 *
 	 * IMPORTANT:  this may be called when transfers to another
@@ -542,6 +549,8 @@ static inline void spi_message_free(struct spi_message *m)
 
 extern int spi_setup(struct spi_device *spi);
 extern int spi_async(struct spi_device *spi, struct spi_message *message);
+extern int spi_async_locked(struct spi_device *spi,
+			    struct spi_message *message);
 
 /*---------------------------------------------------------------------------*/
 
@@ -551,6 +560,9 @@ extern int spi_async(struct spi_device *spi, struct spi_message *message);
  */
 
 extern int spi_sync(struct spi_device *spi, struct spi_message *message);
+extern int spi_sync_locked(struct spi_device *spi, struct spi_message *message);
+extern int spi_bus_lock(struct spi_master *master);
+extern int spi_bus_unlock(struct spi_master *master);
 
 /**
  * spi_write - SPI synchronous write
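The four entry points declared above split into a sleeping pair (spi_bus_lock/spi_bus_unlock, which take and release bus_lock_mutex) and transfer calls that skip the -EBUSY check (spi_sync_locked/spi_async_locked). A second hedged sketch below shows the asynchronous side: spi_async_locked() called from a message completion callback, which cannot sleep, while the bus lock is held, with the final unlock deferred to a work item because a mutex must not be released from interrupt context. All mydev_* names and the command/response sequence are assumed for illustration only.

#include <linux/kernel.h>
#include <linux/spi/spi.h>
#include <linux/workqueue.h>

/* Hypothetical two-message sequence kept atomic on the bus.  The caller is
 * assumed to kzalloc() this structure so the embedded transfers start zeroed.
 */
struct mydev_seq {
	struct spi_device	*spi;
	struct spi_message	first, second;
	struct spi_transfer	t1, t2;
	u8			cmd[2], resp[4];
	struct work_struct	unlock_work;
};

static void mydev_unlock_work(struct work_struct *work)
{
	struct mydev_seq *seq = container_of(work, struct mydev_seq, unlock_work);

	/* spi_bus_unlock() releases a mutex, so run it in process context */
	spi_bus_unlock(seq->spi->master);
}

static void mydev_second_done(void *context)
{
	struct mydev_seq *seq = context;

	schedule_work(&seq->unlock_work);
}

static void mydev_first_done(void *context)
{
	struct mydev_seq *seq = context;

	/* Completion callbacks may not sleep.  spi_async_locked() is safe
	 * here and, unlike spi_async(), is not rejected with -EBUSY while
	 * the bus lock is held.
	 */
	if (seq->first.status == 0 &&
	    spi_async_locked(seq->spi, &seq->second) == 0)
		return;

	schedule_work(&seq->unlock_work);	/* error path: still unlock */
}

static int mydev_start_sequence(struct mydev_seq *seq)
{
	int ret;

	INIT_WORK(&seq->unlock_work, mydev_unlock_work);

	spi_message_init(&seq->first);
	seq->t1.tx_buf = seq->cmd;
	seq->t1.len = sizeof(seq->cmd);
	spi_message_add_tail(&seq->t1, &seq->first);
	seq->first.complete = mydev_first_done;
	seq->first.context = seq;

	spi_message_init(&seq->second);
	seq->t2.rx_buf = seq->resp;
	seq->t2.len = sizeof(seq->resp);
	spi_message_add_tail(&seq->t2, &seq->second);
	seq->second.complete = mydev_second_done;
	seq->second.context = seq;

	spi_bus_lock(seq->spi->master);		/* may sleep */

	ret = spi_async_locked(seq->spi, &seq->first);
	if (ret)
		spi_bus_unlock(seq->spi->master);
	return ret;
}

The split between bus_lock_spinlock and bus_lock_mutex is what makes this pattern work: the spinlock lets spi_async() and spi_async_locked() run from atomic context and check bus_lock_flag consistently, while the mutex serializes entire locked sequences against spi_sync() callers.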
