author		Ernst Schwab <eschwab@online.de>	2010-06-28 20:49:29 -0400
committer	Grant Likely <grant.likely@secretlab.ca>	2010-06-28 20:49:29 -0400
commit		cf32b71e981ca63e8f349d8585ca2a3583b556e0 (patch)
tree		e704942f6843114446c73478a79e615a57d2eb49 /drivers
parent		7e27d6e778cd87b6f2415515d7127eba53fe5d02 (diff)
spi/mmc_spi: SPI bus locking API, using mutex
SPI bus locking API to allow exclusive access to the SPI bus, needed
especially, but not only, by the mmc_spi driver.
Coded according to an outline from Grant Likely; here is his
specification (accidentally swapped function names corrected):
It requires three things to be added to struct spi_master:
- one mutex
- one spinlock
- one flag
The mutex protects spi_sync(), and provides sleeping "for free".
The spinlock protects the atomic spi_async() call. The flag is set when
the lock is obtained, and checked while holding the spinlock in
spi_async(). If the flag is set, spi_async() must fail immediately.
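For reference, these map to three new fields in struct spi_master. The
header change itself falls outside the 'drivers'-only diffstat below, so
the declarations here are a sketch based on the names used in spi.c:

	/* lock and mutex for SPI bus locking */
	spinlock_t		bus_lock_spinlock;
	struct mutex		bus_lock_mutex;

	/* flag indicating that the SPI bus is locked for exclusive use */
	bool			bus_lock_flag;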
The current runtime API looks like this:

	spi_async(struct spi_device *, struct spi_message *);
	spi_sync(struct spi_device *, struct spi_message *);

The API needs to be extended to this:

	spi_async(struct spi_device *, struct spi_message *);
	spi_sync(struct spi_device *, struct spi_message *);
	spi_bus_lock(struct spi_master *);  /* although struct spi_device * might be easier */
	spi_bus_unlock(struct spi_master *);
	spi_async_locked(struct spi_device *, struct spi_message *);
	spi_sync_locked(struct spi_device *, struct spi_message *);
Drivers may call the last two only if they already hold the bus lock,
i.e. after a successful spi_bus_lock() call.
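As a hedged usage sketch (not part of this patch; the function name is
made up), a driver such as mmc_spi would claim the bus like this:

	int my_exclusive_transfer(struct spi_device *spi, struct spi_message *msg)
	{
		int ret;

		spi_bus_lock(spi->master);	/* concurrent spi_async() now fails
						 * with -EBUSY; spi_sync() blocks */
		ret = spi_sync_locked(spi, msg);
		spi_bus_unlock(spi->master);	/* wakes any waiters on the mutex */

		return ret;
	}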
spi_bus_lock() obtains the mutex, takes the spinlock, sets the flag, and
releases the spinlock before returning. It doesn't even need to sleep
while waiting for "in-flight" SPI transactions to complete, because its
purpose is to guarantee that no additional transactions are added. It
does not guarantee that the bus is idle.
spi_bus_unlock() clears the flag and releases the mutex, which will
wake up any waiters.
The difference between spi_async() and spi_async_locked() is that the
locked version bypasses the check of the lock flag. Both versions
need to obtain the spinlock.
The difference between spi_sync() and spi_sync_locked() is that
spi_sync() must hold the mutex while enqueuing a new transfer.
spi_sync_locked() doesn't because the mutex is already held. Note
however that spi_sync must *not* continue to hold the mutex while
waiting for the transfer to complete, otherwise only one transfer
could be queued up at a time!
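The diff below implements exactly this with a common __spi_sync()
helper; here is the same code as the hunk further down, with comments
added to mark where the mutex is held and dropped:

	static int __spi_sync(struct spi_device *spi, struct spi_message *message,
			      int bus_locked)
	{
		DECLARE_COMPLETION_ONSTACK(done);
		int status;
		struct spi_master *master = spi->master;

		message->complete = spi_complete;
		message->context = &done;

		if (!bus_locked)
			mutex_lock(&master->bus_lock_mutex);	/* held only to enqueue */

		status = spi_async_locked(spi, message);

		if (!bus_locked)
			mutex_unlock(&master->bus_lock_mutex);	/* dropped before the wait,
								 * so more transfers can queue */
		if (status == 0) {
			wait_for_completion(&done);
			status = message->status;
		}
		message->context = NULL;
		return status;
	}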
Almost no code needs to be written. The current spi_async() and
spi_sync() can probably be renamed to __spi_async() and __spi_sync()
so that spi_async(), spi_sync(), spi_async_locked() and
spi_sync_locked() can just become wrappers around the common code.
spi_sync() is protected by a mutex because it can sleep; spi_async()
needs to be protected with a flag and a spinlock because it can be
called atomically and must not sleep.
Signed-off-by: Ernst Schwab <eschwab@online.de>
[grant.likely@secretlab.ca: use spin_lock_irqsave()]
Signed-off-by: Grant Likely <grant.likely@secretlab.ca>
Tested-by: Matt Fleming <matt@console-pimps.org>
Tested-by: Antonio Ospite <ospite@studenti.unina.it>
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/spi/spi.c	225
1 file changed, 192 insertions(+), 33 deletions(-)
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index b3a1f9259b62..fdde7061ef58 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -527,6 +527,10 @@ int spi_register_master(struct spi_master *master)
 		dynamic = 1;
 	}
 
+	spin_lock_init(&master->bus_lock_spinlock);
+	mutex_init(&master->bus_lock_mutex);
+	master->bus_lock_flag = 0;
+
 	/* register the device, then userspace will see it.
 	 * registration fails if the bus ID is in use.
 	 */
@@ -666,6 +670,35 @@ int spi_setup(struct spi_device *spi)
 }
 EXPORT_SYMBOL_GPL(spi_setup);
 
+static int __spi_async(struct spi_device *spi, struct spi_message *message)
+{
+	struct spi_master *master = spi->master;
+
+	/* Half-duplex links include original MicroWire, and ones with
+	 * only one data pin like SPI_3WIRE (switches direction) or where
+	 * either MOSI or MISO is missing. They can also be caused by
+	 * software limitations.
+	 */
+	if ((master->flags & SPI_MASTER_HALF_DUPLEX)
+			|| (spi->mode & SPI_3WIRE)) {
+		struct spi_transfer *xfer;
+		unsigned flags = master->flags;
+
+		list_for_each_entry(xfer, &message->transfers, transfer_list) {
+			if (xfer->rx_buf && xfer->tx_buf)
+				return -EINVAL;
+			if ((flags & SPI_MASTER_NO_TX) && xfer->tx_buf)
+				return -EINVAL;
+			if ((flags & SPI_MASTER_NO_RX) && xfer->rx_buf)
+				return -EINVAL;
+		}
+	}
+
+	message->spi = spi;
+	message->status = -EINPROGRESS;
+	return master->transfer(spi, message);
+}
+
 /**
  * spi_async - asynchronous SPI transfer
  * @spi: device with which data will be exchanged
@@ -698,33 +731,68 @@ EXPORT_SYMBOL_GPL(spi_setup);
 int spi_async(struct spi_device *spi, struct spi_message *message)
 {
 	struct spi_master *master = spi->master;
+	int ret;
+	unsigned long flags;
 
-	/* Half-duplex links include original MicroWire, and ones with
-	 * only one data pin like SPI_3WIRE (switches direction) or where
-	 * either MOSI or MISO is missing. They can also be caused by
-	 * software limitations.
-	 */
-	if ((master->flags & SPI_MASTER_HALF_DUPLEX)
-			|| (spi->mode & SPI_3WIRE)) {
-		struct spi_transfer *xfer;
-		unsigned flags = master->flags;
+	spin_lock_irqsave(&master->bus_lock_spinlock, flags);
 
-		list_for_each_entry(xfer, &message->transfers, transfer_list) {
-			if (xfer->rx_buf && xfer->tx_buf)
-				return -EINVAL;
-			if ((flags & SPI_MASTER_NO_TX) && xfer->tx_buf)
-				return -EINVAL;
-			if ((flags & SPI_MASTER_NO_RX) && xfer->rx_buf)
-				return -EINVAL;
-		}
-	}
+	if (master->bus_lock_flag)
+		ret = -EBUSY;
+	else
+		ret = __spi_async(spi, message);
 
-	message->spi = spi;
-	message->status = -EINPROGRESS;
-	return master->transfer(spi, message);
+	spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);
+
+	return ret;
 }
 EXPORT_SYMBOL_GPL(spi_async);
 
+/**
+ * spi_async_locked - version of spi_async with exclusive bus usage
+ * @spi: device with which data will be exchanged
+ * @message: describes the data transfers, including completion callback
+ * Context: any (irqs may be blocked, etc)
+ *
+ * This call may be used in_irq and other contexts which can't sleep,
+ * as well as from task contexts which can sleep.
+ *
+ * The completion callback is invoked in a context which can't sleep.
+ * Before that invocation, the value of message->status is undefined.
+ * When the callback is issued, message->status holds either zero (to
+ * indicate complete success) or a negative error code. After that
+ * callback returns, the driver which issued the transfer request may
+ * deallocate the associated memory; it's no longer in use by any SPI
+ * core or controller driver code.
+ *
+ * Note that although all messages to a spi_device are handled in
+ * FIFO order, messages may go to different devices in other orders.
+ * Some device might be higher priority, or have various "hard" access
+ * time requirements, for example.
+ *
+ * On detection of any fault during the transfer, processing of
+ * the entire message is aborted, and the device is deselected.
+ * Until returning from the associated message completion callback,
+ * no other spi_message queued to that device will be processed.
+ * (This rule applies equally to all the synchronous transfer calls,
+ * which are wrappers around this core asynchronous primitive.)
+ */
+int spi_async_locked(struct spi_device *spi, struct spi_message *message)
+{
+	struct spi_master *master = spi->master;
+	int ret;
+	unsigned long flags;
+
+	spin_lock_irqsave(&master->bus_lock_spinlock, flags);
+
+	ret = __spi_async(spi, message);
+
+	spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);
+
+	return ret;
+
+}
+EXPORT_SYMBOL_GPL(spi_async_locked);
+
 
 /*-------------------------------------------------------------------------*/
 
@@ -738,6 +806,32 @@ static void spi_complete(void *arg)
 	complete(arg);
 }
 
+static int __spi_sync(struct spi_device *spi, struct spi_message *message,
+		      int bus_locked)
+{
+	DECLARE_COMPLETION_ONSTACK(done);
+	int status;
+	struct spi_master *master = spi->master;
+
+	message->complete = spi_complete;
+	message->context = &done;
+
+	if (!bus_locked)
+		mutex_lock(&master->bus_lock_mutex);
+
+	status = spi_async_locked(spi, message);
+
+	if (!bus_locked)
+		mutex_unlock(&master->bus_lock_mutex);
+
+	if (status == 0) {
+		wait_for_completion(&done);
+		status = message->status;
+	}
+	message->context = NULL;
+	return status;
+}
+
 /**
  * spi_sync - blocking/synchronous SPI data transfers
  * @spi: device with which data will be exchanged
@@ -761,21 +855,86 @@ static void spi_complete(void *arg)
  */
 int spi_sync(struct spi_device *spi, struct spi_message *message)
 {
-	DECLARE_COMPLETION_ONSTACK(done);
-	int status;
-
-	message->complete = spi_complete;
-	message->context = &done;
-	status = spi_async(spi, message);
-	if (status == 0) {
-		wait_for_completion(&done);
-		status = message->status;
-	}
-	message->context = NULL;
-	return status;
+	return __spi_sync(spi, message, 0);
 }
 EXPORT_SYMBOL_GPL(spi_sync);
 
+/**
+ * spi_sync_locked - version of spi_sync with exclusive bus usage
+ * @spi: device with which data will be exchanged
+ * @message: describes the data transfers
+ * Context: can sleep
+ *
+ * This call may only be used from a context that may sleep. The sleep
+ * is non-interruptible, and has no timeout. Low-overhead controller
+ * drivers may DMA directly into and out of the message buffers.
+ *
+ * This call should be used by drivers that require exclusive access to the
+ * SPI bus. It has to be preceded by a spi_bus_lock call. The SPI bus must
+ * be released by a spi_bus_unlock call when the exclusive access is over.
+ *
+ * It returns zero on success, else a negative error code.
+ */
+int spi_sync_locked(struct spi_device *spi, struct spi_message *message)
+{
+	return __spi_sync(spi, message, 1);
+}
+EXPORT_SYMBOL_GPL(spi_sync_locked);
+
+/**
+ * spi_bus_lock - obtain a lock for exclusive SPI bus usage
+ * @master: SPI bus master that should be locked for exclusive bus access
+ * Context: can sleep
+ *
+ * This call may only be used from a context that may sleep. The sleep
+ * is non-interruptible, and has no timeout.
+ *
+ * This call should be used by drivers that require exclusive access to the
+ * SPI bus. The SPI bus must be released by a spi_bus_unlock call when the
+ * exclusive access is over. Data transfer must be done by spi_sync_locked
+ * and spi_async_locked calls when the SPI bus lock is held.
+ *
+ * It returns zero on success, else a negative error code.
+ */
+int spi_bus_lock(struct spi_master *master)
+{
+	unsigned long flags;
+
+	mutex_lock(&master->bus_lock_mutex);
+
+	spin_lock_irqsave(&master->bus_lock_spinlock, flags);
+	master->bus_lock_flag = 1;
+	spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);
+
+	/* mutex remains locked until spi_bus_unlock is called */
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(spi_bus_lock);
+
+/**
+ * spi_bus_unlock - release the lock for exclusive SPI bus usage
+ * @master: SPI bus master that was locked for exclusive bus access
+ * Context: can sleep
+ *
+ * This call may only be used from a context that may sleep. The sleep
+ * is non-interruptible, and has no timeout.
+ *
+ * This call releases an SPI bus lock previously obtained by an spi_bus_lock
+ * call.
+ *
+ * It returns zero on success, else a negative error code.
+ */
+int spi_bus_unlock(struct spi_master *master)
+{
+	master->bus_lock_flag = 0;
+
+	mutex_unlock(&master->bus_lock_mutex);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(spi_bus_unlock);
+
 /* portable code must never pass more than 32 bytes */
 #define	SPI_BUFSIZ	max(32,SMP_CACHE_BYTES)
 