diff options
Diffstat (limited to 'drivers/spi/spi.c')
-rw-r--r-- | drivers/spi/spi.c | 239 |
1 files changed, 200 insertions, 39 deletions
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index 1bb1b88780ce..b5a78a1f4421 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c | |||
@@ -23,6 +23,7 @@ | |||
23 | #include <linux/init.h> | 23 | #include <linux/init.h> |
24 | #include <linux/cache.h> | 24 | #include <linux/cache.h> |
25 | #include <linux/mutex.h> | 25 | #include <linux/mutex.h> |
26 | #include <linux/of_device.h> | ||
26 | #include <linux/slab.h> | 27 | #include <linux/slab.h> |
27 | #include <linux/mod_devicetable.h> | 28 | #include <linux/mod_devicetable.h> |
28 | #include <linux/spi/spi.h> | 29 | #include <linux/spi/spi.h> |
@@ -86,6 +87,10 @@ static int spi_match_device(struct device *dev, struct device_driver *drv) | |||
86 | const struct spi_device *spi = to_spi_device(dev); | 87 | const struct spi_device *spi = to_spi_device(dev); |
87 | const struct spi_driver *sdrv = to_spi_driver(drv); | 88 | const struct spi_driver *sdrv = to_spi_driver(drv); |
88 | 89 | ||
90 | /* Attempt an OF style match */ | ||
91 | if (of_driver_match_device(dev, drv)) | ||
92 | return 1; | ||
93 | |||
89 | if (sdrv->id_table) | 94 | if (sdrv->id_table) |
90 | return !!spi_match_id(sdrv->id_table, spi); | 95 | return !!spi_match_id(sdrv->id_table, spi); |
91 | 96 | ||
@@ -528,6 +533,10 @@ int spi_register_master(struct spi_master *master) | |||
528 | dynamic = 1; | 533 | dynamic = 1; |
529 | } | 534 | } |
530 | 535 | ||
536 | spin_lock_init(&master->bus_lock_spinlock); | ||
537 | mutex_init(&master->bus_lock_mutex); | ||
538 | master->bus_lock_flag = 0; | ||
539 | |||
531 | /* register the device, then userspace will see it. | 540 | /* register the device, then userspace will see it. |
532 | * registration fails if the bus ID is in use. | 541 | * registration fails if the bus ID is in use. |
533 | */ | 542 | */ |
@@ -550,11 +559,9 @@ done: | |||
550 | EXPORT_SYMBOL_GPL(spi_register_master); | 559 | EXPORT_SYMBOL_GPL(spi_register_master); |
551 | 560 | ||
552 | 561 | ||
553 | static int __unregister(struct device *dev, void *master_dev) | 562 | static int __unregister(struct device *dev, void *null) |
554 | { | 563 | { |
555 | /* note: before about 2.6.14-rc1 this would corrupt memory: */ | 564 | spi_unregister_device(to_spi_device(dev)); |
556 | if (dev != master_dev) | ||
557 | spi_unregister_device(to_spi_device(dev)); | ||
558 | return 0; | 565 | return 0; |
559 | } | 566 | } |
560 | 567 | ||
@@ -572,8 +579,7 @@ void spi_unregister_master(struct spi_master *master) | |||
572 | { | 579 | { |
573 | int dummy; | 580 | int dummy; |
574 | 581 | ||
575 | dummy = device_for_each_child(master->dev.parent, &master->dev, | 582 | dummy = device_for_each_child(&master->dev, NULL, __unregister); |
576 | __unregister); | ||
577 | device_unregister(&master->dev); | 583 | device_unregister(&master->dev); |
578 | } | 584 | } |
579 | EXPORT_SYMBOL_GPL(spi_unregister_master); | 585 | EXPORT_SYMBOL_GPL(spi_unregister_master); |
@@ -670,6 +676,35 @@ int spi_setup(struct spi_device *spi) | |||
670 | } | 676 | } |
671 | EXPORT_SYMBOL_GPL(spi_setup); | 677 | EXPORT_SYMBOL_GPL(spi_setup); |
672 | 678 | ||
679 | static int __spi_async(struct spi_device *spi, struct spi_message *message) | ||
680 | { | ||
681 | struct spi_master *master = spi->master; | ||
682 | |||
683 | /* Half-duplex links include original MicroWire, and ones with | ||
684 | * only one data pin like SPI_3WIRE (switches direction) or where | ||
685 | * either MOSI or MISO is missing. They can also be caused by | ||
686 | * software limitations. | ||
687 | */ | ||
688 | if ((master->flags & SPI_MASTER_HALF_DUPLEX) | ||
689 | || (spi->mode & SPI_3WIRE)) { | ||
690 | struct spi_transfer *xfer; | ||
691 | unsigned flags = master->flags; | ||
692 | |||
693 | list_for_each_entry(xfer, &message->transfers, transfer_list) { | ||
694 | if (xfer->rx_buf && xfer->tx_buf) | ||
695 | return -EINVAL; | ||
696 | if ((flags & SPI_MASTER_NO_TX) && xfer->tx_buf) | ||
697 | return -EINVAL; | ||
698 | if ((flags & SPI_MASTER_NO_RX) && xfer->rx_buf) | ||
699 | return -EINVAL; | ||
700 | } | ||
701 | } | ||
702 | |||
703 | message->spi = spi; | ||
704 | message->status = -EINPROGRESS; | ||
705 | return master->transfer(spi, message); | ||
706 | } | ||
707 | |||
673 | /** | 708 | /** |
674 | * spi_async - asynchronous SPI transfer | 709 | * spi_async - asynchronous SPI transfer |
675 | * @spi: device with which data will be exchanged | 710 | * @spi: device with which data will be exchanged |
@@ -702,33 +737,68 @@ EXPORT_SYMBOL_GPL(spi_setup); | |||
702 | int spi_async(struct spi_device *spi, struct spi_message *message) | 737 | int spi_async(struct spi_device *spi, struct spi_message *message) |
703 | { | 738 | { |
704 | struct spi_master *master = spi->master; | 739 | struct spi_master *master = spi->master; |
740 | int ret; | ||
741 | unsigned long flags; | ||
705 | 742 | ||
706 | /* Half-duplex links include original MicroWire, and ones with | 743 | spin_lock_irqsave(&master->bus_lock_spinlock, flags); |
707 | * only one data pin like SPI_3WIRE (switches direction) or where | ||
708 | * either MOSI or MISO is missing. They can also be caused by | ||
709 | * software limitations. | ||
710 | */ | ||
711 | if ((master->flags & SPI_MASTER_HALF_DUPLEX) | ||
712 | || (spi->mode & SPI_3WIRE)) { | ||
713 | struct spi_transfer *xfer; | ||
714 | unsigned flags = master->flags; | ||
715 | 744 | ||
716 | list_for_each_entry(xfer, &message->transfers, transfer_list) { | 745 | if (master->bus_lock_flag) |
717 | if (xfer->rx_buf && xfer->tx_buf) | 746 | ret = -EBUSY; |
718 | return -EINVAL; | 747 | else |
719 | if ((flags & SPI_MASTER_NO_TX) && xfer->tx_buf) | 748 | ret = __spi_async(spi, message); |
720 | return -EINVAL; | ||
721 | if ((flags & SPI_MASTER_NO_RX) && xfer->rx_buf) | ||
722 | return -EINVAL; | ||
723 | } | ||
724 | } | ||
725 | 749 | ||
726 | message->spi = spi; | 750 | spin_unlock_irqrestore(&master->bus_lock_spinlock, flags); |
727 | message->status = -EINPROGRESS; | 751 | |
728 | return master->transfer(spi, message); | 752 | return ret; |
729 | } | 753 | } |
730 | EXPORT_SYMBOL_GPL(spi_async); | 754 | EXPORT_SYMBOL_GPL(spi_async); |
731 | 755 | ||
756 | /** | ||
757 | * spi_async_locked - version of spi_async with exclusive bus usage | ||
758 | * @spi: device with which data will be exchanged | ||
759 | * @message: describes the data transfers, including completion callback | ||
760 | * Context: any (irqs may be blocked, etc) | ||
761 | * | ||
762 | * This call may be used in_irq and other contexts which can't sleep, | ||
763 | * as well as from task contexts which can sleep. | ||
764 | * | ||
765 | * The completion callback is invoked in a context which can't sleep. | ||
766 | * Before that invocation, the value of message->status is undefined. | ||
767 | * When the callback is issued, message->status holds either zero (to | ||
768 | * indicate complete success) or a negative error code. After that | ||
769 | * callback returns, the driver which issued the transfer request may | ||
770 | * deallocate the associated memory; it's no longer in use by any SPI | ||
771 | * core or controller driver code. | ||
772 | * | ||
773 | * Note that although all messages to a spi_device are handled in | ||
774 | * FIFO order, messages may go to different devices in other orders. | ||
775 | * Some device might be higher priority, or have various "hard" access | ||
776 | * time requirements, for example. | ||
777 | * | ||
778 | * On detection of any fault during the transfer, processing of | ||
779 | * the entire message is aborted, and the device is deselected. | ||
780 | * Until returning from the associated message completion callback, | ||
781 | * no other spi_message queued to that device will be processed. | ||
782 | * (This rule applies equally to all the synchronous transfer calls, | ||
783 | * which are wrappers around this core asynchronous primitive.) | ||
784 | */ | ||
785 | int spi_async_locked(struct spi_device *spi, struct spi_message *message) | ||
786 | { | ||
787 | struct spi_master *master = spi->master; | ||
788 | int ret; | ||
789 | unsigned long flags; | ||
790 | |||
791 | spin_lock_irqsave(&master->bus_lock_spinlock, flags); | ||
792 | |||
793 | ret = __spi_async(spi, message); | ||
794 | |||
795 | spin_unlock_irqrestore(&master->bus_lock_spinlock, flags); | ||
796 | |||
797 | return ret; | ||
798 | |||
799 | } | ||
800 | EXPORT_SYMBOL_GPL(spi_async_locked); | ||
801 | |||
732 | 802 | ||
733 | /*-------------------------------------------------------------------------*/ | 803 | /*-------------------------------------------------------------------------*/ |
734 | 804 | ||
@@ -742,6 +812,32 @@ static void spi_complete(void *arg) | |||
742 | complete(arg); | 812 | complete(arg); |
743 | } | 813 | } |
744 | 814 | ||
815 | static int __spi_sync(struct spi_device *spi, struct spi_message *message, | ||
816 | int bus_locked) | ||
817 | { | ||
818 | DECLARE_COMPLETION_ONSTACK(done); | ||
819 | int status; | ||
820 | struct spi_master *master = spi->master; | ||
821 | |||
822 | message->complete = spi_complete; | ||
823 | message->context = &done; | ||
824 | |||
825 | if (!bus_locked) | ||
826 | mutex_lock(&master->bus_lock_mutex); | ||
827 | |||
828 | status = spi_async_locked(spi, message); | ||
829 | |||
830 | if (!bus_locked) | ||
831 | mutex_unlock(&master->bus_lock_mutex); | ||
832 | |||
833 | if (status == 0) { | ||
834 | wait_for_completion(&done); | ||
835 | status = message->status; | ||
836 | } | ||
837 | message->context = NULL; | ||
838 | return status; | ||
839 | } | ||
840 | |||
745 | /** | 841 | /** |
746 | * spi_sync - blocking/synchronous SPI data transfers | 842 | * spi_sync - blocking/synchronous SPI data transfers |
747 | * @spi: device with which data will be exchanged | 843 | * @spi: device with which data will be exchanged |
@@ -765,21 +861,86 @@ static void spi_complete(void *arg) | |||
765 | */ | 861 | */ |
766 | int spi_sync(struct spi_device *spi, struct spi_message *message) | 862 | int spi_sync(struct spi_device *spi, struct spi_message *message) |
767 | { | 863 | { |
768 | DECLARE_COMPLETION_ONSTACK(done); | 864 | return __spi_sync(spi, message, 0); |
769 | int status; | ||
770 | |||
771 | message->complete = spi_complete; | ||
772 | message->context = &done; | ||
773 | status = spi_async(spi, message); | ||
774 | if (status == 0) { | ||
775 | wait_for_completion(&done); | ||
776 | status = message->status; | ||
777 | } | ||
778 | message->context = NULL; | ||
779 | return status; | ||
780 | } | 865 | } |
781 | EXPORT_SYMBOL_GPL(spi_sync); | 866 | EXPORT_SYMBOL_GPL(spi_sync); |
782 | 867 | ||
868 | /** | ||
869 | * spi_sync_locked - version of spi_sync with exclusive bus usage | ||
870 | * @spi: device with which data will be exchanged | ||
871 | * @message: describes the data transfers | ||
872 | * Context: can sleep | ||
873 | * | ||
874 | * This call may only be used from a context that may sleep. The sleep | ||
875 | * is non-interruptible, and has no timeout. Low-overhead controller | ||
876 | * drivers may DMA directly into and out of the message buffers. | ||
877 | * | ||
878 | * This call should be used by drivers that require exclusive access to the | ||
879 | * SPI bus. It has to be preceded by a spi_bus_lock call. The SPI bus must | ||
880 | * be released by a spi_bus_unlock call when the exclusive access is over. | ||
881 | * | ||
882 | * It returns zero on success, else a negative error code. | ||
883 | */ | ||
884 | int spi_sync_locked(struct spi_device *spi, struct spi_message *message) | ||
885 | { | ||
886 | return __spi_sync(spi, message, 1); | ||
887 | } | ||
888 | EXPORT_SYMBOL_GPL(spi_sync_locked); | ||
889 | |||
890 | /** | ||
891 | * spi_bus_lock - obtain a lock for exclusive SPI bus usage | ||
892 | * @master: SPI bus master that should be locked for exclusive bus access | ||
893 | * Context: can sleep | ||
894 | * | ||
895 | * This call may only be used from a context that may sleep. The sleep | ||
896 | * is non-interruptible, and has no timeout. | ||
897 | * | ||
898 | * This call should be used by drivers that require exclusive access to the | ||
899 | * SPI bus. The SPI bus must be released by a spi_bus_unlock call when the | ||
900 | * exclusive access is over. Data transfer must be done by spi_sync_locked | ||
901 | * and spi_async_locked calls when the SPI bus lock is held. | ||
902 | * | ||
903 | * It returns zero on success, else a negative error code. | ||
904 | */ | ||
905 | int spi_bus_lock(struct spi_master *master) | ||
906 | { | ||
907 | unsigned long flags; | ||
908 | |||
909 | mutex_lock(&master->bus_lock_mutex); | ||
910 | |||
911 | spin_lock_irqsave(&master->bus_lock_spinlock, flags); | ||
912 | master->bus_lock_flag = 1; | ||
913 | spin_unlock_irqrestore(&master->bus_lock_spinlock, flags); | ||
914 | |||
915 | /* mutex remains locked until spi_bus_unlock is called */ | ||
916 | |||
917 | return 0; | ||
918 | } | ||
919 | EXPORT_SYMBOL_GPL(spi_bus_lock); | ||
920 | |||
921 | /** | ||
922 | * spi_bus_unlock - release the lock for exclusive SPI bus usage | ||
923 | * @master: SPI bus master that was locked for exclusive bus access | ||
924 | * Context: can sleep | ||
925 | * | ||
926 | * This call may only be used from a context that may sleep. The sleep | ||
927 | * is non-interruptible, and has no timeout. | ||
928 | * | ||
929 | * This call releases an SPI bus lock previously obtained by an spi_bus_lock | ||
930 | * call. | ||
931 | * | ||
932 | * It returns zero on success, else a negative error code. | ||
933 | */ | ||
934 | int spi_bus_unlock(struct spi_master *master) | ||
935 | { | ||
936 | master->bus_lock_flag = 0; | ||
937 | |||
938 | mutex_unlock(&master->bus_lock_mutex); | ||
939 | |||
940 | return 0; | ||
941 | } | ||
942 | EXPORT_SYMBOL_GPL(spi_bus_unlock); | ||
943 | |||
783 | /* portable code must never pass more than 32 bytes */ | 944 | /* portable code must never pass more than 32 bytes */ |
784 | #define SPI_BUFSIZ max(32,SMP_CACHE_BYTES) | 945 | #define SPI_BUFSIZ max(32,SMP_CACHE_BYTES) |
785 | 946 | ||