author     Bartlomiej Zolnierkiewicz <bzolnier@gmail.com>   2009-01-06 11:20:49 -0500
committer  Bartlomiej Zolnierkiewicz <bzolnier@gmail.com>   2009-01-06 11:20:49 -0500
commit     5b31f855f10d0053e738baa6d91fb6a3fad35119 (patch)
tree       bdd8125faf6e4b8baf37d86270389b4d6f62f37d /drivers/ide
parent     efe0397eef544ac4bcca23d39aa8d5db154952e0 (diff)
ide: use lock bitops for ports serialization (v2)
* Add a ->host_busy field to struct ide_host and use its first bit
  together with lock bitops to provide a new port serialization method
  (the bitop pattern is sketched after the sign-off below).
* Convert the core IDE code to use the new ide_[un]lock_host() helpers.
  This removes the need to take hwgroup->lock if the host is already
  busy on serialized hosts and makes it possible to merge ide_hwgroup_t
  into ide_hwif_t (done in a later patch).
* Remove the no longer needed ide_hwgroup_t.busy flag and
  ide_[un]lock_hwgroup().
* Update the do_ide_request() documentation.
v2:
* ide_release_lock() should be called inside the IDE_HFLAG_SERIALIZE check.
* Add an ide_hwif_t.busy flag and ide_[un]lock_port() for serializing
  devices on a port.
Signed-off-by: Bartlomiej Zolnierkiewicz <bzolnier@gmail.com>
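
As a minimal, self-contained sketch of the lock-bitop pattern the new
ide_[un]lock_host() helpers rely on (the demo_* names and the simplified
struct are illustrative assumptions, not part of this patch; in the driver
the bit lives in ide_host.host_busy and is addressed as IDE_HOST_BUSY, bit 0):

/*
 * Illustrative sketch only -- not driver code.  test_and_set_bit_lock()
 * returns the old bit value with acquire semantics; clear_bit_unlock()
 * clears it with release semantics, so the pair behaves like a very
 * cheap per-host trylock.
 */
#include <linux/bitops.h>

#define DEMO_HOST_BUSY	0			/* bit 0, as in IDE_HOST_BUSY */

struct demo_host {
	unsigned long	busy;			/* stands in for ->host_busy */
};

/* Returns 0 if we now own the host, non-zero if it was already busy. */
static int demo_lock_host(struct demo_host *host)
{
	return test_and_set_bit_lock(DEMO_HOST_BUSY, &host->busy);
}

/* Pairs with a successful demo_lock_host(). */
static void demo_unlock_host(struct demo_host *host)
{
	clear_bit_unlock(DEMO_HOST_BUSY, &host->busy);
}

Because the busy check is a single atomic bitop, a caller that finds the host
busy can back off (plug the queue and return) without ever touching
hwgroup->lock, which is exactly the fast path do_ide_request() takes in the
diff below.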
Diffstat (limited to 'drivers/ide')
-rw-r--r--   drivers/ide/ide-io.c   105
1 file changed, 63 insertions(+), 42 deletions(-)
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index 4ce793c05629..8f371821c614 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -666,45 +666,54 @@ void ide_stall_queue (ide_drive_t *drive, unsigned long timeout)
 }
 EXPORT_SYMBOL(ide_stall_queue);
 
+static inline int ide_lock_port(ide_hwif_t *hwif)
+{
+	if (hwif->busy)
+		return 1;
+
+	hwif->busy = 1;
+
+	return 0;
+}
+
+static inline void ide_unlock_port(ide_hwif_t *hwif)
+{
+	hwif->busy = 0;
+}
+
+static inline int ide_lock_host(struct ide_host *host, ide_hwif_t *hwif)
+{
+	int rc = 0;
+
+	if (host->host_flags & IDE_HFLAG_SERIALIZE) {
+		rc = test_and_set_bit_lock(IDE_HOST_BUSY, &host->host_busy);
+		if (rc == 0) {
+			/* for atari only */
+			ide_get_lock(ide_intr, hwif);
+		}
+	}
+	return rc;
+}
+
+static inline void ide_unlock_host(struct ide_host *host)
+{
+	if (host->host_flags & IDE_HFLAG_SERIALIZE) {
+		/* for atari only */
+		ide_release_lock();
+		clear_bit_unlock(IDE_HOST_BUSY, &host->host_busy);
+	}
+}
+
 /*
  * Issue a new request to a drive from hwgroup
- *
- * A hwgroup is a serialized group of IDE interfaces.  Usually there is
- * exactly one hwif (interface) per hwgroup, but buggy controllers (eg. CMD640)
- * may have both interfaces in a single hwgroup to "serialize" access.
- * Or possibly multiple ISA interfaces can share a common IRQ by being grouped
- * together into one hwgroup for serialized access.
- *
- * Note also that several hwgroups can end up sharing a single IRQ,
- * possibly along with many other devices.  This is especially common in
- * PCI-based systems with off-board IDE controller cards.
- *
- * The IDE driver uses a per-hwgroup lock to protect the hwgroup->busy flag.
- *
- * The first thread into the driver for a particular hwgroup sets the
- * hwgroup->busy flag to indicate that this hwgroup is now active,
- * and then initiates processing of the top request from the request queue.
- *
- * Other threads attempting entry notice the busy setting, and will simply
- * queue their new requests and exit immediately.  Note that hwgroup->busy
- * remains set even when the driver is merely awaiting the next interrupt.
- * Thus, the meaning is "this hwgroup is busy processing a request".
- *
- * When processing of a request completes, the completing thread or IRQ-handler
- * will start the next request from the queue.  If no more work remains,
- * the driver will clear the hwgroup->busy flag and exit.
- *
- * The per-hwgroup spinlock is used to protect all access to the
- * hwgroup->busy flag, but is otherwise not needed for most processing in
- * the driver.  This makes the driver much more friendlier to shared IRQs
- * than previous designs, while remaining 100% (?) SMP safe and capable.
  */
 void do_ide_request(struct request_queue *q)
 {
 	ide_drive_t *drive = q->queuedata;
 	ide_hwif_t *hwif = drive->hwif;
+	struct ide_host *host = hwif->host;
 	ide_hwgroup_t *hwgroup = hwif->hwgroup;
-	struct request *rq;
+	struct request *rq = NULL;
 	ide_startstop_t startstop;
 
 	/*
@@ -721,9 +730,13 @@ void do_ide_request(struct request_queue *q)
 	blk_remove_plug(q);
 
 	spin_unlock_irq(q->queue_lock);
+
+	if (ide_lock_host(host, hwif))
+		goto plug_device_2;
+
 	spin_lock_irq(&hwgroup->lock);
 
-	if (!ide_lock_hwgroup(hwgroup, hwif)) {
+	if (!ide_lock_port(hwif)) {
 		ide_hwif_t *prev_port;
 repeat:
 		prev_port = hwif->host->cur_port;
@@ -731,7 +744,7 @@ repeat:
 
 		if (drive->dev_flags & IDE_DFLAG_SLEEPING) {
 			if (time_before(drive->sleep, jiffies)) {
-				ide_unlock_hwgroup(hwgroup);
+				ide_unlock_port(hwif);
 				goto plug_device;
 			}
 		}
@@ -761,7 +774,7 @@ repeat:
 		spin_lock_irq(&hwgroup->lock);
 
 		if (!rq) {
-			ide_unlock_hwgroup(hwgroup);
+			ide_unlock_port(hwif);
 			goto out;
 		}
 
@@ -782,7 +795,7 @@ repeat:
 		    blk_pm_request(rq) == 0 &&
 		    (rq->cmd_flags & REQ_PREEMPT) == 0) {
 			/* there should be no pending command at this point */
-			ide_unlock_hwgroup(hwgroup);
+			ide_unlock_port(hwif);
 			goto plug_device;
 		}
 
@@ -798,11 +811,15 @@ repeat:
 		goto plug_device;
 out:
 	spin_unlock_irq(&hwgroup->lock);
+	if (rq == NULL)
+		ide_unlock_host(host);
 	spin_lock_irq(q->queue_lock);
 	return;
 
 plug_device:
 	spin_unlock_irq(&hwgroup->lock);
+	ide_unlock_host(host);
+plug_device_2:
 	spin_lock_irq(q->queue_lock);
 
 	if (!elv_queue_empty(q))
@@ -844,9 +861,9 @@ static ide_startstop_t ide_dma_timeout_retry(ide_drive_t *drive, int error)
 	ide_dma_off_quietly(drive);
 
 	/*
-	 * un-busy drive etc (hwgroup->busy is cleared on return) and
-	 * make sure request is sane
+	 * un-busy drive etc and make sure request is sane
 	 */
+
 	rq = HWGROUP(drive)->rq;
 
 	if (!rq)
@@ -895,6 +912,7 @@ static void ide_plug_device(ide_drive_t *drive)
 void ide_timer_expiry (unsigned long data)
 {
 	ide_hwgroup_t *hwgroup = (ide_hwgroup_t *) data;
+	ide_hwif_t *uninitialized_var(hwif);
 	ide_drive_t *uninitialized_var(drive);
 	ide_handler_t *handler;
 	ide_expiry_t *expiry;
@@ -918,7 +936,6 @@ void ide_timer_expiry (unsigned long data)
 			printk(KERN_ERR "%s: ->cur_dev was NULL\n", __func__);
 			hwgroup->handler = NULL;
 		} else {
-			ide_hwif_t *hwif;
 			ide_startstop_t startstop = ide_stopped;
 
 			if ((expiry = hwgroup->expiry) != NULL) {
@@ -964,15 +981,17 @@ void ide_timer_expiry (unsigned long data)
 			spin_lock_irq(&hwgroup->lock);
 			enable_irq(hwif->irq);
 			if (startstop == ide_stopped) {
-				ide_unlock_hwgroup(hwgroup);
+				ide_unlock_port(hwif);
 				plug_device = 1;
 			}
 		}
 	}
 	spin_unlock_irqrestore(&hwgroup->lock, flags);
 
-	if (plug_device)
+	if (plug_device) {
+		ide_unlock_host(hwif->host);
 		ide_plug_device(drive);
+	}
 }
 
 /**
@@ -1150,7 +1169,7 @@ irqreturn_t ide_intr (int irq, void *dev_id)
 	 */
 	if (startstop == ide_stopped) {
 		if (hwgroup->handler == NULL) {	/* paranoia */
-			ide_unlock_hwgroup(hwgroup);
+			ide_unlock_port(hwif);
 			plug_device = 1;
 		} else
 			printk(KERN_ERR "%s: %s: huh? expected NULL handler "
@@ -1161,8 +1180,10 @@ out_handled:
 out:
 	spin_unlock_irqrestore(&hwgroup->lock, flags);
 out_early:
-	if (plug_device)
+	if (plug_device) {
+		ide_unlock_host(hwif->host);
 		ide_plug_device(drive);
+	}
 
 	return irq_ret;
 }
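
As a rough userspace analogy of the resulting two-level serialization (an
atomic host-wide busy bit tried first, then a per-port busy flag changed only
under the port group's spinlock), assuming invented demo_* names and POSIX/C11
primitives in place of the kernel ones:

/*
 * Illustrative analogy only -- not kernel code.  Initialize the spinlock
 * with pthread_spin_init() and the flag with ATOMIC_FLAG_INIT before use.
 */
#include <stdatomic.h>
#include <pthread.h>
#include <stdbool.h>

struct demo_port {
	pthread_spinlock_t lock;	/* plays the role of hwgroup->lock */
	bool busy;			/* plays the role of hwif->busy */
};

struct demo_serialized_host {
	atomic_flag busy;		/* plays the role of host->host_busy bit 0 */
};

/* Returns true if the caller now owns the host. */
static bool demo_try_lock_host(struct demo_serialized_host *host)
{
	return !atomic_flag_test_and_set_explicit(&host->busy,
						  memory_order_acquire);
}

static void demo_release_host(struct demo_serialized_host *host)
{
	atomic_flag_clear_explicit(&host->busy, memory_order_release);
}

/* Returns true if the port was idle and is now marked busy. */
static bool demo_try_lock_port(struct demo_port *port)
{
	bool owned = false;

	pthread_spin_lock(&port->lock);
	if (!port->busy) {
		port->busy = true;
		owned = true;
	}
	pthread_spin_unlock(&port->lock);
	return owned;
}

A request path in this analogy would call demo_try_lock_host() first and bail
out (re-plugging the queue) if it fails, only then take the spinlock and
demo_try_lock_port(), mirroring the plug_device_2 / plug_device exits added to
do_ide_request() above.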