author     Michael Holzheu <holzheu@de.ibm.com>      2006-03-24 06:15:28 -0500
committer  Linus Torvalds <torvalds@g5.osdl.org>     2006-03-24 10:33:18 -0500
commit     5f38433885245dce82aa53c20a6b2efbe81ae350 (patch)
tree       1731a5e3b9092f3ff060ac6aa652be8ec6dde890 /drivers
parent     4cd190a736a97e302c038bd91357d636369d4c6b (diff)
[PATCH] s390: fix endless retry loop in tape driver
If a tape device is assigned to another host, the interrupt for the assign
operation comes back with deferred condition code 1. Under some conditions
this can lead to an endless loop of retries. In the deferred condition code
handling, check whether the current request is still in I/O and prevent retries
when the request has already been cancelled (a simplified sketch of this check
follows the sign-offs below).
Signed-off-by: Michael Holzheu <holzheu@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
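
The heart of the change is the new status check in __tape_do_irq(): a deferred
condition code now only requeues a request that is still TAPE_REQUEST_IN_IO.
The following standalone C sketch models that decision; the names req_status,
struct req and handle_deferred_cc are invented for illustration and are not
part of the driver.

/*
 * Illustrative model of the retry decision, not driver code.
 * States loosely mirror the tape request life cycle:
 * queued -> in I/O -> done (or cancelled).
 */
#include <stdio.h>

enum req_status { REQ_QUEUED, REQ_IN_IO, REQ_DONE };

struct req {
        enum req_status status;
};

/*
 * Deferred condition code handling: retry only while the request is
 * still in I/O. A request that was already cancelled (REQ_DONE) is
 * left alone, which is what breaks the endless retry loop.
 */
static int handle_deferred_cc(struct req *r)
{
        if (r->status != REQ_IN_IO)
                return 0;               /* no retry */
        r->status = REQ_QUEUED;         /* schedule a restart later */
        return 1;
}

int main(void)
{
        struct req active = { REQ_IN_IO };
        struct req cancelled = { REQ_DONE };

        printf("active request requeued:    %d\n", handle_deferred_cc(&active));
        printf("cancelled request requeued: %d\n", handle_deferred_cc(&cancelled));
        return 0;
}

Compiled on its own, this prints 1 for the in-flight request and 0 for the
cancelled one; the real handler additionally delays the restart with
schedule_delayed_work(..., HZ), as seen in the tape_core.c hunk below.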
Diffstat (limited to 'drivers')
-rw-r--r--   drivers/s390/char/tape.h        1
-rw-r--r--   drivers/s390/char/tape_core.c   32
-rw-r--r--   drivers/s390/char/tape_std.c    15
3 files changed, 35 insertions, 13 deletions
diff --git a/drivers/s390/char/tape.h b/drivers/s390/char/tape.h
index 01d865d93791..cd51ace8b610 100644
--- a/drivers/s390/char/tape.h
+++ b/drivers/s390/char/tape.h
@@ -250,6 +250,7 @@ extern void tape_free_request(struct tape_request *);
 extern int tape_do_io(struct tape_device *, struct tape_request *);
 extern int tape_do_io_async(struct tape_device *, struct tape_request *);
 extern int tape_do_io_interruptible(struct tape_device *, struct tape_request *);
+extern int tape_cancel_io(struct tape_device *, struct tape_request *);
 void tape_hotplug_event(struct tape_device *, int major, int action);
 
 static inline int
diff --git a/drivers/s390/char/tape_core.c b/drivers/s390/char/tape_core.c
index 5d17149a6529..c6fab5dbdd44 100644
--- a/drivers/s390/char/tape_core.c
+++ b/drivers/s390/char/tape_core.c
@@ -761,6 +761,13 @@ __tape_start_next_request(struct tape_device *device)
         */
        if (request->status == TAPE_REQUEST_IN_IO)
                return;
+       /*
+        * Request has already been stopped. We have to wait until
+        * the request is removed from the queue in the interrupt
+        * handling.
+        */
+       if (request->status == TAPE_REQUEST_DONE)
+               return;
 
        /*
         * We wanted to cancel the request but the common I/O layer
@@ -1024,6 +1031,20 @@ tape_do_io_interruptible(struct tape_device *device,
 }
 
 /*
+ * Stop running ccw.
+ */
+int
+tape_cancel_io(struct tape_device *device, struct tape_request *request)
+{
+       int rc;
+
+       spin_lock_irq(get_ccwdev_lock(device->cdev));
+       rc = __tape_cancel_io(device, request);
+       spin_unlock_irq(get_ccwdev_lock(device->cdev));
+       return rc;
+}
+
+/*
  * Tape interrupt routine, called from the ccw_device layer
  */
 static void
@@ -1068,12 +1089,12 @@ __tape_do_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
         * error might still apply. So we just schedule the request to be
         * started later.
         */
-       if (irb->scsw.cc != 0 && (irb->scsw.fctl & SCSW_FCTL_START_FUNC)) {
-               PRINT_WARN("(%s): deferred cc=%i. restaring\n",
-                       cdev->dev.bus_id,
-                       irb->scsw.cc);
+       if (irb->scsw.cc != 0 && (irb->scsw.fctl & SCSW_FCTL_START_FUNC) &&
+           (request->status == TAPE_REQUEST_IN_IO)) {
+               DBF_EVENT(3,"(%08x): deferred cc=%i, fctl=%i. restarting\n",
+                       device->cdev_id, irb->scsw.cc, irb->scsw.fctl);
                request->status = TAPE_REQUEST_QUEUED;
-               schedule_work(&device->tape_dnr);
+               schedule_delayed_work(&device->tape_dnr, HZ);
                return;
        }
 
@@ -1287,4 +1308,5 @@ EXPORT_SYMBOL(tape_dump_sense_dbf);
 EXPORT_SYMBOL(tape_do_io);
 EXPORT_SYMBOL(tape_do_io_async);
 EXPORT_SYMBOL(tape_do_io_interruptible);
+EXPORT_SYMBOL(tape_cancel_io);
 EXPORT_SYMBOL(tape_mtop);
diff --git a/drivers/s390/char/tape_std.c b/drivers/s390/char/tape_std.c
index 2f9fe30989a7..99cf881f41db 100644
--- a/drivers/s390/char/tape_std.c
+++ b/drivers/s390/char/tape_std.c
@@ -37,20 +37,19 @@ tape_std_assign_timeout(unsigned long data)
 {
        struct tape_request *   request;
        struct tape_device *    device;
+       int rc;
 
        request = (struct tape_request *) data;
        if ((device = request->device) == NULL)
                BUG();
 
-       spin_lock_irq(get_ccwdev_lock(device->cdev));
-       if (request->callback != NULL) {
-               DBF_EVENT(3, "%08x: Assignment timeout. Device busy.\n",
+       DBF_EVENT(3, "%08x: Assignment timeout. Device busy.\n",
                        device->cdev_id);
-               PRINT_ERR("%s: Assignment timeout. Device busy.\n",
-                       device->cdev->dev.bus_id);
-               ccw_device_clear(device->cdev, (long) request);
-       }
-       spin_unlock_irq(get_ccwdev_lock(device->cdev));
+       rc = tape_cancel_io(device, request);
+       if(rc)
+               PRINT_ERR("(%s): Assign timeout: Cancel failed with rc = %i\n",
+                       device->cdev->dev.bus_id, rc);
+
 }
 
 int