Diffstat (limited to 'drivers/s390/char')

 -rw-r--r--  drivers/s390/char/tape.h      |   7
 -rw-r--r--  drivers/s390/char/tape_core.c | 299
 2 files changed, 181 insertions(+), 125 deletions(-)
diff --git a/drivers/s390/char/tape.h b/drivers/s390/char/tape.h
index d04e6c2c3cc1..01d865d93791 100644
--- a/drivers/s390/char/tape.h
+++ b/drivers/s390/char/tape.h
@@ -3,10 +3,11 @@
  * tape device driver for 3480/3490E/3590 tapes.
  *
  * S390 and zSeries version
- * Copyright (C) 2001,2002 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Copyright (C) 2001,2005 IBM Deutschland Entwicklung GmbH, IBM Corporation
  * Author(s): Carsten Otte <cotte@de.ibm.com>
  *            Tuan Ngo-Anh <ngoanh@de.ibm.com>
  *            Martin Schwidefsky <schwidefsky@de.ibm.com>
+ *            Stefan Bader <shbader@de.ibm.com>
  */
 
 #ifndef _TAPE_H
@@ -111,6 +112,7 @@ enum tape_request_status {
         TAPE_REQUEST_QUEUED,    /* request is queued to be processed */
         TAPE_REQUEST_IN_IO,     /* request is currently in IO */
         TAPE_REQUEST_DONE,      /* request is completed. */
+        TAPE_REQUEST_CANCEL,    /* request should be canceled. */
 };
 
 /* Tape CCW request */
@@ -237,6 +239,9 @@ struct tape_device {
         /* Block dev frontend data */
         struct tape_blk_data            blk_data;
 #endif
+
+        /* Function to start or stop the next request later. */
+        struct work_struct              tape_dnr;
 };
 
 /* Externals from tape_core.c */
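[Editor's note] The two header additions above carry the whole mechanism of this patch: a request can now be flagged TAPE_REQUEST_CANCEL, and every tape_device gets a work_struct (tape_dnr, "do next request") that tape_core.c schedules whenever the common I/O layer answers -EBUSY. Below is a minimal, hedged sketch of that deferral pattern in isolation, using the same 2.6-era workqueue API the patch uses (three-argument INIT_WORK, handler taking a void * context). All toy_* names are invented for illustration and are not part of the patch.

#include <linux/workqueue.h>
#include <linux/spinlock.h>

struct toy_device {
        spinlock_t              lock;
        int                     retry_needed;   /* operation to redo later */
        struct work_struct      retry_work;     /* counterpart of tape_dnr */
};

/* Old-style work handler: the data pointer is bound at INIT_WORK time. */
static void toy_retry(void *data)
{
        struct toy_device *dev = data;

        spin_lock_irq(&dev->lock);
        if (dev->retry_needed) {
                dev->retry_needed = 0;
                /* redo the start/cancel that failed with -EBUSY earlier */
        }
        spin_unlock_irq(&dev->lock);
}

static void toy_device_init(struct toy_device *dev)
{
        spin_lock_init(&dev->lock);
        dev->retry_needed = 0;
        INIT_WORK(&dev->retry_work, toy_retry, dev);    /* pre-2.6.20 form */
}

/* Called with dev->lock held when a lower layer reports -EBUSY. */
static void toy_defer_retry(struct toy_device *dev)
{
        dev->retry_needed = 1;
        schedule_work(&dev->retry_work);
}

The driver does exactly this with device->tape_dnr: tape_alloc_device() binds it to tape_delayed_next_request() in the tape_core.c hunks below, and both the cancel and the start paths schedule it when the common I/O layer is busy.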
diff --git a/drivers/s390/char/tape_core.c b/drivers/s390/char/tape_core.c
index 0597aa0e27ee..6c52e8307dc5 100644
--- a/drivers/s390/char/tape_core.c
+++ b/drivers/s390/char/tape_core.c
@@ -3,11 +3,12 @@
  * basic function of the tape device driver
  *
  * S390 and zSeries version
- * Copyright (C) 2001,2002 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Copyright (C) 2001,2005 IBM Deutschland Entwicklung GmbH, IBM Corporation
  * Author(s): Carsten Otte <cotte@de.ibm.com>
  *            Michael Holzheu <holzheu@de.ibm.com>
  *            Tuan Ngo-Anh <ngoanh@de.ibm.com>
  *            Martin Schwidefsky <schwidefsky@de.ibm.com>
+ *            Stefan Bader <shbader@de.ibm.com>
  */
 
 #include <linux/config.h>
@@ -28,7 +29,7 @@
 #define PRINTK_HEADER "TAPE_CORE: "
 
 static void __tape_do_irq (struct ccw_device *, unsigned long, struct irb *);
-static void __tape_remove_request(struct tape_device *, struct tape_request *);
+static void tape_delayed_next_request(void * data);
 
 /*
  * One list to contain all tape devices of all disciplines, so
@@ -257,7 +258,7 @@ tape_med_state_set(struct tape_device *device, enum tape_medium_state newstate)
  * Stop running ccw. Has to be called with the device lock held.
  */
 static inline int
-__tape_halt_io(struct tape_device *device, struct tape_request *request)
+__tape_cancel_io(struct tape_device *device, struct tape_request *request)
 {
         int retries;
         int rc;
@@ -270,20 +271,23 @@ __tape_halt_io(struct tape_device *device, struct tape_request *request)
         for (retries = 0; retries < 5; retries++) {
                 rc = ccw_device_clear(device->cdev, (long) request);
 
-                if (rc == 0) { /* Termination successful */
-                        request->rc     = -EIO;
-                        request->status = TAPE_REQUEST_DONE;
-                        return 0;
+                switch (rc) {
+                        case 0:
+                                request->status = TAPE_REQUEST_DONE;
+                                return 0;
+                        case -EBUSY:
+                                request->status = TAPE_REQUEST_CANCEL;
+                                schedule_work(&device->tape_dnr);
+                                return 0;
+                        case -ENODEV:
+                                DBF_EXCEPTION(2, "device gone, retry\n");
+                                break;
+                        case -EIO:
+                                DBF_EXCEPTION(2, "I/O error, retry\n");
+                                break;
+                        default:
+                                BUG();
                 }
-
-                if (rc == -ENODEV)
-                        DBF_EXCEPTION(2, "device gone, retry\n");
-                else if (rc == -EIO)
-                        DBF_EXCEPTION(2, "I/O error, retry\n");
-                else if (rc == -EBUSY)
-                        DBF_EXCEPTION(2, "device busy, retry late\n");
-                else
-                        BUG();
         }
 
         return rc;
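[Editor's note] The rewritten __tape_cancel_io() above is where the deferral is first used: when ccw_device_clear() returns -EBUSY, the old code only logged "device busy, retry late", while the new code records the intent in request->status (TAPE_REQUEST_CANCEL) and schedules device->tape_dnr, so the cancel is retried later with the device lock held by __tape_start_next_request(), added further down. A hedged, self-contained sketch of that "mark and defer" shape follows; the toy_* identifiers and the always-busy lower layer are invented for illustration.

#include <linux/errno.h>
#include <linux/workqueue.h>

enum toy_state { TOY_QUEUED, TOY_IN_IO, TOY_CANCEL, TOY_DONE };

struct toy_request {
        enum toy_state          state;
        struct work_struct      *retry;         /* e.g. &device->tape_dnr */
};

/* Stand-in for ccw_device_clear(): pretend the channel is busy right now. */
static int toy_lower_clear(struct toy_request *request)
{
        return -EBUSY;
}

/* Called with the device lock held, possibly from interrupt context. */
static int toy_cancel(struct toy_request *request)
{
        int rc;

        rc = toy_lower_clear(request);
        switch (rc) {
                case 0:
                        request->state = TOY_DONE;
                        return 0;
                case -EBUSY:
                        /* Remember the intent; the work item redoes the cancel. */
                        request->state = TOY_CANCEL;
                        schedule_work(request->retry);
                        return 0;
                default:
                        /* e.g. -ENODEV, -EIO: the real driver retries a few times */
                        return rc;
        }
}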
@@ -473,6 +477,7 @@ tape_alloc_device(void)
         *device->modeset_byte = 0;
         device->first_minor = -1;
         atomic_set(&device->ref_count, 1);
+        INIT_WORK(&device->tape_dnr, tape_delayed_next_request, device);
 
         return device;
 }
@@ -708,54 +713,119 @@ tape_free_request (struct tape_request * request)
         kfree(request);
 }
 
+static inline int
+__tape_start_io(struct tape_device *device, struct tape_request *request)
+{
+        int rc;
+
+#ifdef CONFIG_S390_TAPE_BLOCK
+        if (request->op == TO_BLOCK)
+                device->discipline->check_locate(device, request);
+#endif
+        rc = ccw_device_start(
+                device->cdev,
+                request->cpaddr,
+                (unsigned long) request,
+                0x00,
+                request->options
+        );
+        if (rc == 0) {
+                request->status = TAPE_REQUEST_IN_IO;
+        } else if (rc == -EBUSY) {
+                /* The common I/O subsystem is currently busy. Retry later. */
+                request->status = TAPE_REQUEST_QUEUED;
+                schedule_work(&device->tape_dnr);
+                rc = 0;
+        } else {
+                /* Start failed. Remove request and indicate failure. */
+                DBF_EVENT(1, "tape: start request failed with RC = %i\n", rc);
+        }
+        return rc;
+}
+
 static inline void
-__tape_do_io_list(struct tape_device *device)
+__tape_start_next_request(struct tape_device *device)
 {
         struct list_head *l, *n;
         struct tape_request *request;
         int rc;
 
-        DBF_LH(6, "__tape_do_io_list(%p)\n", device);
+        DBF_LH(6, "__tape_start_next_request(%p)\n", device);
         /*
          * Try to start each request on request queue until one is
          * started successful.
          */
         list_for_each_safe(l, n, &device->req_queue) {
                 request = list_entry(l, struct tape_request, list);
-#ifdef CONFIG_S390_TAPE_BLOCK
-                if (request->op == TO_BLOCK)
-                        device->discipline->check_locate(device, request);
-#endif
-                rc = ccw_device_start(device->cdev, request->cpaddr,
-                                      (unsigned long) request, 0x00,
-                                      request->options);
-                if (rc == 0) {
-                        request->status = TAPE_REQUEST_IN_IO;
-                        break;
+
+                /*
+                 * Avoid race condition if bottom-half was triggered more than
+                 * once.
+                 */
+                if (request->status == TAPE_REQUEST_IN_IO)
+                        return;
+
+                /*
+                 * We wanted to cancel the request but the common I/O layer
+                 * was busy at that time. This can only happen if this
+                 * function is called by delayed_next_request.
+                 * Otherwise we start the next request on the queue.
+                 */
+                if (request->status == TAPE_REQUEST_CANCEL) {
+                        rc = __tape_cancel_io(device, request);
+                } else {
+                        rc = __tape_start_io(device, request);
                 }
-                /* Start failed. Remove request and indicate failure. */
-                DBF_EVENT(1, "tape: DOIO failed with er = %i\n", rc);
+                if (rc == 0)
+                        return;
 
-                /* Set ending status and do callback. */
+                /* Set ending status. */
                 request->rc = rc;
                 request->status = TAPE_REQUEST_DONE;
-                __tape_remove_request(device, request);
+
+                /* Remove from request queue. */
+                list_del(&request->list);
+
+                /* Do callback. */
+                if (request->callback != NULL)
+                        request->callback(request, request->callback_data);
         }
 }
 
 static void
-__tape_remove_request(struct tape_device *device, struct tape_request *request)
+tape_delayed_next_request(void *data)
 {
-        /* Remove from request queue. */
-        list_del(&request->list);
+        struct tape_device *device;
 
-        /* Do callback. */
-        if (request->callback != NULL)
-                request->callback(request, request->callback_data);
+        device = (struct tape_device *) data;
+        DBF_LH(6, "tape_delayed_next_request(%p)\n", device);
+        spin_lock_irq(get_ccwdev_lock(device->cdev));
+        __tape_start_next_request(device);
+        spin_unlock_irq(get_ccwdev_lock(device->cdev));
+}
+
+static inline void
+__tape_end_request(
+        struct tape_device *    device,
+        struct tape_request *   request,
+        int                     rc)
+{
+        DBF_LH(6, "__tape_end_request(%p, %p, %i)\n", device, request, rc);
+        if (request) {
+                request->rc = rc;
+                request->status = TAPE_REQUEST_DONE;
+
+                /* Remove from request queue. */
+                list_del(&request->list);
+
+                /* Do callback. */
+                if (request->callback != NULL)
+                        request->callback(request, request->callback_data);
+        }
 
         /* Start next request. */
         if (!list_empty(&device->req_queue))
-                __tape_do_io_list(device);
+                __tape_start_next_request(device);
 }
 
 /*
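[Editor's note] The large hunk above splits the old __tape_do_io_list()/__tape_remove_request() pair into three helpers: __tape_start_io() issues a single request (deferring via tape_dnr on -EBUSY), __tape_start_next_request() walks the queue and either cancels or starts depending on request->status, and __tape_end_request() finishes a request exactly once (set rc, mark TAPE_REQUEST_DONE, unlink, run the callback) before kicking the queue again. A minimal, self-contained sketch of that "end one request, then start the next" shape, built on the kernel list API with invented toy_* names:

#include <linux/list.h>

struct toy_req {
        struct list_head        list;
        int                     rc;
        int                     done;
        void                    (*callback)(struct toy_req *, void *);
        void                    *callback_data;
};

static LIST_HEAD(toy_queue);

/* Stand-in for __tape_start_io(); always succeeds in this sketch. */
static int toy_start(struct toy_req *req)
{
        return 0;
}

static void toy_start_next(void)
{
        struct toy_req *req;

        if (list_empty(&toy_queue))
                return;
        req = list_entry(toy_queue.next, struct toy_req, list);
        toy_start(req);
}

/* Shape of __tape_end_request(): finish exactly once, then make progress. */
static void toy_end_request(struct toy_req *req, int rc)
{
        if (req) {
                req->rc = rc;
                req->done = 1;
                list_del(&req->list);
                if (req->callback != NULL)
                        req->callback(req, req->callback_data);
        }
        if (!list_empty(&toy_queue))
                toy_start_next();
}

Centralizing completion in one helper is what lets the interrupt handler further down drop its "final" bookkeeping flag and simply call __tape_end_request() on every terminal path.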
@@ -812,7 +882,7 @@ tape_dump_sense_dbf(struct tape_device *device, struct tape_request *request,
  * the device lock held.
  */
 static inline int
-__tape_do_io(struct tape_device *device, struct tape_request *request)
+__tape_start_request(struct tape_device *device, struct tape_request *request)
 {
         int rc;
 
@@ -837,24 +907,16 @@ __tape_do_io(struct tape_device *device, struct tape_request *request)
 
         if (list_empty(&device->req_queue)) {
                 /* No other requests are on the queue. Start this one. */
-#ifdef CONFIG_S390_TAPE_BLOCK
-                if (request->op == TO_BLOCK)
-                        device->discipline->check_locate(device, request);
-#endif
-                rc = ccw_device_start(device->cdev, request->cpaddr,
-                                      (unsigned long) request, 0x00,
-                                      request->options);
-                if (rc) {
-                        DBF_EVENT(1, "tape: DOIO failed with rc = %i\n", rc);
+                rc = __tape_start_io(device, request);
+                if (rc)
                         return rc;
-                }
+
                 DBF_LH(5, "Request %p added for execution.\n", request);
                 list_add(&request->list, &device->req_queue);
-                request->status = TAPE_REQUEST_IN_IO;
         } else {
                 DBF_LH(5, "Request %p add to queue.\n", request);
-                list_add_tail(&request->list, &device->req_queue);
                 request->status = TAPE_REQUEST_QUEUED;
+                list_add_tail(&request->list, &device->req_queue);
         }
         return 0;
 }
@@ -872,7 +934,7 @@ tape_do_io_async(struct tape_device *device, struct tape_request *request)
 
         spin_lock_irq(get_ccwdev_lock(device->cdev));
         /* Add request to request queue and try to start it. */
-        rc = __tape_do_io(device, request);
+        rc = __tape_start_request(device, request);
         spin_unlock_irq(get_ccwdev_lock(device->cdev));
         return rc;
 }
@@ -901,7 +963,7 @@ tape_do_io(struct tape_device *device, struct tape_request *request)
         request->callback = __tape_wake_up;
         request->callback_data = &wq;
         /* Add request to request queue and try to start it. */
-        rc = __tape_do_io(device, request);
+        rc = __tape_start_request(device, request);
         spin_unlock_irq(get_ccwdev_lock(device->cdev));
         if (rc)
                 return rc;
@@ -935,7 +997,7 @@ tape_do_io_interruptible(struct tape_device *device,
         /* Setup callback */
         request->callback = __tape_wake_up_interruptible;
         request->callback_data = &wq;
-        rc = __tape_do_io(device, request);
+        rc = __tape_start_request(device, request);
         spin_unlock_irq(get_ccwdev_lock(device->cdev));
         if (rc)
                 return rc;
@@ -944,36 +1006,27 @@ tape_do_io_interruptible(struct tape_device *device,
         if (rc != -ERESTARTSYS)
                 /* Request finished normally. */
                 return request->rc;
+
         /* Interrupted by a signal. We have to stop the current request. */
         spin_lock_irq(get_ccwdev_lock(device->cdev));
-        rc = __tape_halt_io(device, request);
+        rc = __tape_cancel_io(device, request);
+        spin_unlock_irq(get_ccwdev_lock(device->cdev));
         if (rc == 0) {
+                /* Wait for the interrupt that acknowledges the halt. */
+                do {
+                        rc = wait_event_interruptible(
+                                wq,
+                                (request->callback == NULL)
+                        );
+                } while (rc != -ERESTARTSYS);
+
                 DBF_EVENT(3, "IO stopped on %08x\n", device->cdev_id);
                 rc = -ERESTARTSYS;
         }
-        spin_unlock_irq(get_ccwdev_lock(device->cdev));
         return rc;
 }
 
 /*
- * Handle requests that return an i/o error in the irb.
- */
-static inline void
-tape_handle_killed_request(
-        struct tape_device *device,
-        struct tape_request *request)
-{
-        if(request != NULL) {
-                /* Set ending status. FIXME: Should the request be retried? */
-                request->rc = -EIO;
-                request->status = TAPE_REQUEST_DONE;
-                __tape_remove_request(device, request);
-        } else {
-                __tape_do_io_list(device);
-        }
-}
-
-/*
  * Tape interrupt routine, called from the ccw_device layer
  */
 static void
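[Editor's note] tape_do_io_interruptible() now releases the ccw device lock right after issuing the cancel and only afterwards waits for the interrupt that acknowledges it (delivered through the request callback run by __tape_do_irq()); the old code held the lock across the whole error path. A generic, hedged sketch of that "issue under the lock, wait outside it" ordering, with invented toy_* names (in the driver the wake-up comes from the request callback, which is not shown in this hunk):

#include <linux/wait.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(toy_lock);
static DECLARE_WAIT_QUEUE_HEAD(toy_wq);
static int toy_acknowledged;

/* Runs from interrupt context once the cancelled I/O has really ended. */
static void toy_irq_ack(void)
{
        toy_acknowledged = 1;
        wake_up_interruptible(&toy_wq);
}

static int toy_cancel_and_wait(void)
{
        spin_lock_irq(&toy_lock);
        /* issue the cancel here, as __tape_cancel_io() does in the driver */
        spin_unlock_irq(&toy_lock);

        /* Sleep outside the lock until toy_irq_ack() has run. */
        return wait_event_interruptible(toy_wq, toy_acknowledged);
}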
@@ -981,7 +1034,6 @@ __tape_do_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
 {
         struct tape_device *device;
         struct tape_request *request;
-        int final;
         int rc;
 
         device = (struct tape_device *) cdev->dev.driver_data;
@@ -996,12 +1048,13 @@ __tape_do_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
 
         /* On special conditions irb is an error pointer */
         if (IS_ERR(irb)) {
+                /* FIXME: What to do with the request? */
                 switch (PTR_ERR(irb)) {
                         case -ETIMEDOUT:
                                 PRINT_WARN("(%s): Request timed out\n",
                                         cdev->dev.bus_id);
                         case -EIO:
-                                tape_handle_killed_request(device, request);
+                                __tape_end_request(device, request, -EIO);
                                 break;
                         default:
                                 PRINT_ERR("(%s): Unexpected i/o error %li\n",
@@ -1011,6 +1064,21 @@ __tape_do_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
                 return;
         }
 
+        /*
+         * If the condition code is not zero and the start function bit is
+         * still set, this is an deferred error and the last start I/O did
+         * not succeed. Restart the request now.
+         */
+        if (irb->scsw.cc != 0 && (irb->scsw.fctl & SCSW_FCTL_START_FUNC)) {
+                PRINT_WARN("(%s): deferred cc=%i. restaring\n",
+                        cdev->dev.bus_id,
+                        irb->scsw.cc);
+                rc = __tape_start_io(device, request);
+                if (rc)
+                        __tape_end_request(device, request, rc);
+                return;
+        }
+
         /* May be an unsolicited irq */
         if(request != NULL)
                 request->rescnt = irb->scsw.count;
@@ -1042,7 +1110,7 @@ __tape_do_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
          * To detect these request the state will be set to TAPE_REQUEST_DONE.
          */
         if(request != NULL && request->status == TAPE_REQUEST_DONE) {
-                __tape_remove_request(device, request);
+                __tape_end_request(device, request, -EIO);
                 return;
         }
 
@@ -1054,51 +1122,34 @@ __tape_do_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
          * rc == TAPE_IO_RETRY: request finished but needs another go.
          * rc == TAPE_IO_STOP: request needs to get terminated.
          */
-        final = 0;
         switch (rc) {
                 case TAPE_IO_SUCCESS:
                         /* Upon normal completion the device _is_ online */
                         device->tape_generic_status |= GMT_ONLINE(~0);
-                        final = 1;
+                        __tape_end_request(device, request, rc);
                         break;
                 case TAPE_IO_PENDING:
                         break;
                 case TAPE_IO_RETRY:
-#ifdef CONFIG_S390_TAPE_BLOCK
-                        if (request->op == TO_BLOCK)
-                                device->discipline->check_locate(device, request);
-#endif
-                        rc = ccw_device_start(cdev, request->cpaddr,
-                                              (unsigned long) request, 0x00,
-                                              request->options);
-                        if (rc) {
-                                DBF_EVENT(1, "tape: DOIO failed with er = %i\n", rc);
-                                final = 1;
-                        }
-                        break;
-                case TAPE_IO_STOP:
-                        __tape_halt_io(device, request);
-                        break;
-                default:
-                        if (rc > 0) {
-                                DBF_EVENT(6, "xunknownrc\n");
-                                PRINT_ERR("Invalid return code from discipline "
-                                          "interrupt function.\n");
-                                rc = -EIO;
-                        }
-                        final = 1;
-                        break;
-        }
-        if (final) {
-                /* May be an unsolicited irq */
-                if(request != NULL) {
-                        /* Set ending status. */
-                        request->rc = rc;
-                        request->status = TAPE_REQUEST_DONE;
-                        __tape_remove_request(device, request);
-                } else {
-                        __tape_do_io_list(device);
-                }
+                        rc = __tape_start_io(device, request);
+                        if (rc)
+                                __tape_end_request(device, request, rc);
+                        break;
+                case TAPE_IO_STOP:
+                        rc = __tape_cancel_io(device, request);
+                        if (rc)
+                                __tape_end_request(device, request, rc);
+                        break;
+                default:
+                        if (rc > 0) {
+                                DBF_EVENT(6, "xunknownrc\n");
+                                PRINT_ERR("Invalid return code from discipline "
+                                          "interrupt function.\n");
+                                __tape_end_request(device, request, -EIO);
+                        } else {
+                                __tape_end_request(device, request, rc);
+                        }
+                        break;
         }
 }
 
@@ -1191,7 +1242,7 @@
 #ifdef DBF_LIKE_HELL
         debug_set_level(TAPE_DBF_AREA, 6);
 #endif
-        DBF_EVENT(3, "tape init: ($Revision: 1.51 $)\n");
+        DBF_EVENT(3, "tape init: ($Revision: 1.54 $)\n");
         tape_proc_init();
         tapechar_init ();
         tapeblock_init ();
@@ -1216,7 +1267,7 @@ tape_exit(void)
 MODULE_AUTHOR("(C) 2001 IBM Deutschland Entwicklung GmbH by Carsten Otte and "
               "Michael Holzheu (cotte@de.ibm.com,holzheu@de.ibm.com)");
 MODULE_DESCRIPTION("Linux on zSeries channel attached "
-                   "tape device driver ($Revision: 1.51 $)");
+                   "tape device driver ($Revision: 1.54 $)");
 MODULE_LICENSE("GPL");
 
 module_init(tape_init);