Diffstat (limited to 'drivers/s390')
25 files changed, 247 insertions, 1487 deletions
diff --git a/drivers/s390/block/Kconfig b/drivers/s390/block/Kconfig
index 6912399d0937..6f50cc9323d9 100644
--- a/drivers/s390/block/Kconfig
+++ b/drivers/s390/block/Kconfig
@@ -55,21 +55,13 @@ config DASD_DIAG | |||
55 | Disks under VM. If you are not running under VM or unsure what it is, | 55 | Disks under VM. If you are not running under VM or unsure what it is, |
56 | say "N". | 56 | say "N". |
57 | 57 | ||
58 | config DASD_EER | ||
59 | tristate "Extended error reporting (EER)" | ||
60 | depends on DASD | ||
61 | help | ||
62 | This driver provides a character device interface to the | ||
63 | DASD extended error reporting. This is only needed if you want to | ||
64 | use applications written for the EER facility. | ||
65 | |||
66 | config DASD_CMB | 58 | config DASD_CMB |
67 | tristate "Compatibility interface for DASD channel measurement blocks" | 59 | tristate "Compatibility interface for DASD channel measurement blocks" |
68 | depends on DASD | 60 | depends on DASD |
69 | help | 61 | help |
70 | This driver provides an additional interface to the channel | 62 | This driver provides an additional interface to the channel measurement |
71 | measurement facility, which is normally accessed though sysfs, with | 63 | facility, which is normally accessed though sysfs, with a set of |
72 | a set of ioctl functions specific to the dasd driver. | 64 | ioctl functions specific to the dasd driver. |
73 | This is only needed if you want to use applications written for | 65 | This is only needed if you want to use applications written for |
74 | linux-2.4 dasd channel measurement facility interface. | 66 | linux-2.4 dasd channel measurement facility interface. |
75 | 67 | ||
diff --git a/drivers/s390/block/Makefile b/drivers/s390/block/Makefile
index 0c0d871e8f51..58c6780134f7 100644
--- a/drivers/s390/block/Makefile
+++ b/drivers/s390/block/Makefile
@@ -5,7 +5,6 @@ | |||
5 | dasd_eckd_mod-objs := dasd_eckd.o dasd_3990_erp.o dasd_9343_erp.o | 5 | dasd_eckd_mod-objs := dasd_eckd.o dasd_3990_erp.o dasd_9343_erp.o |
6 | dasd_fba_mod-objs := dasd_fba.o dasd_3370_erp.o dasd_9336_erp.o | 6 | dasd_fba_mod-objs := dasd_fba.o dasd_3370_erp.o dasd_9336_erp.o |
7 | dasd_diag_mod-objs := dasd_diag.o | 7 | dasd_diag_mod-objs := dasd_diag.o |
8 | dasd_eer_mod-objs := dasd_eer.o | ||
9 | dasd_mod-objs := dasd.o dasd_ioctl.o dasd_proc.o dasd_devmap.o \ | 8 | dasd_mod-objs := dasd.o dasd_ioctl.o dasd_proc.o dasd_devmap.o \ |
10 | dasd_genhd.o dasd_erp.o | 9 | dasd_genhd.o dasd_erp.o |
11 | 10 | ||
@@ -14,6 +13,5 @@ obj-$(CONFIG_DASD_DIAG) += dasd_diag_mod.o | |||
14 | obj-$(CONFIG_DASD_ECKD) += dasd_eckd_mod.o | 13 | obj-$(CONFIG_DASD_ECKD) += dasd_eckd_mod.o |
15 | obj-$(CONFIG_DASD_FBA) += dasd_fba_mod.o | 14 | obj-$(CONFIG_DASD_FBA) += dasd_fba_mod.o |
16 | obj-$(CONFIG_DASD_CMB) += dasd_cmb.o | 15 | obj-$(CONFIG_DASD_CMB) += dasd_cmb.o |
17 | obj-$(CONFIG_DASD_EER) += dasd_eer.o | ||
18 | obj-$(CONFIG_BLK_DEV_XPRAM) += xpram.o | 16 | obj-$(CONFIG_BLK_DEV_XPRAM) += xpram.o |
19 | obj-$(CONFIG_DCSSBLK) += dcssblk.o | 17 | obj-$(CONFIG_DCSSBLK) += dcssblk.o |
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 08c88fcd8963..af1d5b404cee 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -18,7 +18,6 @@ | |||
18 | #include <linux/slab.h> | 18 | #include <linux/slab.h> |
19 | #include <linux/buffer_head.h> | 19 | #include <linux/buffer_head.h> |
20 | #include <linux/hdreg.h> | 20 | #include <linux/hdreg.h> |
21 | #include <linux/notifier.h> | ||
22 | 21 | ||
23 | #include <asm/ccwdev.h> | 22 | #include <asm/ccwdev.h> |
24 | #include <asm/ebcdic.h> | 23 | #include <asm/ebcdic.h> |
@@ -58,7 +57,6 @@ static void dasd_int_handler(struct ccw_device *, unsigned long, struct irb *); | |||
58 | static void dasd_flush_ccw_queue(struct dasd_device *, int); | 57 | static void dasd_flush_ccw_queue(struct dasd_device *, int); |
59 | static void dasd_tasklet(struct dasd_device *); | 58 | static void dasd_tasklet(struct dasd_device *); |
60 | static void do_kick_device(void *data); | 59 | static void do_kick_device(void *data); |
61 | static void dasd_disable_eer(struct dasd_device *device); | ||
62 | 60 | ||
63 | /* | 61 | /* |
64 | * SECTION: Operations on the device structure. | 62 | * SECTION: Operations on the device structure. |
@@ -153,10 +151,13 @@ dasd_state_new_to_known(struct dasd_device *device) | |||
153 | static inline void | 151 | static inline void |
154 | dasd_state_known_to_new(struct dasd_device * device) | 152 | dasd_state_known_to_new(struct dasd_device * device) |
155 | { | 153 | { |
156 | /* disable extended error reporting for this device */ | ||
157 | dasd_disable_eer(device); | ||
158 | /* Forget the discipline information. */ | 154 | /* Forget the discipline information. */ |
155 | if (device->discipline) | ||
156 | module_put(device->discipline->owner); | ||
159 | device->discipline = NULL; | 157 | device->discipline = NULL; |
158 | if (device->base_discipline) | ||
159 | module_put(device->base_discipline->owner); | ||
160 | device->base_discipline = NULL; | ||
160 | device->state = DASD_STATE_NEW; | 161 | device->state = DASD_STATE_NEW; |
161 | 162 | ||
162 | dasd_free_queue(device); | 163 | dasd_free_queue(device); |
@@ -871,9 +872,6 @@ dasd_handle_state_change_pending(struct dasd_device *device) | |||
871 | struct dasd_ccw_req *cqr; | 872 | struct dasd_ccw_req *cqr; |
872 | struct list_head *l, *n; | 873 | struct list_head *l, *n; |
873 | 874 | ||
874 | /* first of all call extended error reporting */ | ||
875 | dasd_write_eer_trigger(DASD_EER_STATECHANGE, device, NULL); | ||
876 | |||
877 | device->stopped &= ~DASD_STOPPED_PENDING; | 875 | device->stopped &= ~DASD_STOPPED_PENDING; |
878 | 876 | ||
879 | /* restart all 'running' IO on queue */ | 877 | /* restart all 'running' IO on queue */ |
@@ -1093,19 +1091,6 @@ restart: | |||
1093 | } | 1091 | } |
1094 | goto restart; | 1092 | goto restart; |
1095 | } | 1093 | } |
1096 | |||
1097 | /* first of all call extended error reporting */ | ||
1098 | if (device->eer && cqr->status == DASD_CQR_FAILED) { | ||
1099 | dasd_write_eer_trigger(DASD_EER_FATALERROR, | ||
1100 | device, cqr); | ||
1101 | |||
1102 | /* restart request */ | ||
1103 | cqr->status = DASD_CQR_QUEUED; | ||
1104 | cqr->retries = 255; | ||
1105 | device->stopped |= DASD_STOPPED_QUIESCE; | ||
1106 | goto restart; | ||
1107 | } | ||
1108 | |||
1109 | /* Process finished ERP request. */ | 1094 | /* Process finished ERP request. */ |
1110 | if (cqr->refers) { | 1095 | if (cqr->refers) { |
1111 | __dasd_process_erp(device, cqr); | 1096 | __dasd_process_erp(device, cqr); |
@@ -1243,8 +1228,7 @@ __dasd_start_head(struct dasd_device * device) | |||
1243 | cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, list); | 1228 | cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, list); |
1244 | /* check FAILFAST */ | 1229 | /* check FAILFAST */ |
1245 | if (device->stopped & ~DASD_STOPPED_PENDING && | 1230 | if (device->stopped & ~DASD_STOPPED_PENDING && |
1246 | test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) && | 1231 | test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags)) { |
1247 | (!device->eer)) { | ||
1248 | cqr->status = DASD_CQR_FAILED; | 1232 | cqr->status = DASD_CQR_FAILED; |
1249 | dasd_schedule_bh(device); | 1233 | dasd_schedule_bh(device); |
1250 | } | 1234 | } |
@@ -1880,9 +1864,10 @@ dasd_generic_remove (struct ccw_device *cdev) | |||
1880 | */ | 1864 | */ |
1881 | int | 1865 | int |
1882 | dasd_generic_set_online (struct ccw_device *cdev, | 1866 | dasd_generic_set_online (struct ccw_device *cdev, |
1883 | struct dasd_discipline *discipline) | 1867 | struct dasd_discipline *base_discipline) |
1884 | 1868 | ||
1885 | { | 1869 | { |
1870 | struct dasd_discipline *discipline; | ||
1886 | struct dasd_device *device; | 1871 | struct dasd_device *device; |
1887 | int rc; | 1872 | int rc; |
1888 | 1873 | ||
@@ -1890,6 +1875,7 @@ dasd_generic_set_online (struct ccw_device *cdev, | |||
1890 | if (IS_ERR(device)) | 1875 | if (IS_ERR(device)) |
1891 | return PTR_ERR(device); | 1876 | return PTR_ERR(device); |
1892 | 1877 | ||
1878 | discipline = base_discipline; | ||
1893 | if (device->features & DASD_FEATURE_USEDIAG) { | 1879 | if (device->features & DASD_FEATURE_USEDIAG) { |
1894 | if (!dasd_diag_discipline_pointer) { | 1880 | if (!dasd_diag_discipline_pointer) { |
1895 | printk (KERN_WARNING | 1881 | printk (KERN_WARNING |
@@ -1901,6 +1887,16 @@ dasd_generic_set_online (struct ccw_device *cdev, | |||
1901 | } | 1887 | } |
1902 | discipline = dasd_diag_discipline_pointer; | 1888 | discipline = dasd_diag_discipline_pointer; |
1903 | } | 1889 | } |
1890 | if (!try_module_get(base_discipline->owner)) { | ||
1891 | dasd_delete_device(device); | ||
1892 | return -EINVAL; | ||
1893 | } | ||
1894 | if (!try_module_get(discipline->owner)) { | ||
1895 | module_put(base_discipline->owner); | ||
1896 | dasd_delete_device(device); | ||
1897 | return -EINVAL; | ||
1898 | } | ||
1899 | device->base_discipline = base_discipline; | ||
1904 | device->discipline = discipline; | 1900 | device->discipline = discipline; |
1905 | 1901 | ||
1906 | rc = discipline->check_device(device); | 1902 | rc = discipline->check_device(device); |
@@ -1909,6 +1905,8 @@ dasd_generic_set_online (struct ccw_device *cdev, | |||
1909 | "dasd_generic couldn't online device %s " | 1905 | "dasd_generic couldn't online device %s " |
1910 | "with discipline %s rc=%i\n", | 1906 | "with discipline %s rc=%i\n", |
1911 | cdev->dev.bus_id, discipline->name, rc); | 1907 | cdev->dev.bus_id, discipline->name, rc); |
1908 | module_put(discipline->owner); | ||
1909 | module_put(base_discipline->owner); | ||
1912 | dasd_delete_device(device); | 1910 | dasd_delete_device(device); |
1913 | return rc; | 1911 | return rc; |
1914 | } | 1912 | } |
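The two hunks above introduce module reference counting for the disciplines: dasd_generic_set_online() now takes a reference on both the base discipline and the discipline actually used (dropping them again on every failure path), and dasd_state_known_to_new() releases them when the device is torn down. Below is a condensed, stand-alone sketch of that acquire/release pairing; the stub types and function names are simplified stand-ins for illustration, not the driver's exact code.

    /*
     * Condensed sketch of the discipline refcounting added above; the types
     * are trimmed stand-ins for the real dasd structures, kept only to show
     * the try_module_get()/module_put() pairing.
     */
    #include <linux/module.h>
    #include <linux/errno.h>

    struct discipline_stub {
            struct module *owner;
    };

    struct device_stub {
            struct discipline_stub *base_discipline;
            struct discipline_stub *discipline;
    };

    /* take a reference on both disciplines before using them */
    static int stub_set_online(struct device_stub *device,
                               struct discipline_stub *base,
                               struct discipline_stub *disc)
    {
            if (!try_module_get(base->owner))
                    return -EINVAL;
            if (!try_module_get(disc->owner)) {
                    module_put(base->owner);        /* undo the first get on failure */
                    return -EINVAL;
            }
            device->base_discipline = base;
            device->discipline = disc;
            return 0;
    }

    /* drop the references in the teardown path (known -> new) */
    static void stub_known_to_new(struct device_stub *device)
    {
            if (device->discipline)
                    module_put(device->discipline->owner);
            device->discipline = NULL;
            if (device->base_discipline)
                    module_put(device->base_discipline->owner);
            device->base_discipline = NULL;
    }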
@@ -1986,9 +1984,6 @@ dasd_generic_notify(struct ccw_device *cdev, int event) | |||
1986 | switch (event) { | 1984 | switch (event) { |
1987 | case CIO_GONE: | 1985 | case CIO_GONE: |
1988 | case CIO_NO_PATH: | 1986 | case CIO_NO_PATH: |
1989 | /* first of all call extended error reporting */ | ||
1990 | dasd_write_eer_trigger(DASD_EER_NOPATH, device, NULL); | ||
1991 | |||
1992 | if (device->state < DASD_STATE_BASIC) | 1987 | if (device->state < DASD_STATE_BASIC) |
1993 | break; | 1988 | break; |
1994 | /* Device is active. We want to keep it. */ | 1989 | /* Device is active. We want to keep it. */ |
@@ -2046,51 +2041,6 @@ dasd_generic_auto_online (struct ccw_driver *dasd_discipline_driver) | |||
2046 | put_driver(drv); | 2041 | put_driver(drv); |
2047 | } | 2042 | } |
2048 | 2043 | ||
2049 | /* | ||
2050 | * notifications for extended error reports | ||
2051 | */ | ||
2052 | static struct notifier_block *dasd_eer_chain; | ||
2053 | |||
2054 | int | ||
2055 | dasd_register_eer_notifier(struct notifier_block *nb) | ||
2056 | { | ||
2057 | return notifier_chain_register(&dasd_eer_chain, nb); | ||
2058 | } | ||
2059 | |||
2060 | int | ||
2061 | dasd_unregister_eer_notifier(struct notifier_block *nb) | ||
2062 | { | ||
2063 | return notifier_chain_unregister(&dasd_eer_chain, nb); | ||
2064 | } | ||
2065 | |||
2066 | /* | ||
2067 | * Notify the registered error reporting module of a problem | ||
2068 | */ | ||
2069 | void | ||
2070 | dasd_write_eer_trigger(unsigned int id, struct dasd_device *device, | ||
2071 | struct dasd_ccw_req *cqr) | ||
2072 | { | ||
2073 | if (device->eer) { | ||
2074 | struct dasd_eer_trigger temp; | ||
2075 | temp.id = id; | ||
2076 | temp.device = device; | ||
2077 | temp.cqr = cqr; | ||
2078 | notifier_call_chain(&dasd_eer_chain, DASD_EER_TRIGGER, | ||
2079 | (void *)&temp); | ||
2080 | } | ||
2081 | } | ||
2082 | |||
2083 | /* | ||
2084 | * Tell the registered error reporting module to disable error reporting for | ||
2085 | * a given device and to cleanup any private data structures on that device. | ||
2086 | */ | ||
2087 | static void | ||
2088 | dasd_disable_eer(struct dasd_device *device) | ||
2089 | { | ||
2090 | notifier_call_chain(&dasd_eer_chain, DASD_EER_DISABLE, (void *)device); | ||
2091 | } | ||
2092 | |||
2093 | |||
2094 | static int __init | 2044 | static int __init |
2095 | dasd_init(void) | 2045 | dasd_init(void) |
2096 | { | 2046 | { |
@@ -2172,11 +2122,6 @@ EXPORT_SYMBOL_GPL(dasd_generic_set_online); | |||
2172 | EXPORT_SYMBOL_GPL(dasd_generic_set_offline); | 2122 | EXPORT_SYMBOL_GPL(dasd_generic_set_offline); |
2173 | EXPORT_SYMBOL_GPL(dasd_generic_auto_online); | 2123 | EXPORT_SYMBOL_GPL(dasd_generic_auto_online); |
2174 | 2124 | ||
2175 | EXPORT_SYMBOL(dasd_register_eer_notifier); | ||
2176 | EXPORT_SYMBOL(dasd_unregister_eer_notifier); | ||
2177 | EXPORT_SYMBOL(dasd_write_eer_trigger); | ||
2178 | |||
2179 | |||
2180 | /* | 2125 | /* |
2181 | * Overrides for Emacs so that we follow Linus's tabbing style. | 2126 | * Overrides for Emacs so that we follow Linus's tabbing style. |
2182 | * Emacs will notice this stuff at the end of the file and automatically | 2127 | * Emacs will notice this stuff at the end of the file and automatically |
diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c
index c811380b9079..4ee0f934e325 100644
--- a/drivers/s390/block/dasd_3990_erp.c
+++ b/drivers/s390/block/dasd_3990_erp.c
@@ -1108,9 +1108,6 @@ dasd_3990_handle_env_data(struct dasd_ccw_req * erp, char *sense) | |||
1108 | case 0x0B: | 1108 | case 0x0B: |
1109 | DEV_MESSAGE(KERN_WARNING, device, "%s", | 1109 | DEV_MESSAGE(KERN_WARNING, device, "%s", |
1110 | "FORMAT F - Volume is suspended duplex"); | 1110 | "FORMAT F - Volume is suspended duplex"); |
1111 | /* call extended error reporting (EER) */ | ||
1112 | dasd_write_eer_trigger(DASD_EER_PPRCSUSPEND, device, | ||
1113 | erp->refers); | ||
1114 | break; | 1111 | break; |
1115 | case 0x0C: | 1112 | case 0x0C: |
1116 | DEV_MESSAGE(KERN_WARNING, device, "%s", | 1113 | DEV_MESSAGE(KERN_WARNING, device, "%s", |
diff --git a/drivers/s390/block/dasd_eckd.h b/drivers/s390/block/dasd_eckd.h
index e15dd7978050..bc3823d35223 100644
--- a/drivers/s390/block/dasd_eckd.h
+++ b/drivers/s390/block/dasd_eckd.h
@@ -29,7 +29,6 @@ | |||
29 | #define DASD_ECKD_CCW_PSF 0x27 | 29 | #define DASD_ECKD_CCW_PSF 0x27 |
30 | #define DASD_ECKD_CCW_RSSD 0x3e | 30 | #define DASD_ECKD_CCW_RSSD 0x3e |
31 | #define DASD_ECKD_CCW_LOCATE_RECORD 0x47 | 31 | #define DASD_ECKD_CCW_LOCATE_RECORD 0x47 |
32 | #define DASD_ECKD_CCW_SNSS 0x54 | ||
33 | #define DASD_ECKD_CCW_DEFINE_EXTENT 0x63 | 32 | #define DASD_ECKD_CCW_DEFINE_EXTENT 0x63 |
34 | #define DASD_ECKD_CCW_WRITE_MT 0x85 | 33 | #define DASD_ECKD_CCW_WRITE_MT 0x85 |
35 | #define DASD_ECKD_CCW_READ_MT 0x86 | 34 | #define DASD_ECKD_CCW_READ_MT 0x86 |
diff --git a/drivers/s390/block/dasd_eer.c b/drivers/s390/block/dasd_eer.c
deleted file mode 100644
index f70cd7716b24..000000000000
--- a/drivers/s390/block/dasd_eer.c
+++ /dev/null
@@ -1,1090 +0,0 @@ | |||
1 | /* | ||
2 | * character device driver for extended error reporting | ||
3 | * | ||
4 | * | ||
5 | * Copyright (C) 2005 IBM Corporation | ||
6 | * extended error reporting for DASD ECKD devices | ||
7 | * Author(s): Stefan Weinhuber <wein@de.ibm.com> | ||
8 | * | ||
9 | */ | ||
10 | |||
11 | #include <linux/init.h> | ||
12 | #include <linux/fs.h> | ||
13 | #include <linux/kernel.h> | ||
14 | #include <linux/miscdevice.h> | ||
15 | #include <linux/module.h> | ||
16 | #include <linux/moduleparam.h> | ||
17 | #include <linux/device.h> | ||
18 | #include <linux/workqueue.h> | ||
19 | #include <linux/poll.h> | ||
20 | #include <linux/notifier.h> | ||
21 | |||
22 | #include <asm/uaccess.h> | ||
23 | #include <asm/semaphore.h> | ||
24 | #include <asm/atomic.h> | ||
25 | #include <asm/ebcdic.h> | ||
26 | |||
27 | #include "dasd_int.h" | ||
28 | #include "dasd_eckd.h" | ||
29 | |||
30 | |||
31 | MODULE_LICENSE("GPL"); | ||
32 | |||
33 | MODULE_AUTHOR("Stefan Weinhuber <wein@de.ibm.com>"); | ||
34 | MODULE_DESCRIPTION("DASD extended error reporting module"); | ||
35 | |||
36 | |||
37 | #ifdef PRINTK_HEADER | ||
38 | #undef PRINTK_HEADER | ||
39 | #endif /* PRINTK_HEADER */ | ||
40 | #define PRINTK_HEADER "dasd(eer):" | ||
41 | |||
42 | |||
43 | |||
44 | |||
45 | |||
46 | /*****************************************************************************/ | ||
47 | /* the internal buffer */ | ||
48 | /*****************************************************************************/ | ||
49 | |||
50 | /* | ||
51 | * The internal buffer is meant to store obaque blobs of data, so it doesn't | ||
52 | * know of higher level concepts like triggers. | ||
53 | * It consists of a number of pages that are used as a ringbuffer. Each data | ||
54 | * blob is stored in a simple record that consists of an integer, which | ||
55 | * contains the size of the following data, and the data bytes themselfes. | ||
56 | * | ||
57 | * To allow for multiple independent readers we create one internal buffer | ||
58 | * each time the device is opened and destroy the buffer when the file is | ||
59 | * closed again. | ||
60 | * | ||
61 | * One record can be written to a buffer by using the functions | ||
62 | * - dasd_eer_start_record (one time per record to write the size to the buffer | ||
63 | * and reserve the space for the data) | ||
64 | * - dasd_eer_write_buffer (one or more times per record to write the data) | ||
65 | * The data can be written in several steps but you will have to compute | ||
66 | * the total size up front for the invocation of dasd_eer_start_record. | ||
67 | * If the ringbuffer is full, dasd_eer_start_record will remove the required | ||
68 | * number of old records. | ||
69 | * | ||
70 | * A record is typically read in two steps, first read the integer that | ||
71 | * specifies the size of the following data, then read the data. | ||
72 | * Both can be done by | ||
73 | * - dasd_eer_read_buffer | ||
74 | * | ||
75 | * For all mentioned functions you need to get the bufferlock first and keep it | ||
76 | * until a complete record is written or read. | ||
77 | */ | ||
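As a minimal illustration of the record framing described in the comment above (a length prefix followed by the payload bytes), here is a flat-buffer sketch with the page ring, wrap-around and bufferlock handling stripped out; it is not the driver code, only the framing idea.

    /* Illustration only: length-prefixed records on a flat buffer. */
    #include <string.h>

    static size_t frame_write(char *buf, const void *data, int count)
    {
            memcpy(buf, &count, sizeof(count));           /* record header: size */
            memcpy(buf + sizeof(count), data, count);     /* record payload      */
            return sizeof(count) + count;
    }

    static size_t frame_read(const char *buf, void *data, int *count)
    {
            memcpy(count, buf, sizeof(*count));           /* read the size first */
            memcpy(data, buf + sizeof(*count), *count);   /* then the payload    */
            return sizeof(*count) + *count;
    }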
78 | |||
79 | |||
80 | /* | ||
81 | * Alle information necessary to keep track of an internal buffer is kept in | ||
82 | * a struct eerbuffer. The buffer specific to a file pointer is strored in | ||
83 | * the private_data field of that file. To be able to write data to all | ||
84 | * existing buffers, each buffer is also added to the bufferlist. | ||
85 | * If the user doesn't want to read a complete record in one go, we have to | ||
86 | * keep track of the rest of the record. residual stores the number of bytes | ||
87 | * that are still to deliver. If the rest of the record is invalidated between | ||
88 | * two reads then residual will be set to -1 so that the next read will fail. | ||
89 | * All entries in the eerbuffer structure are protected with the bufferlock. | ||
90 | * To avoid races between writing to a buffer on the one side and creating | ||
91 | * and destroying buffers on the other side, the bufferlock must also be used | ||
92 | * to protect the bufferlist. | ||
93 | */ | ||
94 | |||
95 | struct eerbuffer { | ||
96 | struct list_head list; | ||
97 | char **buffer; | ||
98 | int buffersize; | ||
99 | int buffer_page_count; | ||
100 | int head; | ||
101 | int tail; | ||
102 | int residual; | ||
103 | }; | ||
104 | |||
105 | LIST_HEAD(bufferlist); | ||
106 | |||
107 | static spinlock_t bufferlock = SPIN_LOCK_UNLOCKED; | ||
108 | |||
109 | DECLARE_WAIT_QUEUE_HEAD(dasd_eer_read_wait_queue); | ||
110 | |||
111 | /* | ||
112 | * How many free bytes are available on the buffer. | ||
113 | * needs to be called with bufferlock held | ||
114 | */ | ||
115 | static int | ||
116 | dasd_eer_get_free_bytes(struct eerbuffer *eerb) | ||
117 | { | ||
118 | if (eerb->head < eerb->tail) { | ||
119 | return eerb->tail - eerb->head - 1; | ||
120 | } else | ||
121 | return eerb->buffersize - eerb->head + eerb->tail -1; | ||
122 | } | ||
123 | |||
124 | /* | ||
125 | * How many bytes of buffer space are used. | ||
126 | * needs to be called with bufferlock held | ||
127 | */ | ||
128 | static int | ||
129 | dasd_eer_get_filled_bytes(struct eerbuffer *eerb) | ||
130 | { | ||
131 | |||
132 | if (eerb->head >= eerb->tail) { | ||
133 | return eerb->head - eerb->tail; | ||
134 | } else | ||
135 | return eerb->buffersize - eerb->tail + eerb->head; | ||
136 | } | ||
137 | |||
138 | /* | ||
139 | * The dasd_eer_write_buffer function just copies count bytes of data | ||
140 | * to the buffer. Make sure to call dasd_eer_start_record first, to | ||
141 | * make sure that enough free space is available. | ||
142 | * needs to be called with bufferlock held | ||
143 | */ | ||
144 | static void | ||
145 | dasd_eer_write_buffer(struct eerbuffer *eerb, int count, char *data) | ||
146 | { | ||
147 | |||
148 | unsigned long headindex,localhead; | ||
149 | unsigned long rest, len; | ||
150 | char *nextdata; | ||
151 | |||
152 | nextdata = data; | ||
153 | rest = count; | ||
154 | while (rest > 0) { | ||
155 | headindex = eerb->head / PAGE_SIZE; | ||
156 | localhead = eerb->head % PAGE_SIZE; | ||
157 | len = min(rest, (PAGE_SIZE - localhead)); | ||
158 | memcpy(eerb->buffer[headindex]+localhead, nextdata, len); | ||
159 | nextdata += len; | ||
160 | rest -= len; | ||
161 | eerb->head += len; | ||
162 | if ( eerb->head == eerb->buffersize ) | ||
163 | eerb->head = 0; /* wrap around */ | ||
164 | if (eerb->head > eerb->buffersize) { | ||
165 | MESSAGE(KERN_ERR, "%s", "runaway buffer head."); | ||
166 | BUG(); | ||
167 | } | ||
168 | } | ||
169 | } | ||
170 | |||
171 | /* | ||
172 | * needs to be called with bufferlock held | ||
173 | */ | ||
174 | static int | ||
175 | dasd_eer_read_buffer(struct eerbuffer *eerb, int count, char *data) | ||
176 | { | ||
177 | |||
178 | unsigned long tailindex,localtail; | ||
179 | unsigned long rest, len, finalcount; | ||
180 | char *nextdata; | ||
181 | |||
182 | finalcount = min(count, dasd_eer_get_filled_bytes(eerb)); | ||
183 | nextdata = data; | ||
184 | rest = finalcount; | ||
185 | while (rest > 0) { | ||
186 | tailindex = eerb->tail / PAGE_SIZE; | ||
187 | localtail = eerb->tail % PAGE_SIZE; | ||
188 | len = min(rest, (PAGE_SIZE - localtail)); | ||
189 | memcpy(nextdata, eerb->buffer[tailindex]+localtail, len); | ||
190 | nextdata += len; | ||
191 | rest -= len; | ||
192 | eerb->tail += len; | ||
193 | if ( eerb->tail == eerb->buffersize ) | ||
194 | eerb->tail = 0; /* wrap around */ | ||
195 | if (eerb->tail > eerb->buffersize) { | ||
196 | MESSAGE(KERN_ERR, "%s", "runaway buffer tail."); | ||
197 | BUG(); | ||
198 | } | ||
199 | } | ||
200 | return finalcount; | ||
201 | } | ||
202 | |||
203 | /* | ||
204 | * Whenever you want to write a blob of data to the internal buffer you | ||
205 | * have to start by using this function first. It will write the number | ||
206 | * of bytes that will be written to the buffer. If necessary it will remove | ||
207 | * old records to make room for the new one. | ||
208 | * needs to be called with bufferlock held | ||
209 | */ | ||
210 | static int | ||
211 | dasd_eer_start_record(struct eerbuffer *eerb, int count) | ||
212 | { | ||
213 | int tailcount; | ||
214 | if (count + sizeof(count) > eerb->buffersize) | ||
215 | return -ENOMEM; | ||
216 | while (dasd_eer_get_free_bytes(eerb) < count + sizeof(count)) { | ||
217 | if (eerb->residual > 0) { | ||
218 | eerb->tail += eerb->residual; | ||
219 | if (eerb->tail >= eerb->buffersize) | ||
220 | eerb->tail -= eerb->buffersize; | ||
221 | eerb->residual = -1; | ||
222 | } | ||
223 | dasd_eer_read_buffer(eerb, sizeof(tailcount), | ||
224 | (char*)(&tailcount)); | ||
225 | eerb->tail += tailcount; | ||
226 | if (eerb->tail >= eerb->buffersize) | ||
227 | eerb->tail -= eerb->buffersize; | ||
228 | } | ||
229 | dasd_eer_write_buffer(eerb, sizeof(count), (char*)(&count)); | ||
230 | |||
231 | return 0; | ||
232 | }; | ||
233 | |||
234 | /* | ||
235 | * release pages that are not used anymore | ||
236 | */ | ||
237 | static void | ||
238 | dasd_eer_free_buffer_pages(char **buf, int no_pages) | ||
239 | { | ||
240 | int i; | ||
241 | |||
242 | for (i = 0; i < no_pages; ++i) { | ||
243 | free_page((unsigned long)buf[i]); | ||
244 | } | ||
245 | } | ||
246 | |||
247 | /* | ||
248 | * allocate a new set of memory pages | ||
249 | */ | ||
250 | static int | ||
251 | dasd_eer_allocate_buffer_pages(char **buf, int no_pages) | ||
252 | { | ||
253 | int i; | ||
254 | |||
255 | for (i = 0; i < no_pages; ++i) { | ||
256 | buf[i] = (char *) get_zeroed_page(GFP_KERNEL); | ||
257 | if (!buf[i]) { | ||
258 | dasd_eer_free_buffer_pages(buf, i); | ||
259 | return -ENOMEM; | ||
260 | } | ||
261 | } | ||
262 | return 0; | ||
263 | } | ||
264 | |||
265 | /* | ||
266 | * empty the buffer by resetting head and tail | ||
267 | * In case there is a half read data blob in the buffer, we set residual | ||
268 | * to -1 to indicate that the remainder of the blob is lost. | ||
269 | */ | ||
270 | static void | ||
271 | dasd_eer_purge_buffer(struct eerbuffer *eerb) | ||
272 | { | ||
273 | unsigned long flags; | ||
274 | |||
275 | spin_lock_irqsave(&bufferlock, flags); | ||
276 | if (eerb->residual > 0) | ||
277 | eerb->residual = -1; | ||
278 | eerb->tail=0; | ||
279 | eerb->head=0; | ||
280 | spin_unlock_irqrestore(&bufferlock, flags); | ||
281 | } | ||
282 | |||
283 | /* | ||
284 | * set the size of the buffer, newsize is the new number of pages to be used | ||
285 | * we don't try to copy any data back an forth, so any resize will also purge | ||
286 | * the buffer | ||
287 | */ | ||
288 | static int | ||
289 | dasd_eer_resize_buffer(struct eerbuffer *eerb, int newsize) | ||
290 | { | ||
291 | int i, oldcount, reuse; | ||
292 | char **new; | ||
293 | char **old; | ||
294 | unsigned long flags; | ||
295 | |||
296 | if (newsize < 1) | ||
297 | return -EINVAL; | ||
298 | if (eerb->buffer_page_count == newsize) { | ||
299 | /* documented behaviour is that any successfull invocation | ||
300 | * will purge all records */ | ||
301 | dasd_eer_purge_buffer(eerb); | ||
302 | return 0; | ||
303 | } | ||
304 | new = kmalloc(newsize*sizeof(char*), GFP_KERNEL); | ||
305 | if (!new) | ||
306 | return -ENOMEM; | ||
307 | |||
308 | reuse=min(eerb->buffer_page_count, newsize); | ||
309 | for (i = 0; i < reuse; ++i) { | ||
310 | new[i] = eerb->buffer[i]; | ||
311 | } | ||
312 | if (eerb->buffer_page_count < newsize) { | ||
313 | if (dasd_eer_allocate_buffer_pages( | ||
314 | &new[eerb->buffer_page_count], | ||
315 | newsize - eerb->buffer_page_count)) { | ||
316 | kfree(new); | ||
317 | return -ENOMEM; | ||
318 | } | ||
319 | } | ||
320 | |||
321 | spin_lock_irqsave(&bufferlock, flags); | ||
322 | old = eerb->buffer; | ||
323 | eerb->buffer = new; | ||
324 | if (eerb->residual > 0) | ||
325 | eerb->residual = -1; | ||
326 | eerb->tail = 0; | ||
327 | eerb->head = 0; | ||
328 | oldcount = eerb->buffer_page_count; | ||
329 | eerb->buffer_page_count = newsize; | ||
330 | spin_unlock_irqrestore(&bufferlock, flags); | ||
331 | |||
332 | if (oldcount > newsize) { | ||
333 | for (i = newsize; i < oldcount; ++i) { | ||
334 | free_page((unsigned long)old[i]); | ||
335 | } | ||
336 | } | ||
337 | kfree(old); | ||
338 | |||
339 | return 0; | ||
340 | } | ||
341 | |||
342 | |||
343 | /*****************************************************************************/ | ||
344 | /* The extended error reporting functionality */ | ||
345 | /*****************************************************************************/ | ||
346 | |||
347 | /* | ||
348 | * When a DASD device driver wants to report an error, it calls the | ||
349 | * function dasd_eer_write_trigger (via a notifier mechanism) and gives the | ||
350 | * respective trigger ID as parameter. | ||
351 | * Currently there are four kinds of triggers: | ||
352 | * | ||
353 | * DASD_EER_FATALERROR: all kinds of unrecoverable I/O problems | ||
354 | * DASD_EER_PPRCSUSPEND: PPRC was suspended | ||
355 | * DASD_EER_NOPATH: There is no path to the device left. | ||
356 | * DASD_EER_STATECHANGE: The state of the device has changed. | ||
357 | * | ||
358 | * For the first three triggers all required information can be supplied by | ||
359 | * the caller. For these triggers a record is written by the function | ||
360 | * dasd_eer_write_standard_trigger. | ||
361 | * | ||
362 | * When dasd_eer_write_trigger is called to write a DASD_EER_STATECHANGE | ||
363 | * trigger, we have to gather the necessary sense data first. We cannot queue | ||
364 | * the necessary SNSS (sense subsystem status) request immediatly, since we | ||
365 | * are likely to run in a deadlock situation. Instead, we schedule a | ||
366 | * work_struct that calls the function dasd_eer_sense_subsystem_status to | ||
367 | * create and start an SNSS request asynchronously. | ||
368 | * | ||
369 | * To avoid memory allocations at runtime, the necessary memory is allocated | ||
370 | * when the extended error reporting is enabled for a device (by | ||
371 | * dasd_eer_probe). There is one private eer data structure for each eer | ||
372 | * enabled DASD device. It contains memory for the work_struct, one SNSS cqr | ||
373 | * and a flags field that is used to coordinate the use of the cqr. The call | ||
374 | * to write a state change trigger can come in at any time, so we have one flag | ||
375 | * CQR_IN_USE that protects the cqr itself. When this flag indicates that the | ||
376 | * cqr is currently in use, dasd_eer_sense_subsystem_status cannot start a | ||
377 | * second request but sets the SNSS_REQUESTED flag instead. | ||
378 | * | ||
379 | * When the request is finished, the callback function dasd_eer_SNSS_cb | ||
380 | * is called. This function will invoke the function | ||
381 | * dasd_eer_write_SNSS_trigger to finally write the trigger. It will also | ||
382 | * check the SNSS_REQUESTED flag and if it is set it will call | ||
383 | * dasd_eer_sense_subsystem_status again. | ||
384 | * | ||
385 | * To avoid race conditions during the handling of the lock, the flags must | ||
386 | * be protected by the snsslock. | ||
387 | */ | ||
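Below is a condensed sketch of the CQR_IN_USE / SNSS_REQUESTED handshake described above, detached from the DASD specifics. The names kick() and done() are stand-ins for dasd_eer_sense_subsystem_status() and dasd_eer_SNSS_cb() further down in this file; the sketch only shows the locking and bit handling, not the actual request setup.

    /* Illustration of the flag handshake, not the driver code. */
    #include <linux/bitops.h>
    #include <linux/spinlock.h>

    #define CQR_IN_USE_BIT          0       /* the one preallocated request is busy */
    #define SNSS_REQUESTED_BIT      1       /* another sense run was asked for      */

    static DEFINE_SPINLOCK(handshake_lock);
    static unsigned long handshake_flags;

    static void kick(void)                  /* trigger side */
    {
            unsigned long flags;

            spin_lock_irqsave(&handshake_lock, flags);
            if (test_and_set_bit(CQR_IN_USE_BIT, &handshake_flags)) {
                    /* request already running: remember that a rerun is wanted */
                    set_bit(SNSS_REQUESTED_BIT, &handshake_flags);
                    spin_unlock_irqrestore(&handshake_lock, flags);
                    return;
            }
            spin_unlock_irqrestore(&handshake_lock, flags);
            /* ... build and start the single preallocated request here ... */
    }

    static void done(void)                  /* callback side */
    {
            unsigned long flags;
            int rerun;

            spin_lock_irqsave(&handshake_lock, flags);
            rerun = test_and_clear_bit(SNSS_REQUESTED_BIT, &handshake_flags);
            clear_bit(CQR_IN_USE_BIT, &handshake_flags);
            spin_unlock_irqrestore(&handshake_lock, flags);
            if (rerun)
                    kick();                 /* a trigger arrived while we were busy */
    }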
388 | |||
389 | struct dasd_eer_private { | ||
390 | struct dasd_ccw_req *cqr; | ||
391 | unsigned long flags; | ||
392 | struct work_struct worker; | ||
393 | }; | ||
394 | |||
395 | static void dasd_eer_destroy(struct dasd_device *device, | ||
396 | struct dasd_eer_private *eer); | ||
397 | static int | ||
398 | dasd_eer_write_trigger(struct dasd_eer_trigger *trigger); | ||
399 | static void dasd_eer_sense_subsystem_status(void *data); | ||
400 | static int dasd_eer_notify(struct notifier_block *self, | ||
401 | unsigned long action, void *data); | ||
402 | |||
403 | struct workqueue_struct *dasd_eer_workqueue; | ||
404 | |||
405 | #define SNSS_DATA_SIZE 44 | ||
406 | static spinlock_t snsslock = SPIN_LOCK_UNLOCKED; | ||
407 | |||
408 | #define DASD_EER_BUSID_SIZE 10 | ||
409 | struct dasd_eer_header { | ||
410 | __u32 total_size; | ||
411 | __u32 trigger; | ||
412 | __u64 tv_sec; | ||
413 | __u64 tv_usec; | ||
414 | char busid[DASD_EER_BUSID_SIZE]; | ||
415 | } __attribute__ ((packed)); | ||
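For reference, a hedged user-space sketch of how the records built around this header could be consumed. It assumes the misc device registered below appears as /dev/dasd_eer and that the supplied buffer is large enough for a whole record, so one read() returns header, sense data and the trailing "EOR" marker in one piece (dasd_eer_read below delivers at most one record per call); busid may not be NUL-terminated, hence the bounded print.

    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    struct eer_header {                     /* mirrors struct dasd_eer_header */
            uint32_t total_size;
            uint32_t trigger;
            uint64_t tv_sec;
            uint64_t tv_usec;
            char busid[10];
    } __attribute__ ((packed));

    int main(void)
    {
            char buf[4096];
            struct eer_header hdr;
            ssize_t len;
            int fd;

            fd = open("/dev/dasd_eer", O_RDONLY);
            if (fd < 0) {
                    perror("open");
                    return 1;
            }
            while ((len = read(fd, buf, sizeof(buf))) > 0) {
                    if ((size_t)len < sizeof(hdr))
                            continue;       /* partial record, ignored in this sketch */
                    memcpy(&hdr, buf, sizeof(hdr));
                    printf("trigger %u on %.10s, %u bytes total\n",
                           (unsigned)hdr.trigger, hdr.busid,
                           (unsigned)hdr.total_size);
            }
            close(fd);
            return 0;
    }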
416 | |||
417 | static struct notifier_block dasd_eer_nb = { | ||
418 | .notifier_call = dasd_eer_notify, | ||
419 | }; | ||
420 | |||
421 | /* | ||
422 | * flags for use with dasd_eer_private | ||
423 | */ | ||
424 | #define CQR_IN_USE 0 | ||
425 | #define SNSS_REQUESTED 1 | ||
426 | |||
427 | /* | ||
428 | * This function checks if extended error reporting is available for a given | ||
429 | * dasd_device. If yes, then it creates and returns a struct dasd_eer, | ||
430 | * otherwise it returns an -EPERM error pointer. | ||
431 | */ | ||
432 | struct dasd_eer_private * | ||
433 | dasd_eer_probe(struct dasd_device *device) | ||
434 | { | ||
435 | struct dasd_eer_private *private; | ||
436 | |||
437 | if (!(device && device->discipline | ||
438 | && !strcmp(device->discipline->name, "ECKD"))) { | ||
439 | return ERR_PTR(-EPERM); | ||
440 | } | ||
441 | /* allocate the private data structure */ | ||
442 | private = (struct dasd_eer_private *)kmalloc( | ||
443 | sizeof(struct dasd_eer_private), GFP_KERNEL); | ||
444 | if (!private) { | ||
445 | return ERR_PTR(-ENOMEM); | ||
446 | } | ||
447 | INIT_WORK(&private->worker, dasd_eer_sense_subsystem_status, | ||
448 | (void *)device); | ||
449 | private->cqr = dasd_kmalloc_request("ECKD", | ||
450 | 1 /* SNSS */ , | ||
451 | SNSS_DATA_SIZE , | ||
452 | device); | ||
453 | if (!private->cqr) { | ||
454 | kfree(private); | ||
455 | return ERR_PTR(-ENOMEM); | ||
456 | } | ||
457 | private->flags = 0; | ||
458 | return private; | ||
459 | }; | ||
460 | |||
461 | /* | ||
462 | * If our private SNSS request is queued, remove it from the | ||
463 | * dasd ccw queue so we can free the requests memory. | ||
464 | */ | ||
465 | static void | ||
466 | dasd_eer_dequeue_SNSS_request(struct dasd_device *device, | ||
467 | struct dasd_eer_private *eer) | ||
468 | { | ||
469 | struct list_head *lst, *nxt; | ||
470 | struct dasd_ccw_req *cqr, *erpcqr; | ||
471 | dasd_erp_fn_t erp_fn; | ||
472 | |||
473 | spin_lock_irq(get_ccwdev_lock(device->cdev)); | ||
474 | list_for_each_safe(lst, nxt, &device->ccw_queue) { | ||
475 | cqr = list_entry(lst, struct dasd_ccw_req, list); | ||
476 | /* we are looking for two kinds or requests */ | ||
477 | /* first kind: our SNSS request: */ | ||
478 | if (cqr == eer->cqr) { | ||
479 | if (cqr->status == DASD_CQR_IN_IO) | ||
480 | device->discipline->term_IO(cqr); | ||
481 | list_del(&cqr->list); | ||
482 | break; | ||
483 | } | ||
484 | /* second kind: ERP requests for our SNSS request */ | ||
485 | if (cqr->refers) { | ||
486 | /* If this erp request chain ends in our cqr, then */ | ||
487 | /* cal the erp_postaction to clean it up */ | ||
488 | erpcqr = cqr; | ||
489 | while (erpcqr->refers) { | ||
490 | erpcqr = erpcqr->refers; | ||
491 | } | ||
492 | if (erpcqr == eer->cqr) { | ||
493 | erp_fn = device->discipline->erp_postaction( | ||
494 | cqr); | ||
495 | erp_fn(cqr); | ||
496 | } | ||
497 | continue; | ||
498 | } | ||
499 | } | ||
500 | spin_unlock_irq(get_ccwdev_lock(device->cdev)); | ||
501 | } | ||
502 | |||
503 | /* | ||
504 | * This function dismantles a struct dasd_eer that was created by | ||
505 | * dasd_eer_probe. Since we want to free our private data structure, | ||
506 | * we must make sure that the memory is not in use anymore. | ||
507 | * We have to flush the work queue and remove a possible SNSS request | ||
508 | * from the dasd queue. | ||
509 | */ | ||
510 | static void | ||
511 | dasd_eer_destroy(struct dasd_device *device, struct dasd_eer_private *eer) | ||
512 | { | ||
513 | flush_workqueue(dasd_eer_workqueue); | ||
514 | dasd_eer_dequeue_SNSS_request(device, eer); | ||
515 | dasd_kfree_request(eer->cqr, device); | ||
516 | kfree(eer); | ||
517 | }; | ||
518 | |||
519 | /* | ||
520 | * enable the extended error reporting for a particular device | ||
521 | */ | ||
522 | static int | ||
523 | dasd_eer_enable_on_device(struct dasd_device *device) | ||
524 | { | ||
525 | void *eer; | ||
526 | if (!device) | ||
527 | return -ENODEV; | ||
528 | if (device->eer) | ||
529 | return 0; | ||
530 | if (!try_module_get(THIS_MODULE)) { | ||
531 | return -EINVAL; | ||
532 | } | ||
533 | eer = (void *)dasd_eer_probe(device); | ||
534 | if (IS_ERR(eer)) { | ||
535 | module_put(THIS_MODULE); | ||
536 | return PTR_ERR(eer); | ||
537 | } | ||
538 | device->eer = eer; | ||
539 | return 0; | ||
540 | } | ||
541 | |||
542 | /* | ||
543 | * enable the extended error reporting for a particular device | ||
544 | */ | ||
545 | static int | ||
546 | dasd_eer_disable_on_device(struct dasd_device *device) | ||
547 | { | ||
548 | struct dasd_eer_private *eer = device->eer; | ||
549 | |||
550 | if (!device) | ||
551 | return -ENODEV; | ||
552 | if (!device->eer) | ||
553 | return 0; | ||
554 | device->eer = NULL; | ||
555 | dasd_eer_destroy(device,eer); | ||
556 | module_put(THIS_MODULE); | ||
557 | |||
558 | return 0; | ||
559 | } | ||
560 | |||
561 | /* | ||
562 | * Set extended error reporting (eer) | ||
563 | * Note: This will be registered as a DASD ioctl, to be called on DASD devices. | ||
564 | */ | ||
565 | static int | ||
566 | dasd_ioctl_set_eer(struct block_device *bdev, int no, long args) | ||
567 | { | ||
568 | struct dasd_device *device; | ||
569 | int intval; | ||
570 | |||
571 | if (!capable(CAP_SYS_ADMIN)) | ||
572 | return -EACCES; | ||
573 | if (bdev != bdev->bd_contains) | ||
574 | /* Error-reporting is not allowed for partitions */ | ||
575 | return -EINVAL; | ||
576 | if (get_user(intval, (int __user *) args)) | ||
577 | return -EFAULT; | ||
578 | device = bdev->bd_disk->private_data; | ||
579 | if (device == NULL) | ||
580 | return -ENODEV; | ||
581 | |||
582 | intval = (intval != 0); | ||
583 | DEV_MESSAGE (KERN_DEBUG, device, | ||
584 | "set eer on device to %d", intval); | ||
585 | if (intval) | ||
586 | return dasd_eer_enable_on_device(device); | ||
587 | else | ||
588 | return dasd_eer_disable_on_device(device); | ||
589 | } | ||
590 | |||
591 | /* | ||
592 | * Get value of extended error reporting. | ||
593 | * Note: This will be registered as a DASD ioctl, to be called on DASD devices. | ||
594 | */ | ||
595 | static int | ||
596 | dasd_ioctl_get_eer(struct block_device *bdev, int no, long args) | ||
597 | { | ||
598 | struct dasd_device *device; | ||
599 | |||
600 | device = bdev->bd_disk->private_data; | ||
601 | if (device == NULL) | ||
602 | return -ENODEV; | ||
603 | return put_user((device->eer != NULL), (int __user *) args); | ||
604 | } | ||
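A hedged user-space sketch of the two ioctls above, assuming the BIODASDEERSET and BIODASDEERGET numbers are available from <asm/dasd.h> and that /dev/dasda names an EER-capable ECKD device (both are assumptions; adjust for a real system). Note that the set ioctl is rejected on partitions, so it must be issued on the whole device node.

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <asm/dasd.h>

    int main(void)
    {
            int fd, on = 1, state = -1;

            fd = open("/dev/dasda", O_RDONLY);      /* whole device, not a partition */
            if (fd < 0) {
                    perror("open");
                    return 1;
            }
            if (ioctl(fd, BIODASDEERSET, &on))      /* enable extended error reporting */
                    perror("BIODASDEERSET");
            if (ioctl(fd, BIODASDEERGET, &state) == 0)      /* read back current state */
                    printf("eer enabled: %d\n", state);
            close(fd);
            return 0;
    }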
605 | |||
606 | /* | ||
607 | * The following function can be used for those triggers that have | ||
608 | * all necessary data available when the function is called. | ||
609 | * If the parameter cqr is not NULL, the chain of requests will be searched | ||
610 | * for valid sense data, and all valid sense data sets will be added to | ||
611 | * the triggers data. | ||
612 | */ | ||
613 | static int | ||
614 | dasd_eer_write_standard_trigger(int trigger, struct dasd_device *device, | ||
615 | struct dasd_ccw_req *cqr) | ||
616 | { | ||
617 | struct dasd_ccw_req *temp_cqr; | ||
618 | int data_size; | ||
619 | struct timeval tv; | ||
620 | struct dasd_eer_header header; | ||
621 | unsigned long flags; | ||
622 | struct eerbuffer *eerb; | ||
623 | |||
624 | /* go through cqr chain and count the valid sense data sets */ | ||
625 | temp_cqr = cqr; | ||
626 | data_size = 0; | ||
627 | while (temp_cqr) { | ||
628 | if (temp_cqr->irb.esw.esw0.erw.cons) | ||
629 | data_size += 32; | ||
630 | temp_cqr = temp_cqr->refers; | ||
631 | } | ||
632 | |||
633 | header.total_size = sizeof(header) + data_size + 4; /* "EOR" */ | ||
634 | header.trigger = trigger; | ||
635 | do_gettimeofday(&tv); | ||
636 | header.tv_sec = tv.tv_sec; | ||
637 | header.tv_usec = tv.tv_usec; | ||
638 | strncpy(header.busid, device->cdev->dev.bus_id, DASD_EER_BUSID_SIZE); | ||
639 | |||
640 | spin_lock_irqsave(&bufferlock, flags); | ||
641 | list_for_each_entry(eerb, &bufferlist, list) { | ||
642 | dasd_eer_start_record(eerb, header.total_size); | ||
643 | dasd_eer_write_buffer(eerb, sizeof(header), (char*)(&header)); | ||
644 | temp_cqr = cqr; | ||
645 | while (temp_cqr) { | ||
646 | if (temp_cqr->irb.esw.esw0.erw.cons) | ||
647 | dasd_eer_write_buffer(eerb, 32, cqr->irb.ecw); | ||
648 | temp_cqr = temp_cqr->refers; | ||
649 | } | ||
650 | dasd_eer_write_buffer(eerb, 4,"EOR"); | ||
651 | } | ||
652 | spin_unlock_irqrestore(&bufferlock, flags); | ||
653 | |||
654 | wake_up_interruptible(&dasd_eer_read_wait_queue); | ||
655 | |||
656 | return 0; | ||
657 | } | ||
658 | |||
659 | /* | ||
660 | * This function writes a DASD_EER_STATECHANGE trigger. | ||
661 | */ | ||
662 | static void | ||
663 | dasd_eer_write_SNSS_trigger(struct dasd_device *device, | ||
664 | struct dasd_ccw_req *cqr) | ||
665 | { | ||
666 | int data_size; | ||
667 | int snss_rc; | ||
668 | struct timeval tv; | ||
669 | struct dasd_eer_header header; | ||
670 | unsigned long flags; | ||
671 | struct eerbuffer *eerb; | ||
672 | |||
673 | snss_rc = (cqr->status == DASD_CQR_FAILED) ? -EIO : 0; | ||
674 | if (snss_rc) | ||
675 | data_size = 0; | ||
676 | else | ||
677 | data_size = SNSS_DATA_SIZE; | ||
678 | |||
679 | header.total_size = sizeof(header) + data_size + 4; /* "EOR" */ | ||
680 | header.trigger = DASD_EER_STATECHANGE; | ||
681 | do_gettimeofday(&tv); | ||
682 | header.tv_sec = tv.tv_sec; | ||
683 | header.tv_usec = tv.tv_usec; | ||
684 | strncpy(header.busid, device->cdev->dev.bus_id, DASD_EER_BUSID_SIZE); | ||
685 | |||
686 | spin_lock_irqsave(&bufferlock, flags); | ||
687 | list_for_each_entry(eerb, &bufferlist, list) { | ||
688 | dasd_eer_start_record(eerb, header.total_size); | ||
689 | dasd_eer_write_buffer(eerb, sizeof(header),(char*)(&header)); | ||
690 | if (!snss_rc) | ||
691 | dasd_eer_write_buffer(eerb, SNSS_DATA_SIZE, cqr->data); | ||
692 | dasd_eer_write_buffer(eerb, 4,"EOR"); | ||
693 | } | ||
694 | spin_unlock_irqrestore(&bufferlock, flags); | ||
695 | |||
696 | wake_up_interruptible(&dasd_eer_read_wait_queue); | ||
697 | } | ||
698 | |||
699 | /* | ||
700 | * callback function for use with SNSS request | ||
701 | */ | ||
702 | static void | ||
703 | dasd_eer_SNSS_cb(struct dasd_ccw_req *cqr, void *data) | ||
704 | { | ||
705 | struct dasd_device *device; | ||
706 | struct dasd_eer_private *private; | ||
707 | unsigned long irqflags; | ||
708 | |||
709 | device = (struct dasd_device *)data; | ||
710 | private = (struct dasd_eer_private *)device->eer; | ||
711 | dasd_eer_write_SNSS_trigger(device, cqr); | ||
712 | spin_lock_irqsave(&snsslock, irqflags); | ||
713 | if(!test_and_clear_bit(SNSS_REQUESTED, &private->flags)) { | ||
714 | clear_bit(CQR_IN_USE, &private->flags); | ||
715 | spin_unlock_irqrestore(&snsslock, irqflags); | ||
716 | return; | ||
717 | }; | ||
718 | clear_bit(CQR_IN_USE, &private->flags); | ||
719 | spin_unlock_irqrestore(&snsslock, irqflags); | ||
720 | dasd_eer_sense_subsystem_status(device); | ||
721 | return; | ||
722 | } | ||
723 | |||
724 | /* | ||
725 | * clean a used cqr before using it again | ||
726 | */ | ||
727 | static void | ||
728 | dasd_eer_clean_SNSS_request(struct dasd_ccw_req *cqr) | ||
729 | { | ||
730 | struct ccw1 *cpaddr = cqr->cpaddr; | ||
731 | void *data = cqr->data; | ||
732 | |||
733 | memset(cqr, 0, sizeof(struct dasd_ccw_req)); | ||
734 | memset(cpaddr, 0, sizeof(struct ccw1)); | ||
735 | memset(data, 0, SNSS_DATA_SIZE); | ||
736 | cqr->cpaddr = cpaddr; | ||
737 | cqr->data = data; | ||
738 | strncpy((char *) &cqr->magic, "ECKD", 4); | ||
739 | ASCEBC((char *) &cqr->magic, 4); | ||
740 | set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); | ||
741 | } | ||
742 | |||
743 | /* | ||
744 | * build and start an SNSS request | ||
745 | * This function is called from a work queue so we have to | ||
746 | * pass the dasd_device pointer as a void pointer. | ||
747 | */ | ||
748 | static void | ||
749 | dasd_eer_sense_subsystem_status(void *data) | ||
750 | { | ||
751 | struct dasd_device *device; | ||
752 | struct dasd_eer_private *private; | ||
753 | struct dasd_ccw_req *cqr; | ||
754 | struct ccw1 *ccw; | ||
755 | unsigned long irqflags; | ||
756 | |||
757 | device = (struct dasd_device *)data; | ||
758 | private = (struct dasd_eer_private *)device->eer; | ||
759 | if (!private) /* device not eer enabled any more */ | ||
760 | return; | ||
761 | cqr = private->cqr; | ||
762 | spin_lock_irqsave(&snsslock, irqflags); | ||
763 | if(test_and_set_bit(CQR_IN_USE, &private->flags)) { | ||
764 | set_bit(SNSS_REQUESTED, &private->flags); | ||
765 | spin_unlock_irqrestore(&snsslock, irqflags); | ||
766 | return; | ||
767 | }; | ||
768 | spin_unlock_irqrestore(&snsslock, irqflags); | ||
769 | dasd_eer_clean_SNSS_request(cqr); | ||
770 | cqr->device = device; | ||
771 | cqr->retries = 255; | ||
772 | cqr->expires = 10 * HZ; | ||
773 | |||
774 | ccw = cqr->cpaddr; | ||
775 | ccw->cmd_code = DASD_ECKD_CCW_SNSS; | ||
776 | ccw->count = SNSS_DATA_SIZE; | ||
777 | ccw->flags = 0; | ||
778 | ccw->cda = (__u32)(addr_t)cqr->data; | ||
779 | |||
780 | cqr->buildclk = get_clock(); | ||
781 | cqr->status = DASD_CQR_FILLED; | ||
782 | cqr->callback = dasd_eer_SNSS_cb; | ||
783 | cqr->callback_data = (void *)device; | ||
784 | dasd_add_request_head(cqr); | ||
785 | |||
786 | return; | ||
787 | } | ||
788 | |||
789 | /* | ||
790 | * This function is called for all triggers. It calls the appropriate | ||
791 | * function that writes the actual trigger records. | ||
792 | */ | ||
793 | static int | ||
794 | dasd_eer_write_trigger(struct dasd_eer_trigger *trigger) | ||
795 | { | ||
796 | int rc; | ||
797 | struct dasd_eer_private *private = trigger->device->eer; | ||
798 | |||
799 | switch (trigger->id) { | ||
800 | case DASD_EER_FATALERROR: | ||
801 | case DASD_EER_PPRCSUSPEND: | ||
802 | rc = dasd_eer_write_standard_trigger( | ||
803 | trigger->id, trigger->device, trigger->cqr); | ||
804 | break; | ||
805 | case DASD_EER_NOPATH: | ||
806 | rc = dasd_eer_write_standard_trigger( | ||
807 | trigger->id, trigger->device, NULL); | ||
808 | break; | ||
809 | case DASD_EER_STATECHANGE: | ||
810 | if (queue_work(dasd_eer_workqueue, &private->worker)) { | ||
811 | rc=0; | ||
812 | } else { | ||
813 | /* If the work_struct was already queued, it can't | ||
814 | * be queued again. But this is OK since we don't | ||
815 | * need to have it queued twice. | ||
816 | */ | ||
817 | rc = -EBUSY; | ||
818 | } | ||
819 | break; | ||
820 | default: /* unknown trigger, so we write it without any sense data */ | ||
821 | rc = dasd_eer_write_standard_trigger( | ||
822 | trigger->id, trigger->device, NULL); | ||
823 | break; | ||
824 | } | ||
825 | return rc; | ||
826 | } | ||
827 | |||
828 | /* | ||
829 | * This function is registered with the dasd device driver and gets called | ||
830 | * for all dasd eer notifications. | ||
831 | */ | ||
832 | static int dasd_eer_notify(struct notifier_block *self, | ||
833 | unsigned long action, void *data) | ||
834 | { | ||
835 | switch (action) { | ||
836 | case DASD_EER_DISABLE: | ||
837 | dasd_eer_disable_on_device((struct dasd_device *)data); | ||
838 | break; | ||
839 | case DASD_EER_TRIGGER: | ||
840 | dasd_eer_write_trigger((struct dasd_eer_trigger *)data); | ||
841 | break; | ||
842 | } | ||
843 | return NOTIFY_OK; | ||
844 | } | ||
845 | |||
846 | |||
847 | /*****************************************************************************/ | ||
848 | /* the device operations */ | ||
849 | /*****************************************************************************/ | ||
850 | |||
851 | /* | ||
852 | * On the one side we need a lock to access our internal buffer, on the | ||
853 | * other side a copy_to_user can sleep. So we need to copy the data we have | ||
854 | * to transfer in a readbuffer, which is protected by the readbuffer_mutex. | ||
855 | */ | ||
856 | static char readbuffer[PAGE_SIZE]; | ||
857 | DECLARE_MUTEX(readbuffer_mutex); | ||
858 | |||
859 | |||
860 | static int | ||
861 | dasd_eer_open(struct inode *inp, struct file *filp) | ||
862 | { | ||
863 | struct eerbuffer *eerb; | ||
864 | unsigned long flags; | ||
865 | |||
866 | eerb = kmalloc(sizeof(struct eerbuffer), GFP_KERNEL); | ||
867 | eerb->head = 0; | ||
868 | eerb->tail = 0; | ||
869 | eerb->residual = 0; | ||
870 | eerb->buffer_page_count = 1; | ||
871 | eerb->buffersize = eerb->buffer_page_count * PAGE_SIZE; | ||
872 | eerb->buffer = kmalloc(eerb->buffer_page_count*sizeof(char*), | ||
873 | GFP_KERNEL); | ||
874 | if (!eerb->buffer) | ||
875 | return -ENOMEM; | ||
876 | if (dasd_eer_allocate_buffer_pages(eerb->buffer, | ||
877 | eerb->buffer_page_count)) { | ||
878 | kfree(eerb->buffer); | ||
879 | return -ENOMEM; | ||
880 | } | ||
881 | filp->private_data = eerb; | ||
882 | spin_lock_irqsave(&bufferlock, flags); | ||
883 | list_add(&eerb->list, &bufferlist); | ||
884 | spin_unlock_irqrestore(&bufferlock, flags); | ||
885 | |||
886 | return nonseekable_open(inp,filp); | ||
887 | } | ||
888 | |||
889 | static int | ||
890 | dasd_eer_close(struct inode *inp, struct file *filp) | ||
891 | { | ||
892 | struct eerbuffer *eerb; | ||
893 | unsigned long flags; | ||
894 | |||
895 | eerb = (struct eerbuffer *)filp->private_data; | ||
896 | spin_lock_irqsave(&bufferlock, flags); | ||
897 | list_del(&eerb->list); | ||
898 | spin_unlock_irqrestore(&bufferlock, flags); | ||
899 | dasd_eer_free_buffer_pages(eerb->buffer, eerb->buffer_page_count); | ||
900 | kfree(eerb->buffer); | ||
901 | kfree(eerb); | ||
902 | |||
903 | return 0; | ||
904 | } | ||
905 | |||
906 | static long | ||
907 | dasd_eer_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) | ||
908 | { | ||
909 | int intval; | ||
910 | struct eerbuffer *eerb; | ||
911 | |||
912 | eerb = (struct eerbuffer *)filp->private_data; | ||
913 | switch (cmd) { | ||
914 | case DASD_EER_PURGE: | ||
915 | dasd_eer_purge_buffer(eerb); | ||
916 | return 0; | ||
917 | case DASD_EER_SETBUFSIZE: | ||
918 | if (get_user(intval, (int __user *)arg)) | ||
919 | return -EFAULT; | ||
920 | return dasd_eer_resize_buffer(eerb, intval); | ||
921 | default: | ||
922 | return -ENOIOCTLCMD; | ||
923 | } | ||
924 | } | ||
925 | |||
926 | static ssize_t | ||
927 | dasd_eer_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos) | ||
928 | { | ||
929 | int tc,rc; | ||
930 | int tailcount,effective_count; | ||
931 | unsigned long flags; | ||
932 | struct eerbuffer *eerb; | ||
933 | |||
934 | eerb = (struct eerbuffer *)filp->private_data; | ||
935 | if(down_interruptible(&readbuffer_mutex)) | ||
936 | return -ERESTARTSYS; | ||
937 | |||
938 | spin_lock_irqsave(&bufferlock, flags); | ||
939 | |||
940 | if (eerb->residual < 0) { /* the remainder of this record */ | ||
941 | /* has been deleted */ | ||
942 | eerb->residual = 0; | ||
943 | spin_unlock_irqrestore(&bufferlock, flags); | ||
944 | up(&readbuffer_mutex); | ||
945 | return -EIO; | ||
946 | } else if (eerb->residual > 0) { | ||
947 | /* OK we still have a second half of a record to deliver */ | ||
948 | effective_count = min(eerb->residual, (int)count); | ||
949 | eerb->residual -= effective_count; | ||
950 | } else { | ||
951 | tc = 0; | ||
952 | while (!tc) { | ||
953 | tc = dasd_eer_read_buffer(eerb, | ||
954 | sizeof(tailcount), (char*)(&tailcount)); | ||
955 | if (!tc) { | ||
956 | /* no data available */ | ||
957 | spin_unlock_irqrestore(&bufferlock, flags); | ||
958 | up(&readbuffer_mutex); | ||
959 | if (filp->f_flags & O_NONBLOCK) | ||
960 | return -EAGAIN; | ||
961 | rc = wait_event_interruptible( | ||
962 | dasd_eer_read_wait_queue, | ||
963 | eerb->head != eerb->tail); | ||
964 | if (rc) { | ||
965 | return rc; | ||
966 | } | ||
967 | if(down_interruptible(&readbuffer_mutex)) | ||
968 | return -ERESTARTSYS; | ||
969 | spin_lock_irqsave(&bufferlock, flags); | ||
970 | } | ||
971 | } | ||
972 | WARN_ON(tc != sizeof(tailcount)); | ||
973 | effective_count = min(tailcount,(int)count); | ||
974 | eerb->residual = tailcount - effective_count; | ||
975 | } | ||
976 | |||
977 | tc = dasd_eer_read_buffer(eerb, effective_count, readbuffer); | ||
978 | WARN_ON(tc != effective_count); | ||
979 | |||
980 | spin_unlock_irqrestore(&bufferlock, flags); | ||
981 | |||
982 | if (copy_to_user(buf, readbuffer, effective_count)) { | ||
983 | up(&readbuffer_mutex); | ||
984 | return -EFAULT; | ||
985 | } | ||
986 | |||
987 | up(&readbuffer_mutex); | ||
988 | return effective_count; | ||
989 | } | ||
990 | |||
991 | static unsigned int | ||
992 | dasd_eer_poll (struct file *filp, poll_table *ptable) | ||
993 | { | ||
994 | unsigned int mask; | ||
995 | unsigned long flags; | ||
996 | struct eerbuffer *eerb; | ||
997 | |||
998 | eerb = (struct eerbuffer *)filp->private_data; | ||
999 | poll_wait(filp, &dasd_eer_read_wait_queue, ptable); | ||
1000 | spin_lock_irqsave(&bufferlock, flags); | ||
1001 | if (eerb->head != eerb->tail) | ||
1002 | mask = POLLIN | POLLRDNORM ; | ||
1003 | else | ||
1004 | mask = 0; | ||
1005 | spin_unlock_irqrestore(&bufferlock, flags); | ||
1006 | return mask; | ||
1007 | } | ||
1008 | |||
1009 | static struct file_operations dasd_eer_fops = { | ||
1010 | .open = &dasd_eer_open, | ||
1011 | .release = &dasd_eer_close, | ||
1012 | .unlocked_ioctl = &dasd_eer_ioctl, | ||
1013 | .compat_ioctl = &dasd_eer_ioctl, | ||
1014 | .read = &dasd_eer_read, | ||
1015 | .poll = &dasd_eer_poll, | ||
1016 | .owner = THIS_MODULE, | ||
1017 | }; | ||
1018 | |||
1019 | static struct miscdevice dasd_eer_dev = { | ||
1020 | .minor = MISC_DYNAMIC_MINOR, | ||
1021 | .name = "dasd_eer", | ||
1022 | .fops = &dasd_eer_fops, | ||
1023 | }; | ||
1024 | |||
1025 | |||
1026 | /*****************************************************************************/ | ||
1027 | /* Init and exit */ | ||
1028 | /*****************************************************************************/ | ||
1029 | |||
1030 | static int | ||
1031 | __init dasd_eer_init(void) | ||
1032 | { | ||
1033 | int rc; | ||
1034 | |||
1035 | dasd_eer_workqueue = create_singlethread_workqueue("dasd_eer"); | ||
1036 | if (!dasd_eer_workqueue) { | ||
1037 | MESSAGE(KERN_ERR , "%s", "dasd_eer_init could not " | ||
1038 | "create workqueue \n"); | ||
1039 | rc = -ENOMEM; | ||
1040 | goto out; | ||
1041 | } | ||
1042 | |||
1043 | rc = dasd_register_eer_notifier(&dasd_eer_nb); | ||
1044 | if (rc) { | ||
1045 | MESSAGE(KERN_ERR, "%s", "dasd_eer_init could not " | ||
1046 | "register error reporting"); | ||
1047 | goto queue; | ||
1048 | } | ||
1049 | |||
1050 | dasd_ioctl_no_register(THIS_MODULE, BIODASDEERSET, dasd_ioctl_set_eer); | ||
1051 | dasd_ioctl_no_register(THIS_MODULE, BIODASDEERGET, dasd_ioctl_get_eer); | ||
1052 | |||
1053 | /* we don't need our own character device, | ||
1054 | * so we just register as misc device */ | ||
1055 | rc = misc_register(&dasd_eer_dev); | ||
1056 | if (rc) { | ||
1057 | MESSAGE(KERN_ERR, "%s", "dasd_eer_init could not " | ||
1058 | "register misc device"); | ||
1059 | goto unregister; | ||
1060 | } | ||
1061 | |||
1062 | return 0; | ||
1063 | |||
1064 | unregister: | ||
1065 | dasd_unregister_eer_notifier(&dasd_eer_nb); | ||
1066 | dasd_ioctl_no_unregister(THIS_MODULE, BIODASDEERSET, | ||
1067 | dasd_ioctl_set_eer); | ||
1068 | dasd_ioctl_no_unregister(THIS_MODULE, BIODASDEERGET, | ||
1069 | dasd_ioctl_get_eer); | ||
1070 | queue: | ||
1071 | destroy_workqueue(dasd_eer_workqueue); | ||
1072 | out: | ||
1073 | return rc; | ||
1074 | |||
1075 | } | ||
1076 | module_init(dasd_eer_init); | ||
1077 | |||
1078 | static void | ||
1079 | __exit dasd_eer_exit(void) | ||
1080 | { | ||
1081 | dasd_unregister_eer_notifier(&dasd_eer_nb); | ||
1082 | dasd_ioctl_no_unregister(THIS_MODULE, BIODASDEERSET, | ||
1083 | dasd_ioctl_set_eer); | ||
1084 | dasd_ioctl_no_unregister(THIS_MODULE, BIODASDEERGET, | ||
1085 | dasd_ioctl_get_eer); | ||
1086 | destroy_workqueue(dasd_eer_workqueue); | ||
1087 | |||
1088 | WARN_ON(misc_deregister(&dasd_eer_dev) != 0); | ||
1089 | } | ||
1090 | module_exit(dasd_eer_exit); | ||
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h index d1b08fa13fd2..0592354cc604 100644 --- a/drivers/s390/block/dasd_int.h +++ b/drivers/s390/block/dasd_int.h | |||
@@ -275,34 +275,6 @@ struct dasd_discipline { | |||
275 | 275 | ||
276 | extern struct dasd_discipline *dasd_diag_discipline_pointer; | 276 | extern struct dasd_discipline *dasd_diag_discipline_pointer; |
277 | 277 | ||
278 | |||
279 | /* | ||
280 | * Notification numbers for extended error reporting notifications: | ||
281 | * The DASD_EER_DISABLE notification is sent before a dasd_device (and it's | ||
282 | * eer pointer) is freed. The error reporting module needs to do all necessary | ||
283 | * cleanup steps. | ||
284 | * The DASD_EER_TRIGGER notification sends the actual error reports (triggers). | ||
285 | */ | ||
286 | #define DASD_EER_DISABLE 0 | ||
287 | #define DASD_EER_TRIGGER 1 | ||
288 | |||
289 | /* Trigger IDs for extended error reporting DASD_EER_TRIGGER notification */ | ||
290 | #define DASD_EER_FATALERROR 1 | ||
291 | #define DASD_EER_NOPATH 2 | ||
292 | #define DASD_EER_STATECHANGE 3 | ||
293 | #define DASD_EER_PPRCSUSPEND 4 | ||
294 | |||
295 | /* | ||
296 | * The dasd_eer_trigger structure contains all data that we need to send | ||
297 | * along with an DASD_EER_TRIGGER notification. | ||
298 | */ | ||
299 | struct dasd_eer_trigger { | ||
300 | unsigned int id; | ||
301 | struct dasd_device *device; | ||
302 | struct dasd_ccw_req *cqr; | ||
303 | }; | ||
304 | |||
305 | |||
306 | struct dasd_device { | 278 | struct dasd_device { |
307 | /* Block device stuff. */ | 279 | /* Block device stuff. */ |
308 | struct gendisk *gdp; | 280 | struct gendisk *gdp; |
@@ -316,11 +288,9 @@ struct dasd_device { | |||
316 | unsigned long flags; /* per device flags */ | 288 | unsigned long flags; /* per device flags */ |
317 | unsigned short features; /* copy of devmap-features (read-only!) */ | 289 | unsigned short features; /* copy of devmap-features (read-only!) */ |
318 | 290 | ||
319 | /* extended error reporting stuff (eer) */ | ||
320 | void *eer; | ||
321 | |||
322 | /* Device discipline stuff. */ | 291 | /* Device discipline stuff. */ |
323 | struct dasd_discipline *discipline; | 292 | struct dasd_discipline *discipline; |
293 | struct dasd_discipline *base_discipline; | ||
324 | char *private; | 294 | char *private; |
325 | 295 | ||
326 | /* Device state and target state. */ | 296 | /* Device state and target state. */ |
@@ -519,12 +489,6 @@ int dasd_generic_set_online(struct ccw_device *, struct dasd_discipline *); | |||
519 | int dasd_generic_set_offline (struct ccw_device *cdev); | 489 | int dasd_generic_set_offline (struct ccw_device *cdev); |
520 | int dasd_generic_notify(struct ccw_device *, int); | 490 | int dasd_generic_notify(struct ccw_device *, int); |
521 | void dasd_generic_auto_online (struct ccw_driver *); | 491 | void dasd_generic_auto_online (struct ccw_driver *); |
522 | int dasd_register_eer_notifier(struct notifier_block *); | ||
523 | int dasd_unregister_eer_notifier(struct notifier_block *); | ||
524 | void dasd_write_eer_trigger(unsigned int , struct dasd_device *, | ||
525 | struct dasd_ccw_req *); | ||
526 | |||
527 | |||
528 | 492 | ||
529 | /* externals in dasd_devmap.c */ | 493 | /* externals in dasd_devmap.c */ |
530 | extern int dasd_max_devindex; | 494 | extern int dasd_max_devindex; |
diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c index ceb0e474fde4..4138564402b8 100644 --- a/drivers/s390/char/sclp.c +++ b/drivers/s390/char/sclp.c | |||
@@ -85,11 +85,10 @@ static volatile enum sclp_mask_state_t { | |||
85 | /* Maximum retry counts */ | 85 | /* Maximum retry counts */ |
86 | #define SCLP_INIT_RETRY 3 | 86 | #define SCLP_INIT_RETRY 3 |
87 | #define SCLP_MASK_RETRY 3 | 87 | #define SCLP_MASK_RETRY 3 |
88 | #define SCLP_REQUEST_RETRY 3 | ||
89 | 88 | ||
90 | /* Timeout intervals in seconds.*/ | 89 | /* Timeout intervals in seconds.*/ |
91 | #define SCLP_BUSY_INTERVAL 2 | 90 | #define SCLP_BUSY_INTERVAL 10 |
92 | #define SCLP_RETRY_INTERVAL 5 | 91 | #define SCLP_RETRY_INTERVAL 15 |
93 | 92 | ||
94 | static void sclp_process_queue(void); | 93 | static void sclp_process_queue(void); |
95 | static int sclp_init_mask(int calculate); | 94 | static int sclp_init_mask(int calculate); |
@@ -153,11 +152,9 @@ __sclp_start_request(struct sclp_req *req) | |||
153 | if (sclp_running_state != sclp_running_state_idle) | 152 | if (sclp_running_state != sclp_running_state_idle) |
154 | return 0; | 153 | return 0; |
155 | del_timer(&sclp_request_timer); | 154 | del_timer(&sclp_request_timer); |
156 | if (req->start_count <= SCLP_REQUEST_RETRY) { | 155 | rc = service_call(req->command, req->sccb); |
157 | rc = service_call(req->command, req->sccb); | 156 | req->start_count++; |
158 | req->start_count++; | 157 | |
159 | } else | ||
160 | rc = -EIO; | ||
161 | if (rc == 0) { | 158 | if (rc == 0) { |
162 | /* Sucessfully started request */ | 159 | /* Sucessfully started request */ |
163 | req->status = SCLP_REQ_RUNNING; | 160 | req->status = SCLP_REQ_RUNNING; |
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c index 92be75d99a56..8cf9905d484b 100644 --- a/drivers/s390/cio/chsc.c +++ b/drivers/s390/cio/chsc.c | |||
@@ -232,7 +232,7 @@ s390_subchannel_remove_chpid(struct device *dev, void *data) | |||
232 | return 0; | 232 | return 0; |
233 | 233 | ||
234 | mask = 0x80 >> j; | 234 | mask = 0x80 >> j; |
235 | spin_lock(&sch->lock); | 235 | spin_lock_irq(&sch->lock); |
236 | 236 | ||
237 | stsch(sch->schid, &schib); | 237 | stsch(sch->schid, &schib); |
238 | if (!schib.pmcw.dnv) | 238 | if (!schib.pmcw.dnv) |
@@ -281,10 +281,10 @@ s390_subchannel_remove_chpid(struct device *dev, void *data) | |||
281 | if (sch->driver && sch->driver->verify) | 281 | if (sch->driver && sch->driver->verify) |
282 | sch->driver->verify(&sch->dev); | 282 | sch->driver->verify(&sch->dev); |
283 | out_unlock: | 283 | out_unlock: |
284 | spin_unlock(&sch->lock); | 284 | spin_unlock_irq(&sch->lock); |
285 | return 0; | 285 | return 0; |
286 | out_unreg: | 286 | out_unreg: |
287 | spin_unlock(&sch->lock); | 287 | spin_unlock_irq(&sch->lock); |
288 | sch->lpm = 0; | 288 | sch->lpm = 0; |
289 | if (css_enqueue_subchannel_slow(sch->schid)) { | 289 | if (css_enqueue_subchannel_slow(sch->schid)) { |
290 | css_clear_subchannel_slow_list(); | 290 | css_clear_subchannel_slow_list(); |
@@ -652,7 +652,7 @@ __chp_add(struct subchannel_id schid, void *data) | |||
652 | if (!sch) | 652 | if (!sch) |
653 | /* Check if the subchannel is now available. */ | 653 | /* Check if the subchannel is now available. */ |
654 | return __chp_add_new_sch(schid); | 654 | return __chp_add_new_sch(schid); |
655 | spin_lock(&sch->lock); | 655 | spin_lock_irq(&sch->lock); |
656 | for (i=0; i<8; i++) | 656 | for (i=0; i<8; i++) |
657 | if (sch->schib.pmcw.chpid[i] == chp->id) { | 657 | if (sch->schib.pmcw.chpid[i] == chp->id) { |
658 | if (stsch(sch->schid, &sch->schib) != 0) { | 658 | if (stsch(sch->schid, &sch->schib) != 0) { |
@@ -674,7 +674,7 @@ __chp_add(struct subchannel_id schid, void *data) | |||
674 | if (sch->driver && sch->driver->verify) | 674 | if (sch->driver && sch->driver->verify) |
675 | sch->driver->verify(&sch->dev); | 675 | sch->driver->verify(&sch->dev); |
676 | 676 | ||
677 | spin_unlock(&sch->lock); | 677 | spin_unlock_irq(&sch->lock); |
678 | put_device(&sch->dev); | 678 | put_device(&sch->dev); |
679 | return 0; | 679 | return 0; |
680 | } | 680 | } |
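The chsc.c hunks above switch sch->lock from spin_lock() to spin_lock_irq(). As a rough, generic illustration (not the subchannel code): a lock that is also taken from interrupt context must be acquired with local interrupts disabled in process context, otherwise an interrupt arriving on the same CPU while the lock is held can deadlock on it.

	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(example_lock);	/* illustrative lock, not sch->lock */
	static int example_shared_state;

	static void example_process_context(void)
	{
		spin_lock_irq(&example_lock);	/* also disables local interrupts */
		example_shared_state++;
		spin_unlock_irq(&example_lock);
	}

	/* called from an interrupt handler, where interrupts are already off */
	static void example_irq_context(void)
	{
		spin_lock(&example_lock);
		example_shared_state--;
		spin_unlock(&example_lock);
	}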
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c index 062fb100d94c..afc4e88551ad 100644 --- a/drivers/s390/cio/device.c +++ b/drivers/s390/cio/device.c | |||
@@ -359,7 +359,7 @@ ccw_device_set_online(struct ccw_device *cdev) | |||
359 | else | 359 | else |
360 | pr_debug("ccw_device_offline returned %d, device %s\n", | 360 | pr_debug("ccw_device_offline returned %d, device %s\n", |
361 | ret, cdev->dev.bus_id); | 361 | ret, cdev->dev.bus_id); |
362 | return (ret = 0) ? -ENODEV : ret; | 362 | return (ret == 0) ? -ENODEV : ret; |
363 | } | 363 | } |
364 | 364 | ||
365 | static ssize_t | 365 | static ssize_t |
diff --git a/drivers/s390/cio/device_pgid.c b/drivers/s390/cio/device_pgid.c index d2a5b04d7cba..85b1020a1fcc 100644 --- a/drivers/s390/cio/device_pgid.c +++ b/drivers/s390/cio/device_pgid.c | |||
@@ -405,7 +405,7 @@ __ccw_device_disband_start(struct ccw_device *cdev) | |||
405 | cdev->private->iretry = 5; | 405 | cdev->private->iretry = 5; |
406 | cdev->private->imask >>= 1; | 406 | cdev->private->imask >>= 1; |
407 | } | 407 | } |
408 | ccw_device_verify_done(cdev, (sch->lpm != 0) ? 0 : -ENODEV); | 408 | ccw_device_disband_done(cdev, (sch->lpm != 0) ? 0 : -ENODEV); |
409 | } | 409 | } |
410 | 410 | ||
411 | /* | 411 | /* |
diff --git a/drivers/s390/cio/device_status.c b/drivers/s390/cio/device_status.c index dad4dd9887c9..6c762b43f921 100644 --- a/drivers/s390/cio/device_status.c +++ b/drivers/s390/cio/device_status.c | |||
@@ -317,7 +317,6 @@ ccw_device_do_sense(struct ccw_device *cdev, struct irb *irb) | |||
317 | /* | 317 | /* |
318 | * We have ending status but no sense information. Do a basic sense. | 318 | * We have ending status but no sense information. Do a basic sense. |
319 | */ | 319 | */ |
320 | sch = to_subchannel(cdev->dev.parent); | ||
321 | sch->sense_ccw.cmd_code = CCW_CMD_BASIC_SENSE; | 320 | sch->sense_ccw.cmd_code = CCW_CMD_BASIC_SENSE; |
322 | sch->sense_ccw.cda = (__u32) __pa(cdev->private->irb.ecw); | 321 | sch->sense_ccw.cda = (__u32) __pa(cdev->private->irb.ecw); |
323 | sch->sense_ccw.count = SENSE_MAX_COUNT; | 322 | sch->sense_ccw.count = SENSE_MAX_COUNT; |
diff --git a/drivers/s390/cio/qdio.c b/drivers/s390/cio/qdio.c index 45ce032772f4..9ed37dc9a1b0 100644 --- a/drivers/s390/cio/qdio.c +++ b/drivers/s390/cio/qdio.c | |||
@@ -165,8 +165,13 @@ qdio_do_eqbs(struct qdio_q *q, unsigned char *state, | |||
165 | q_no = q->q_no; | 165 | q_no = q->q_no; |
166 | if(!q->is_input_q) | 166 | if(!q->is_input_q) |
167 | q_no += irq->no_input_qs; | 167 | q_no += irq->no_input_qs; |
168 | again: | ||
168 | ccq = do_eqbs(irq->sch_token, state, q_no, start, cnt); | 169 | ccq = do_eqbs(irq->sch_token, state, q_no, start, cnt); |
169 | rc = qdio_check_ccq(q, ccq); | 170 | rc = qdio_check_ccq(q, ccq); |
171 | if (rc == 1) { | ||
172 | QDIO_DBF_TEXT5(1,trace,"eqAGAIN"); | ||
173 | goto again; | ||
174 | } | ||
170 | if (rc < 0) { | 175 | if (rc < 0) { |
171 | QDIO_DBF_TEXT2(1,trace,"eqberr"); | 176 | QDIO_DBF_TEXT2(1,trace,"eqberr"); |
172 | sprintf(dbf_text,"%2x,%2x,%d,%d",tmp_cnt, *cnt, ccq, q_no); | 177 | sprintf(dbf_text,"%2x,%2x,%d,%d",tmp_cnt, *cnt, ccq, q_no); |
@@ -195,8 +200,13 @@ qdio_do_sqbs(struct qdio_q *q, unsigned char state, | |||
195 | q_no = q->q_no; | 200 | q_no = q->q_no; |
196 | if(!q->is_input_q) | 201 | if(!q->is_input_q) |
197 | q_no += irq->no_input_qs; | 202 | q_no += irq->no_input_qs; |
203 | again: | ||
198 | ccq = do_sqbs(irq->sch_token, state, q_no, start, cnt); | 204 | ccq = do_sqbs(irq->sch_token, state, q_no, start, cnt); |
199 | rc = qdio_check_ccq(q, ccq); | 205 | rc = qdio_check_ccq(q, ccq); |
206 | if (rc == 1) { | ||
207 | QDIO_DBF_TEXT5(1,trace,"sqAGAIN"); | ||
208 | goto again; | ||
209 | } | ||
200 | if (rc < 0) { | 210 | if (rc < 0) { |
201 | QDIO_DBF_TEXT3(1,trace,"sqberr"); | 211 | QDIO_DBF_TEXT3(1,trace,"sqberr"); |
202 | sprintf(dbf_text,"%2x,%2x,%d,%d",tmp_cnt,*cnt,ccq,q_no); | 212 | sprintf(dbf_text,"%2x,%2x,%d,%d",tmp_cnt,*cnt,ccq,q_no); |
@@ -1187,8 +1197,7 @@ tiqdio_is_inbound_q_done(struct qdio_q *q) | |||
1187 | 1197 | ||
1188 | if (!no_used) | 1198 | if (!no_used) |
1189 | return 1; | 1199 | return 1; |
1190 | 1200 | if (!q->siga_sync && !irq->is_qebsm) | |
1191 | if (!q->siga_sync) | ||
1192 | /* we'll check for more primed buffers in qeth_stop_polling */ | 1201 | /* we'll check for more primed buffers in qeth_stop_polling */ |
1193 | return 0; | 1202 | return 0; |
1194 | if (irq->is_qebsm) { | 1203 | if (irq->is_qebsm) { |
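The qdio_do_eqbs()/qdio_do_sqbs() changes above add an again: label and reissue the instruction whenever qdio_check_ccq() reports a transient condition (rc == 1). A generic sketch of that retry shape, with the hardware call abstracted into a callback and purely illustrative names:

	/* rc convention assumed here: 0 done, 1 retry, negative error */
	static int example_issue_with_retry(int (*op)(void *arg), void *arg)
	{
		int rc;

	again:
		rc = op(arg);
		if (rc == 1)		/* transient condition code: just reissue */
			goto again;
		return rc;
	}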
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c index 6229ba4995ad..9cf88d7201d3 100644 --- a/drivers/s390/net/lcs.c +++ b/drivers/s390/net/lcs.c | |||
@@ -98,9 +98,9 @@ lcs_register_debug_facility(void) | |||
98 | return -ENOMEM; | 98 | return -ENOMEM; |
99 | } | 99 | } |
100 | debug_register_view(lcs_dbf_setup, &debug_hex_ascii_view); | 100 | debug_register_view(lcs_dbf_setup, &debug_hex_ascii_view); |
101 | debug_set_level(lcs_dbf_setup, 4); | 101 | debug_set_level(lcs_dbf_setup, 2); |
102 | debug_register_view(lcs_dbf_trace, &debug_hex_ascii_view); | 102 | debug_register_view(lcs_dbf_trace, &debug_hex_ascii_view); |
103 | debug_set_level(lcs_dbf_trace, 4); | 103 | debug_set_level(lcs_dbf_trace, 2); |
104 | return 0; | 104 | return 0; |
105 | } | 105 | } |
106 | 106 | ||
@@ -1292,9 +1292,8 @@ lcs_set_multicast_list(struct net_device *dev) | |||
1292 | LCS_DBF_TEXT(4, trace, "setmulti"); | 1292 | LCS_DBF_TEXT(4, trace, "setmulti"); |
1293 | card = (struct lcs_card *) dev->priv; | 1293 | card = (struct lcs_card *) dev->priv; |
1294 | 1294 | ||
1295 | if (!lcs_set_thread_start_bit(card, LCS_SET_MC_THREAD)) { | 1295 | if (!lcs_set_thread_start_bit(card, LCS_SET_MC_THREAD)) |
1296 | schedule_work(&card->kernel_thread_starter); | 1296 | schedule_work(&card->kernel_thread_starter); |
1297 | } | ||
1298 | } | 1297 | } |
1299 | 1298 | ||
1300 | #endif /* CONFIG_IP_MULTICAST */ | 1299 | #endif /* CONFIG_IP_MULTICAST */ |
@@ -1459,6 +1458,8 @@ lcs_txbuffer_cb(struct lcs_channel *channel, struct lcs_buffer *buffer) | |||
1459 | lcs_release_buffer(channel, buffer); | 1458 | lcs_release_buffer(channel, buffer); |
1460 | card = (struct lcs_card *) | 1459 | card = (struct lcs_card *) |
1461 | ((char *) channel - offsetof(struct lcs_card, write)); | 1460 | ((char *) channel - offsetof(struct lcs_card, write)); |
1461 | if (netif_queue_stopped(card->dev)) | ||
1462 | netif_wake_queue(card->dev); | ||
1462 | spin_lock(&card->lock); | 1463 | spin_lock(&card->lock); |
1463 | card->tx_emitted--; | 1464 | card->tx_emitted--; |
1464 | if (card->tx_emitted <= 0 && card->tx_buffer != NULL) | 1465 | if (card->tx_emitted <= 0 && card->tx_buffer != NULL) |
@@ -1478,6 +1479,7 @@ __lcs_start_xmit(struct lcs_card *card, struct sk_buff *skb, | |||
1478 | struct net_device *dev) | 1479 | struct net_device *dev) |
1479 | { | 1480 | { |
1480 | struct lcs_header *header; | 1481 | struct lcs_header *header; |
1482 | int rc = 0; | ||
1481 | 1483 | ||
1482 | LCS_DBF_TEXT(5, trace, "hardxmit"); | 1484 | LCS_DBF_TEXT(5, trace, "hardxmit"); |
1483 | if (skb == NULL) { | 1485 | if (skb == NULL) { |
@@ -1492,10 +1494,8 @@ __lcs_start_xmit(struct lcs_card *card, struct sk_buff *skb, | |||
1492 | card->stats.tx_carrier_errors++; | 1494 | card->stats.tx_carrier_errors++; |
1493 | return 0; | 1495 | return 0; |
1494 | } | 1496 | } |
1495 | if (netif_queue_stopped(dev) ) { | 1497 | netif_stop_queue(card->dev); |
1496 | card->stats.tx_dropped++; | 1498 | spin_lock(&card->lock); |
1497 | return -EBUSY; | ||
1498 | } | ||
1499 | if (card->tx_buffer != NULL && | 1499 | if (card->tx_buffer != NULL && |
1500 | card->tx_buffer->count + sizeof(struct lcs_header) + | 1500 | card->tx_buffer->count + sizeof(struct lcs_header) + |
1501 | skb->len + sizeof(u16) > LCS_IOBUFFERSIZE) | 1501 | skb->len + sizeof(u16) > LCS_IOBUFFERSIZE) |
@@ -1506,7 +1506,8 @@ __lcs_start_xmit(struct lcs_card *card, struct sk_buff *skb, | |||
1506 | card->tx_buffer = lcs_get_buffer(&card->write); | 1506 | card->tx_buffer = lcs_get_buffer(&card->write); |
1507 | if (card->tx_buffer == NULL) { | 1507 | if (card->tx_buffer == NULL) { |
1508 | card->stats.tx_dropped++; | 1508 | card->stats.tx_dropped++; |
1509 | return -EBUSY; | 1509 | rc = -EBUSY; |
1510 | goto out; | ||
1510 | } | 1511 | } |
1511 | card->tx_buffer->callback = lcs_txbuffer_cb; | 1512 | card->tx_buffer->callback = lcs_txbuffer_cb; |
1512 | card->tx_buffer->count = 0; | 1513 | card->tx_buffer->count = 0; |
@@ -1518,13 +1519,18 @@ __lcs_start_xmit(struct lcs_card *card, struct sk_buff *skb, | |||
1518 | header->type = card->lan_type; | 1519 | header->type = card->lan_type; |
1519 | header->slot = card->portno; | 1520 | header->slot = card->portno; |
1520 | memcpy(header + 1, skb->data, skb->len); | 1521 | memcpy(header + 1, skb->data, skb->len); |
1522 | spin_unlock(&card->lock); | ||
1521 | card->stats.tx_bytes += skb->len; | 1523 | card->stats.tx_bytes += skb->len; |
1522 | card->stats.tx_packets++; | 1524 | card->stats.tx_packets++; |
1523 | dev_kfree_skb(skb); | 1525 | dev_kfree_skb(skb); |
1524 | if (card->tx_emitted <= 0) | 1526 | netif_wake_queue(card->dev); |
1527 | spin_lock(&card->lock); | ||
1528 | if (card->tx_emitted <= 0 && card->tx_buffer != NULL) | ||
1525 | /* If this is the first tx buffer emit it immediately. */ | 1529 | /* If this is the first tx buffer emit it immediately. */ |
1526 | __lcs_emit_txbuffer(card); | 1530 | __lcs_emit_txbuffer(card); |
1527 | return 0; | 1531 | out: |
1532 | spin_unlock(&card->lock); | ||
1533 | return rc; | ||
1528 | } | 1534 | } |
1529 | 1535 | ||
1530 | static int | 1536 | static int |
@@ -1535,9 +1541,7 @@ lcs_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1535 | 1541 | ||
1536 | LCS_DBF_TEXT(5, trace, "pktxmit"); | 1542 | LCS_DBF_TEXT(5, trace, "pktxmit"); |
1537 | card = (struct lcs_card *) dev->priv; | 1543 | card = (struct lcs_card *) dev->priv; |
1538 | spin_lock(&card->lock); | ||
1539 | rc = __lcs_start_xmit(card, skb, dev); | 1544 | rc = __lcs_start_xmit(card, skb, dev); |
1540 | spin_unlock(&card->lock); | ||
1541 | return rc; | 1545 | return rc; |
1542 | } | 1546 | } |
1543 | 1547 | ||
@@ -2319,7 +2323,6 @@ __init lcs_init_module(void) | |||
2319 | PRINT_ERR("Initialization failed\n"); | 2323 | PRINT_ERR("Initialization failed\n"); |
2320 | return rc; | 2324 | return rc; |
2321 | } | 2325 | } |
2322 | |||
2323 | return 0; | 2326 | return 0; |
2324 | } | 2327 | } |
2325 | 2328 | ||
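The lcs xmit rework above moves flow control into the driver: __lcs_start_xmit() now stops the queue up front instead of dropping packets, and lcs_txbuffer_cb() wakes it once a buffer completes. A bare-bones sketch of that stop/wake idiom for a 2.6-era hard_start_xmit path (illustrative names, not the LCS code):

	#include <linux/netdevice.h>
	#include <linux/skbuff.h>

	static int example_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
	{
		netif_stop_queue(dev);	/* assume no buffer until proven otherwise */

		/* ... copy skb into an I/O buffer and hand it to the hardware ... */

		dev_kfree_skb(skb);
		netif_wake_queue(dev);	/* a buffer was available, accept more */
		return 0;
	}

	/* buffer-completion callback */
	static void example_tx_done(struct net_device *dev)
	{
		if (netif_queue_stopped(dev))
			netif_wake_queue(dev);
	}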
diff --git a/drivers/s390/net/lcs.h b/drivers/s390/net/lcs.h index 08e60ad43916..2fad5e40c2e4 100644 --- a/drivers/s390/net/lcs.h +++ b/drivers/s390/net/lcs.h | |||
@@ -95,7 +95,7 @@ do { \ | |||
95 | */ | 95 | */ |
96 | #define LCS_ILLEGAL_OFFSET 0xffff | 96 | #define LCS_ILLEGAL_OFFSET 0xffff |
97 | #define LCS_IOBUFFERSIZE 0x5000 | 97 | #define LCS_IOBUFFERSIZE 0x5000 |
98 | #define LCS_NUM_BUFFS 8 /* needs to be power of 2 */ | 98 | #define LCS_NUM_BUFFS 32 /* needs to be power of 2 */ |
99 | #define LCS_MAC_LENGTH 6 | 99 | #define LCS_MAC_LENGTH 6 |
100 | #define LCS_INVALID_PORT_NO -1 | 100 | #define LCS_INVALID_PORT_NO -1 |
101 | #define LCS_LANCMD_TIMEOUT_DEFAULT 5 | 101 | #define LCS_LANCMD_TIMEOUT_DEFAULT 5 |
diff --git a/drivers/s390/net/qeth.h b/drivers/s390/net/qeth.h index 9a064d4727ad..4df0fcd7b10b 100644 --- a/drivers/s390/net/qeth.h +++ b/drivers/s390/net/qeth.h | |||
@@ -1076,16 +1076,6 @@ qeth_get_qdio_q_format(struct qeth_card *card) | |||
1076 | } | 1076 | } |
1077 | 1077 | ||
1078 | static inline int | 1078 | static inline int |
1079 | qeth_isdigit(char * buf) | ||
1080 | { | ||
1081 | while (*buf) { | ||
1082 | if (!isdigit(*buf++)) | ||
1083 | return 0; | ||
1084 | } | ||
1085 | return 1; | ||
1086 | } | ||
1087 | |||
1088 | static inline int | ||
1089 | qeth_isxdigit(char * buf) | 1079 | qeth_isxdigit(char * buf) |
1090 | { | 1080 | { |
1091 | while (*buf) { | 1081 | while (*buf) { |
@@ -1104,33 +1094,17 @@ qeth_ipaddr4_to_string(const __u8 *addr, char *buf) | |||
1104 | static inline int | 1094 | static inline int |
1105 | qeth_string_to_ipaddr4(const char *buf, __u8 *addr) | 1095 | qeth_string_to_ipaddr4(const char *buf, __u8 *addr) |
1106 | { | 1096 | { |
1107 | const char *start, *end; | 1097 | int count = 0, rc = 0; |
1108 | char abuf[4]; | 1098 | int in[4]; |
1109 | char *tmp; | 1099 | |
1110 | int len; | 1100 | rc = sscanf(buf, "%d.%d.%d.%d%n", |
1111 | int i; | 1101 | &in[0], &in[1], &in[2], &in[3], &count); |
1112 | 1102 | if (rc != 4 || count) | |
1113 | start = buf; | 1103 | return -EINVAL; |
1114 | for (i = 0; i < 4; i++) { | 1104 | for (count = 0; count < 4; count++) { |
1115 | if (i == 3) { | 1105 | if (in[count] > 255) |
1116 | end = strchr(start,0xa); | ||
1117 | if (end) | ||
1118 | len = end - start; | ||
1119 | else | ||
1120 | len = strlen(start); | ||
1121 | } | ||
1122 | else { | ||
1123 | end = strchr(start, '.'); | ||
1124 | len = end - start; | ||
1125 | } | ||
1126 | if ((len <= 0) || (len > 3)) | ||
1127 | return -EINVAL; | ||
1128 | memset(abuf, 0, 4); | ||
1129 | strncpy(abuf, start, len); | ||
1130 | if (!qeth_isdigit(abuf)) | ||
1131 | return -EINVAL; | 1106 | return -EINVAL; |
1132 | addr[i] = simple_strtoul(abuf, &tmp, 10); | 1107 | addr[count] = in[count]; |
1133 | start = end + 1; | ||
1134 | } | 1108 | } |
1135 | return 0; | 1109 | return 0; |
1136 | } | 1110 | } |
@@ -1149,36 +1123,44 @@ qeth_ipaddr6_to_string(const __u8 *addr, char *buf) | |||
1149 | static inline int | 1123 | static inline int |
1150 | qeth_string_to_ipaddr6(const char *buf, __u8 *addr) | 1124 | qeth_string_to_ipaddr6(const char *buf, __u8 *addr) |
1151 | { | 1125 | { |
1152 | const char *start, *end; | 1126 | char *end, *start; |
1153 | u16 *tmp_addr; | 1127 | __u16 *in; |
1154 | char abuf[5]; | 1128 | char num[5]; |
1155 | char *tmp; | 1129 | int num2, cnt, out, found, save_cnt; |
1156 | int len; | 1130 | unsigned short in_tmp[8] = {0, }; |
1157 | int i; | 1131 | |
1158 | 1132 | cnt = out = found = save_cnt = num2 = 0; | |
1159 | tmp_addr = (u16 *)addr; | 1133 | end = start = (char *) buf; |
1160 | start = buf; | 1134 | in = (__u16 *) addr; |
1161 | for (i = 0; i < 8; i++) { | 1135 | memset(in, 0, 16); |
1162 | if (i == 7) { | 1136 | while (end) { |
1163 | end = strchr(start,0xa); | 1137 | end = strchr(end,':'); |
1164 | if (end) | 1138 | if (end == NULL) { |
1165 | len = end - start; | 1139 | end = (char *)buf + (strlen(buf)); |
1166 | else | 1140 | out = 1; |
1167 | len = strlen(start); | 1141 | } |
1168 | } | 1142 | if ((end - start)) { |
1169 | else { | 1143 | memset(num, 0, 5); |
1170 | end = strchr(start, ':'); | 1144 | memcpy(num, start, end - start); |
1171 | len = end - start; | 1145 | if (!qeth_isxdigit(num)) |
1146 | return -EINVAL; | ||
1147 | sscanf(start, "%x", &num2); | ||
1148 | if (found) | ||
1149 | in_tmp[save_cnt++] = num2; | ||
1150 | else | ||
1151 | in[cnt++] = num2; | ||
1152 | if (out) | ||
1153 | break; | ||
1154 | } else { | ||
1155 | if (found) | ||
1156 | return -EINVAL; | ||
1157 | found = 1; | ||
1172 | } | 1158 | } |
1173 | if ((len <= 0) || (len > 4)) | 1159 | start = ++end; |
1174 | return -EINVAL; | 1160 | } |
1175 | memset(abuf, 0, 5); | 1161 | cnt = 7; |
1176 | strncpy(abuf, start, len); | 1162 | while (save_cnt) |
1177 | if (!qeth_isxdigit(abuf)) | 1163 | in[cnt--] = in_tmp[--save_cnt]; |
1178 | return -EINVAL; | ||
1179 | tmp_addr[i] = simple_strtoul(abuf, &tmp, 16); | ||
1180 | start = end + 1; | ||
1181 | } | ||
1182 | return 0; | 1164 | return 0; |
1183 | } | 1165 | } |
1184 | 1166 | ||
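The rewritten qeth_string_to_ipaddr4() above parses the dotted quad with a single sscanf("%d.%d.%d.%d%n", ...) plus a range check instead of hand-rolled tokenizing. A userspace-style sketch of the same idea follows; kernel vsscanf handles %n somewhat differently, so treat this as illustrative only. Here %n records how many characters were consumed, which lets the caller reject trailing junk.

	#include <stdio.h>

	static int parse_ipv4(const char *buf, unsigned char addr[4])
	{
		int in[4], consumed = 0, i;

		if (sscanf(buf, "%d.%d.%d.%d%n",
			   &in[0], &in[1], &in[2], &in[3], &consumed) != 4)
			return -1;		/* fewer than four numbers */
		if (buf[consumed] != '\0')
			return -1;		/* trailing garbage */
		for (i = 0; i < 4; i++) {
			if (in[i] < 0 || in[i] > 255)
				return -1;	/* out of octet range */
			addr[i] = (unsigned char) in[i];
		}
		return 0;
	}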
diff --git a/drivers/s390/net/qeth_eddp.c b/drivers/s390/net/qeth_eddp.c index b02313127780..82cb4af2f0e7 100644 --- a/drivers/s390/net/qeth_eddp.c +++ b/drivers/s390/net/qeth_eddp.c | |||
@@ -59,8 +59,7 @@ qeth_eddp_free_context(struct qeth_eddp_context *ctx) | |||
59 | for (i = 0; i < ctx->num_pages; ++i) | 59 | for (i = 0; i < ctx->num_pages; ++i) |
60 | free_page((unsigned long)ctx->pages[i]); | 60 | free_page((unsigned long)ctx->pages[i]); |
61 | kfree(ctx->pages); | 61 | kfree(ctx->pages); |
62 | if (ctx->elements != NULL) | 62 | kfree(ctx->elements); |
63 | kfree(ctx->elements); | ||
64 | kfree(ctx); | 63 | kfree(ctx); |
65 | } | 64 | } |
66 | 65 | ||
@@ -413,6 +412,13 @@ __qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx, | |||
413 | 412 | ||
414 | QETH_DBF_TEXT(trace, 5, "eddpftcp"); | 413 | QETH_DBF_TEXT(trace, 5, "eddpftcp"); |
415 | eddp->skb_offset = sizeof(struct qeth_hdr) + eddp->nhl + eddp->thl; | 414 | eddp->skb_offset = sizeof(struct qeth_hdr) + eddp->nhl + eddp->thl; |
415 | if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2) { | ||
416 | eddp->skb_offset += sizeof(struct ethhdr); | ||
417 | #ifdef CONFIG_QETH_VLAN | ||
418 | if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q)) | ||
419 | eddp->skb_offset += VLAN_HLEN; | ||
420 | #endif /* CONFIG_QETH_VLAN */ | ||
421 | } | ||
416 | tcph = eddp->skb->h.th; | 422 | tcph = eddp->skb->h.th; |
417 | while (eddp->skb_offset < eddp->skb->len) { | 423 | while (eddp->skb_offset < eddp->skb->len) { |
418 | data_len = min((int)skb_shinfo(eddp->skb)->tso_size, | 424 | data_len = min((int)skb_shinfo(eddp->skb)->tso_size, |
@@ -483,6 +489,7 @@ qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx, | |||
483 | return -ENOMEM; | 489 | return -ENOMEM; |
484 | } | 490 | } |
485 | if (qhdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER2) { | 491 | if (qhdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER2) { |
492 | skb->mac.raw = (skb->data) + sizeof(struct qeth_hdr); | ||
486 | memcpy(&eddp->mac, eth_hdr(skb), ETH_HLEN); | 493 | memcpy(&eddp->mac, eth_hdr(skb), ETH_HLEN); |
487 | #ifdef CONFIG_QETH_VLAN | 494 | #ifdef CONFIG_QETH_VLAN |
488 | if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q)) { | 495 | if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q)) { |
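The qeth_eddp changes above account for the layer-2 headers when computing skb_offset: in layer-2 mode the payload starts after the qeth header plus an Ethernet header, plus four more bytes when the frame carries an 802.1Q tag. A small sketch of that arithmetic (parameter names are made up, not the driver's structures):

	#include <linux/if_ether.h>
	#include <linux/if_vlan.h>

	static unsigned int example_payload_offset(unsigned int qeth_hdr_len,
						   unsigned int net_hdr_len,
						   unsigned int tcp_hdr_len,
						   int layer2, int vlan_tagged)
	{
		unsigned int offset = qeth_hdr_len + net_hdr_len + tcp_hdr_len;

		if (layer2) {
			offset += ETH_HLEN;		/* 14-byte Ethernet header */
			if (vlan_tagged)
				offset += VLAN_HLEN;	/* 4-byte 802.1Q tag */
		}
		return offset;
	}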
diff --git a/drivers/s390/net/qeth_main.c b/drivers/s390/net/qeth_main.c index 410abeada6c4..dba7f7f02e79 100644 --- a/drivers/s390/net/qeth_main.c +++ b/drivers/s390/net/qeth_main.c | |||
@@ -516,7 +516,8 @@ __qeth_set_offline(struct ccwgroup_device *cgdev, int recovery_mode) | |||
516 | QETH_DBF_TEXT(setup, 3, "setoffl"); | 516 | QETH_DBF_TEXT(setup, 3, "setoffl"); |
517 | QETH_DBF_HEX(setup, 3, &card, sizeof(void *)); | 517 | QETH_DBF_HEX(setup, 3, &card, sizeof(void *)); |
518 | 518 | ||
519 | netif_carrier_off(card->dev); | 519 | if (card->dev && netif_carrier_ok(card->dev)) |
520 | netif_carrier_off(card->dev); | ||
520 | recover_flag = card->state; | 521 | recover_flag = card->state; |
521 | if (qeth_stop_card(card, recovery_mode) == -ERESTARTSYS){ | 522 | if (qeth_stop_card(card, recovery_mode) == -ERESTARTSYS){ |
522 | PRINT_WARN("Stopping card %s interrupted by user!\n", | 523 | PRINT_WARN("Stopping card %s interrupted by user!\n", |
@@ -1679,6 +1680,7 @@ qeth_cmd_timeout(unsigned long data) | |||
1679 | spin_unlock_irqrestore(&reply->card->lock, flags); | 1680 | spin_unlock_irqrestore(&reply->card->lock, flags); |
1680 | } | 1681 | } |
1681 | 1682 | ||
1683 | |||
1682 | static struct qeth_ipa_cmd * | 1684 | static struct qeth_ipa_cmd * |
1683 | qeth_check_ipa_data(struct qeth_card *card, struct qeth_cmd_buffer *iob) | 1685 | qeth_check_ipa_data(struct qeth_card *card, struct qeth_cmd_buffer *iob) |
1684 | { | 1686 | { |
@@ -1699,7 +1701,8 @@ qeth_check_ipa_data(struct qeth_card *card, struct qeth_cmd_buffer *iob) | |||
1699 | QETH_CARD_IFNAME(card), | 1701 | QETH_CARD_IFNAME(card), |
1700 | card->info.chpid); | 1702 | card->info.chpid); |
1701 | card->lan_online = 0; | 1703 | card->lan_online = 0; |
1702 | netif_carrier_off(card->dev); | 1704 | if (card->dev && netif_carrier_ok(card->dev)) |
1705 | netif_carrier_off(card->dev); | ||
1703 | return NULL; | 1706 | return NULL; |
1704 | case IPA_CMD_STARTLAN: | 1707 | case IPA_CMD_STARTLAN: |
1705 | PRINT_INFO("Link reestablished on %s " | 1708 | PRINT_INFO("Link reestablished on %s " |
@@ -5562,7 +5565,7 @@ qeth_set_multicast_list(struct net_device *dev) | |||
5562 | if (card->info.type == QETH_CARD_TYPE_OSN) | 5565 | if (card->info.type == QETH_CARD_TYPE_OSN) |
5563 | return ; | 5566 | return ; |
5564 | 5567 | ||
5565 | QETH_DBF_TEXT(trace,3,"setmulti"); | 5568 | QETH_DBF_TEXT(trace, 3, "setmulti"); |
5566 | qeth_delete_mc_addresses(card); | 5569 | qeth_delete_mc_addresses(card); |
5567 | if (card->options.layer2) { | 5570 | if (card->options.layer2) { |
5568 | qeth_layer2_add_multicast(card); | 5571 | qeth_layer2_add_multicast(card); |
@@ -5579,7 +5582,6 @@ out: | |||
5579 | return; | 5582 | return; |
5580 | if (qeth_set_thread_start_bit(card, QETH_SET_PROMISC_MODE_THREAD)==0) | 5583 | if (qeth_set_thread_start_bit(card, QETH_SET_PROMISC_MODE_THREAD)==0) |
5581 | schedule_work(&card->kernel_thread_starter); | 5584 | schedule_work(&card->kernel_thread_starter); |
5582 | |||
5583 | } | 5585 | } |
5584 | 5586 | ||
5585 | static int | 5587 | static int |
@@ -7452,6 +7454,7 @@ qeth_softsetup_card(struct qeth_card *card) | |||
7452 | card->lan_online = 1; | 7454 | card->lan_online = 1; |
7453 | if (card->info.type==QETH_CARD_TYPE_OSN) | 7455 | if (card->info.type==QETH_CARD_TYPE_OSN) |
7454 | goto out; | 7456 | goto out; |
7457 | qeth_set_large_send(card, card->options.large_send); | ||
7455 | if (card->options.layer2) { | 7458 | if (card->options.layer2) { |
7456 | card->dev->features |= | 7459 | card->dev->features |= |
7457 | NETIF_F_HW_VLAN_FILTER | | 7460 | NETIF_F_HW_VLAN_FILTER | |
@@ -7468,12 +7471,6 @@ qeth_softsetup_card(struct qeth_card *card) | |||
7468 | #endif | 7471 | #endif |
7469 | goto out; | 7472 | goto out; |
7470 | } | 7473 | } |
7471 | if ((card->options.large_send == QETH_LARGE_SEND_EDDP) || | ||
7472 | (card->options.large_send == QETH_LARGE_SEND_TSO)) | ||
7473 | card->dev->features |= NETIF_F_TSO | NETIF_F_SG; | ||
7474 | else | ||
7475 | card->dev->features &= ~(NETIF_F_TSO | NETIF_F_SG); | ||
7476 | |||
7477 | if ((rc = qeth_setadapter_parms(card))) | 7474 | if ((rc = qeth_setadapter_parms(card))) |
7478 | QETH_DBF_TEXT_(setup, 2, "2err%d", rc); | 7475 | QETH_DBF_TEXT_(setup, 2, "2err%d", rc); |
7479 | if ((rc = qeth_start_ipassists(card))) | 7476 | if ((rc = qeth_start_ipassists(card))) |
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c index 4d7d47cf2394..a5f2ba9a8fdb 100644 --- a/drivers/s390/scsi/zfcp_dbf.c +++ b/drivers/s390/scsi/zfcp_dbf.c | |||
@@ -710,10 +710,9 @@ static inline void | |||
710 | _zfcp_scsi_dbf_event_common(const char *tag, const char *tag2, int level, | 710 | _zfcp_scsi_dbf_event_common(const char *tag, const char *tag2, int level, |
711 | struct zfcp_adapter *adapter, | 711 | struct zfcp_adapter *adapter, |
712 | struct scsi_cmnd *scsi_cmnd, | 712 | struct scsi_cmnd *scsi_cmnd, |
713 | struct zfcp_fsf_req *new_fsf_req) | 713 | struct zfcp_fsf_req *fsf_req, |
714 | struct zfcp_fsf_req *old_fsf_req) | ||
714 | { | 715 | { |
715 | struct zfcp_fsf_req *fsf_req = | ||
716 | (struct zfcp_fsf_req *)scsi_cmnd->host_scribble; | ||
717 | struct zfcp_scsi_dbf_record *rec = &adapter->scsi_dbf_buf; | 716 | struct zfcp_scsi_dbf_record *rec = &adapter->scsi_dbf_buf; |
718 | struct zfcp_dbf_dump *dump = (struct zfcp_dbf_dump *)rec; | 717 | struct zfcp_dbf_dump *dump = (struct zfcp_dbf_dump *)rec; |
719 | unsigned long flags; | 718 | unsigned long flags; |
@@ -727,19 +726,20 @@ _zfcp_scsi_dbf_event_common(const char *tag, const char *tag2, int level, | |||
727 | if (offset == 0) { | 726 | if (offset == 0) { |
728 | strncpy(rec->tag, tag, ZFCP_DBF_TAG_SIZE); | 727 | strncpy(rec->tag, tag, ZFCP_DBF_TAG_SIZE); |
729 | strncpy(rec->tag2, tag2, ZFCP_DBF_TAG_SIZE); | 728 | strncpy(rec->tag2, tag2, ZFCP_DBF_TAG_SIZE); |
730 | if (scsi_cmnd->device) { | 729 | if (scsi_cmnd != NULL) { |
731 | rec->scsi_id = scsi_cmnd->device->id; | 730 | if (scsi_cmnd->device) { |
732 | rec->scsi_lun = scsi_cmnd->device->lun; | 731 | rec->scsi_id = scsi_cmnd->device->id; |
732 | rec->scsi_lun = scsi_cmnd->device->lun; | ||
733 | } | ||
734 | rec->scsi_result = scsi_cmnd->result; | ||
735 | rec->scsi_cmnd = (unsigned long)scsi_cmnd; | ||
736 | rec->scsi_serial = scsi_cmnd->serial_number; | ||
737 | memcpy(rec->scsi_opcode, &scsi_cmnd->cmnd, | ||
738 | min((int)scsi_cmnd->cmd_len, | ||
739 | ZFCP_DBF_SCSI_OPCODE)); | ||
740 | rec->scsi_retries = scsi_cmnd->retries; | ||
741 | rec->scsi_allowed = scsi_cmnd->allowed; | ||
733 | } | 742 | } |
734 | rec->scsi_result = scsi_cmnd->result; | ||
735 | rec->scsi_cmnd = (unsigned long)scsi_cmnd; | ||
736 | rec->scsi_serial = scsi_cmnd->serial_number; | ||
737 | memcpy(rec->scsi_opcode, | ||
738 | &scsi_cmnd->cmnd, | ||
739 | min((int)scsi_cmnd->cmd_len, | ||
740 | ZFCP_DBF_SCSI_OPCODE)); | ||
741 | rec->scsi_retries = scsi_cmnd->retries; | ||
742 | rec->scsi_allowed = scsi_cmnd->allowed; | ||
743 | if (fsf_req != NULL) { | 743 | if (fsf_req != NULL) { |
744 | fcp_rsp = (struct fcp_rsp_iu *) | 744 | fcp_rsp = (struct fcp_rsp_iu *) |
745 | &(fsf_req->qtcb->bottom.io.fcp_rsp); | 745 | &(fsf_req->qtcb->bottom.io.fcp_rsp); |
@@ -772,15 +772,8 @@ _zfcp_scsi_dbf_event_common(const char *tag, const char *tag2, int level, | |||
772 | rec->fsf_seqno = fsf_req->seq_no; | 772 | rec->fsf_seqno = fsf_req->seq_no; |
773 | rec->fsf_issued = fsf_req->issued; | 773 | rec->fsf_issued = fsf_req->issued; |
774 | } | 774 | } |
775 | if (new_fsf_req != NULL) { | 775 | rec->type.old_fsf_reqid = |
776 | rec->type.new_fsf_req.fsf_reqid = | 776 | (unsigned long) old_fsf_req; |
777 | (unsigned long) | ||
778 | new_fsf_req; | ||
779 | rec->type.new_fsf_req.fsf_seqno = | ||
780 | new_fsf_req->seq_no; | ||
781 | rec->type.new_fsf_req.fsf_issued = | ||
782 | new_fsf_req->issued; | ||
783 | } | ||
784 | } else { | 777 | } else { |
785 | strncpy(dump->tag, "dump", ZFCP_DBF_TAG_SIZE); | 778 | strncpy(dump->tag, "dump", ZFCP_DBF_TAG_SIZE); |
786 | dump->total_size = buflen; | 779 | dump->total_size = buflen; |
@@ -801,19 +794,21 @@ _zfcp_scsi_dbf_event_common(const char *tag, const char *tag2, int level, | |||
801 | inline void | 794 | inline void |
802 | zfcp_scsi_dbf_event_result(const char *tag, int level, | 795 | zfcp_scsi_dbf_event_result(const char *tag, int level, |
803 | struct zfcp_adapter *adapter, | 796 | struct zfcp_adapter *adapter, |
804 | struct scsi_cmnd *scsi_cmnd) | 797 | struct scsi_cmnd *scsi_cmnd, |
798 | struct zfcp_fsf_req *fsf_req) | ||
805 | { | 799 | { |
806 | _zfcp_scsi_dbf_event_common("rslt", | 800 | _zfcp_scsi_dbf_event_common("rslt", tag, level, |
807 | tag, level, adapter, scsi_cmnd, NULL); | 801 | adapter, scsi_cmnd, fsf_req, NULL); |
808 | } | 802 | } |
809 | 803 | ||
810 | inline void | 804 | inline void |
811 | zfcp_scsi_dbf_event_abort(const char *tag, struct zfcp_adapter *adapter, | 805 | zfcp_scsi_dbf_event_abort(const char *tag, struct zfcp_adapter *adapter, |
812 | struct scsi_cmnd *scsi_cmnd, | 806 | struct scsi_cmnd *scsi_cmnd, |
813 | struct zfcp_fsf_req *new_fsf_req) | 807 | struct zfcp_fsf_req *new_fsf_req, |
808 | struct zfcp_fsf_req *old_fsf_req) | ||
814 | { | 809 | { |
815 | _zfcp_scsi_dbf_event_common("abrt", | 810 | _zfcp_scsi_dbf_event_common("abrt", tag, 1, |
816 | tag, 1, adapter, scsi_cmnd, new_fsf_req); | 811 | adapter, scsi_cmnd, new_fsf_req, old_fsf_req); |
817 | } | 812 | } |
818 | 813 | ||
819 | inline void | 814 | inline void |
@@ -823,7 +818,7 @@ zfcp_scsi_dbf_event_devreset(const char *tag, u8 flag, struct zfcp_unit *unit, | |||
823 | struct zfcp_adapter *adapter = unit->port->adapter; | 818 | struct zfcp_adapter *adapter = unit->port->adapter; |
824 | 819 | ||
825 | _zfcp_scsi_dbf_event_common(flag == FCP_TARGET_RESET ? "trst" : "lrst", | 820 | _zfcp_scsi_dbf_event_common(flag == FCP_TARGET_RESET ? "trst" : "lrst", |
826 | tag, 1, adapter, scsi_cmnd, NULL); | 821 | tag, 1, adapter, scsi_cmnd, NULL, NULL); |
827 | } | 822 | } |
828 | 823 | ||
829 | static int | 824 | static int |
@@ -856,6 +851,10 @@ zfcp_scsi_dbf_view_format(debug_info_t * id, struct debug_view *view, | |||
856 | rec->scsi_retries); | 851 | rec->scsi_retries); |
857 | len += zfcp_dbf_view(out_buf + len, "scsi_allowed", "0x%02x", | 852 | len += zfcp_dbf_view(out_buf + len, "scsi_allowed", "0x%02x", |
858 | rec->scsi_allowed); | 853 | rec->scsi_allowed); |
854 | if (strncmp(rec->tag, "abrt", ZFCP_DBF_TAG_SIZE) == 0) { | ||
855 | len += zfcp_dbf_view(out_buf + len, "old_fsf_reqid", "0x%0Lx", | ||
856 | rec->type.old_fsf_reqid); | ||
857 | } | ||
859 | len += zfcp_dbf_view(out_buf + len, "fsf_reqid", "0x%0Lx", | 858 | len += zfcp_dbf_view(out_buf + len, "fsf_reqid", "0x%0Lx", |
860 | rec->fsf_reqid); | 859 | rec->fsf_reqid); |
861 | len += zfcp_dbf_view(out_buf + len, "fsf_seqno", "0x%08x", | 860 | len += zfcp_dbf_view(out_buf + len, "fsf_seqno", "0x%08x", |
@@ -883,21 +882,6 @@ zfcp_scsi_dbf_view_format(debug_info_t * id, struct debug_view *view, | |||
883 | min((int)rec->type.fcp.sns_info_len, | 882 | min((int)rec->type.fcp.sns_info_len, |
884 | ZFCP_DBF_SCSI_FCP_SNS_INFO), 0, | 883 | ZFCP_DBF_SCSI_FCP_SNS_INFO), 0, |
885 | rec->type.fcp.sns_info_len); | 884 | rec->type.fcp.sns_info_len); |
886 | } else if (strncmp(rec->tag, "abrt", ZFCP_DBF_TAG_SIZE) == 0) { | ||
887 | len += zfcp_dbf_view(out_buf + len, "fsf_reqid_abort", "0x%0Lx", | ||
888 | rec->type.new_fsf_req.fsf_reqid); | ||
889 | len += zfcp_dbf_view(out_buf + len, "fsf_seqno_abort", "0x%08x", | ||
890 | rec->type.new_fsf_req.fsf_seqno); | ||
891 | len += zfcp_dbf_stck(out_buf + len, "fsf_issued", | ||
892 | rec->type.new_fsf_req.fsf_issued); | ||
893 | } else if ((strncmp(rec->tag, "trst", ZFCP_DBF_TAG_SIZE) == 0) || | ||
894 | (strncmp(rec->tag, "lrst", ZFCP_DBF_TAG_SIZE) == 0)) { | ||
895 | len += zfcp_dbf_view(out_buf + len, "fsf_reqid_reset", "0x%0Lx", | ||
896 | rec->type.new_fsf_req.fsf_reqid); | ||
897 | len += zfcp_dbf_view(out_buf + len, "fsf_seqno_reset", "0x%08x", | ||
898 | rec->type.new_fsf_req.fsf_seqno); | ||
899 | len += zfcp_dbf_stck(out_buf + len, "fsf_issued", | ||
900 | rec->type.new_fsf_req.fsf_issued); | ||
901 | } | 885 | } |
902 | 886 | ||
903 | len += sprintf(out_buf + len, "\n"); | 887 | len += sprintf(out_buf + len, "\n"); |
diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h index e260d19fa717..7f551d66f47f 100644 --- a/drivers/s390/scsi/zfcp_def.h +++ b/drivers/s390/scsi/zfcp_def.h | |||
@@ -152,11 +152,6 @@ typedef u32 scsi_lun_t; | |||
152 | #define ZFCP_EXCHANGE_CONFIG_DATA_FIRST_SLEEP 100 | 152 | #define ZFCP_EXCHANGE_CONFIG_DATA_FIRST_SLEEP 100 |
153 | #define ZFCP_EXCHANGE_CONFIG_DATA_RETRIES 7 | 153 | #define ZFCP_EXCHANGE_CONFIG_DATA_RETRIES 7 |
154 | 154 | ||
155 | /* Retry 5 times every 2 second, then every minute */ | ||
156 | #define ZFCP_EXCHANGE_PORT_DATA_SHORT_RETRIES 5 | ||
157 | #define ZFCP_EXCHANGE_PORT_DATA_SHORT_SLEEP 200 | ||
158 | #define ZFCP_EXCHANGE_PORT_DATA_LONG_SLEEP 6000 | ||
159 | |||
160 | /* timeout value for "default timer" for fsf requests */ | 155 | /* timeout value for "default timer" for fsf requests */ |
161 | #define ZFCP_FSF_REQUEST_TIMEOUT (60*HZ); | 156 | #define ZFCP_FSF_REQUEST_TIMEOUT (60*HZ); |
162 | 157 | ||
@@ -429,11 +424,7 @@ struct zfcp_scsi_dbf_record { | |||
429 | u32 fsf_seqno; | 424 | u32 fsf_seqno; |
430 | u64 fsf_issued; | 425 | u64 fsf_issued; |
431 | union { | 426 | union { |
432 | struct { | 427 | u64 old_fsf_reqid; |
433 | u64 fsf_reqid; | ||
434 | u32 fsf_seqno; | ||
435 | u64 fsf_issued; | ||
436 | } new_fsf_req; | ||
437 | struct { | 428 | struct { |
438 | u8 rsp_validity; | 429 | u8 rsp_validity; |
439 | u8 rsp_scsi_status; | 430 | u8 rsp_scsi_status; |
@@ -915,8 +906,6 @@ struct zfcp_adapter { | |||
915 | wwn_t peer_wwnn; /* P2P peer WWNN */ | 906 | wwn_t peer_wwnn; /* P2P peer WWNN */ |
916 | wwn_t peer_wwpn; /* P2P peer WWPN */ | 907 | wwn_t peer_wwpn; /* P2P peer WWPN */ |
917 | u32 peer_d_id; /* P2P peer D_ID */ | 908 | u32 peer_d_id; /* P2P peer D_ID */ |
918 | wwn_t physical_wwpn; /* WWPN of physical port */ | ||
919 | u32 physical_s_id; /* local FC port ID */ | ||
920 | struct ccw_device *ccw_device; /* S/390 ccw device */ | 909 | struct ccw_device *ccw_device; /* S/390 ccw device */ |
921 | u8 fc_service_class; | 910 | u8 fc_service_class; |
922 | u32 hydra_version; /* Hydra version */ | 911 | u32 hydra_version; /* Hydra version */ |
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c index da947e662031..e3c4bdd29a60 100644 --- a/drivers/s390/scsi/zfcp_erp.c +++ b/drivers/s390/scsi/zfcp_erp.c | |||
@@ -2246,15 +2246,6 @@ zfcp_erp_adapter_strategy_open_fsf(struct zfcp_erp_action *erp_action) | |||
2246 | { | 2246 | { |
2247 | int retval; | 2247 | int retval; |
2248 | 2248 | ||
2249 | if ((atomic_test_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, | ||
2250 | &erp_action->adapter->status)) && | ||
2251 | (erp_action->adapter->adapter_features & | ||
2252 | FSF_FEATURE_HBAAPI_MANAGEMENT)) { | ||
2253 | zfcp_erp_adapter_strategy_open_fsf_xport(erp_action); | ||
2254 | atomic_set(&erp_action->adapter->erp_counter, 0); | ||
2255 | return ZFCP_ERP_FAILED; | ||
2256 | } | ||
2257 | |||
2258 | retval = zfcp_erp_adapter_strategy_open_fsf_xconfig(erp_action); | 2249 | retval = zfcp_erp_adapter_strategy_open_fsf_xconfig(erp_action); |
2259 | if (retval == ZFCP_ERP_FAILED) | 2250 | if (retval == ZFCP_ERP_FAILED) |
2260 | return ZFCP_ERP_FAILED; | 2251 | return ZFCP_ERP_FAILED; |
@@ -2266,13 +2257,6 @@ zfcp_erp_adapter_strategy_open_fsf(struct zfcp_erp_action *erp_action) | |||
2266 | return zfcp_erp_adapter_strategy_open_fsf_statusread(erp_action); | 2257 | return zfcp_erp_adapter_strategy_open_fsf_statusread(erp_action); |
2267 | } | 2258 | } |
2268 | 2259 | ||
2269 | /* | ||
2270 | * function: | ||
2271 | * | ||
2272 | * purpose: | ||
2273 | * | ||
2274 | * returns: | ||
2275 | */ | ||
2276 | static int | 2260 | static int |
2277 | zfcp_erp_adapter_strategy_open_fsf_xconfig(struct zfcp_erp_action *erp_action) | 2261 | zfcp_erp_adapter_strategy_open_fsf_xconfig(struct zfcp_erp_action *erp_action) |
2278 | { | 2262 | { |
@@ -2350,48 +2334,40 @@ static int | |||
2350 | zfcp_erp_adapter_strategy_open_fsf_xport(struct zfcp_erp_action *erp_action) | 2334 | zfcp_erp_adapter_strategy_open_fsf_xport(struct zfcp_erp_action *erp_action) |
2351 | { | 2335 | { |
2352 | int ret; | 2336 | int ret; |
2353 | int retries; | 2337 | struct zfcp_adapter *adapter; |
2354 | int sleep; | ||
2355 | struct zfcp_adapter *adapter = erp_action->adapter; | ||
2356 | 2338 | ||
2339 | adapter = erp_action->adapter; | ||
2357 | atomic_clear_mask(ZFCP_STATUS_ADAPTER_XPORT_OK, &adapter->status); | 2340 | atomic_clear_mask(ZFCP_STATUS_ADAPTER_XPORT_OK, &adapter->status); |
2358 | 2341 | ||
2359 | retries = 0; | 2342 | write_lock(&adapter->erp_lock); |
2360 | do { | 2343 | zfcp_erp_action_to_running(erp_action); |
2361 | write_lock(&adapter->erp_lock); | 2344 | write_unlock(&adapter->erp_lock); |
2362 | zfcp_erp_action_to_running(erp_action); | ||
2363 | write_unlock(&adapter->erp_lock); | ||
2364 | zfcp_erp_timeout_init(erp_action); | ||
2365 | ret = zfcp_fsf_exchange_port_data(erp_action, adapter, NULL); | ||
2366 | if (ret == -EOPNOTSUPP) { | ||
2367 | debug_text_event(adapter->erp_dbf, 3, "a_xport_notsupp"); | ||
2368 | return ZFCP_ERP_SUCCEEDED; | ||
2369 | } else if (ret) { | ||
2370 | debug_text_event(adapter->erp_dbf, 3, "a_xport_failed"); | ||
2371 | return ZFCP_ERP_FAILED; | ||
2372 | } | ||
2373 | debug_text_event(adapter->erp_dbf, 6, "a_xport_ok"); | ||
2374 | 2345 | ||
2375 | down(&adapter->erp_ready_sem); | 2346 | zfcp_erp_timeout_init(erp_action); |
2376 | if (erp_action->status & ZFCP_STATUS_ERP_TIMEDOUT) { | 2347 | ret = zfcp_fsf_exchange_port_data(erp_action, adapter, NULL); |
2377 | ZFCP_LOG_INFO("error: exchange of port data " | 2348 | if (ret == -EOPNOTSUPP) { |
2378 | "for adapter %s timed out\n", | 2349 | debug_text_event(adapter->erp_dbf, 3, "a_xport_notsupp"); |
2379 | zfcp_get_busid_by_adapter(adapter)); | 2350 | return ZFCP_ERP_SUCCEEDED; |
2380 | break; | 2351 | } else if (ret) { |
2381 | } | 2352 | debug_text_event(adapter->erp_dbf, 3, "a_xport_failed"); |
2382 | if (!atomic_test_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, | 2353 | return ZFCP_ERP_FAILED; |
2383 | &adapter->status)) | 2354 | } |
2384 | break; | 2355 | debug_text_event(adapter->erp_dbf, 6, "a_xport_ok"); |
2385 | 2356 | ||
2386 | if (retries < ZFCP_EXCHANGE_PORT_DATA_SHORT_RETRIES) { | 2357 | ret = ZFCP_ERP_SUCCEEDED; |
2387 | sleep = ZFCP_EXCHANGE_PORT_DATA_SHORT_SLEEP; | 2358 | down(&adapter->erp_ready_sem); |
2388 | retries++; | 2359 | if (erp_action->status & ZFCP_STATUS_ERP_TIMEDOUT) { |
2389 | } else | 2360 | ZFCP_LOG_INFO("error: exchange port data timed out (adapter " |
2390 | sleep = ZFCP_EXCHANGE_PORT_DATA_LONG_SLEEP; | 2361 | "%s)\n", zfcp_get_busid_by_adapter(adapter)); |
2391 | schedule_timeout(sleep); | 2362 | ret = ZFCP_ERP_FAILED; |
2392 | } while (1); | 2363 | } |
2364 | if (!atomic_test_mask(ZFCP_STATUS_ADAPTER_XPORT_OK, &adapter->status)) { | ||
2365 | ZFCP_LOG_INFO("error: exchange port data failed (adapter " | ||
2366 | "%s\n", zfcp_get_busid_by_adapter(adapter)); | ||
2367 | ret = ZFCP_ERP_FAILED; | ||
2368 | } | ||
2393 | 2369 | ||
2394 | return ZFCP_ERP_SUCCEEDED; | 2370 | return ret; |
2395 | } | 2371 | } |
2396 | 2372 | ||
2397 | /* | 2373 | /* |
@@ -3439,6 +3415,8 @@ zfcp_erp_action_cleanup(int action, struct zfcp_adapter *adapter, | |||
3439 | "(adapter %s, wwpn=0x%016Lx)\n", | 3415 | "(adapter %s, wwpn=0x%016Lx)\n", |
3440 | zfcp_get_busid_by_port(port), | 3416 | zfcp_get_busid_by_port(port), |
3441 | port->wwpn); | 3417 | port->wwpn); |
3418 | else | ||
3419 | scsi_flush_work(adapter->scsi_host); | ||
3442 | } | 3420 | } |
3443 | zfcp_port_put(port); | 3421 | zfcp_port_put(port); |
3444 | break; | 3422 | break; |
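The zfcp_erp_adapter_strategy_open_fsf_xport() rework above replaces the bounded retry/sleep loop with a single issue-and-wait: the request is started once, the ERP timer flags a timeout, and success is judged from the XPORT_OK status bit afterwards. A loose sketch of that issue-once, wait-with-timeout shape, expressed here with a completion rather than the driver's erp_ready semaphore, so purely illustrative:

	#include <linux/completion.h>
	#include <linux/errno.h>

	struct example_exchange {
		struct completion done;		/* completed by the response handler */
		int ok;				/* set when the exchange succeeded */
	};

	static int example_exchange_wait(struct example_exchange *ex,
					 unsigned long timeout_jiffies)
	{
		if (!wait_for_completion_timeout(&ex->done, timeout_jiffies))
			return -ETIMEDOUT;	/* timer fired before the handler */
		return ex->ok ? 0 : -EIO;	/* judge the result after the wait */
	}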
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h index c1ba7cf1b496..700f5402a978 100644 --- a/drivers/s390/scsi/zfcp_ext.h +++ b/drivers/s390/scsi/zfcp_ext.h | |||
@@ -194,9 +194,10 @@ extern void zfcp_san_dbf_event_els_response(struct zfcp_fsf_req *); | |||
194 | extern void zfcp_san_dbf_event_incoming_els(struct zfcp_fsf_req *); | 194 | extern void zfcp_san_dbf_event_incoming_els(struct zfcp_fsf_req *); |
195 | 195 | ||
196 | extern void zfcp_scsi_dbf_event_result(const char *, int, struct zfcp_adapter *, | 196 | extern void zfcp_scsi_dbf_event_result(const char *, int, struct zfcp_adapter *, |
197 | struct scsi_cmnd *); | 197 | struct scsi_cmnd *, |
198 | struct zfcp_fsf_req *); | ||
198 | extern void zfcp_scsi_dbf_event_abort(const char *, struct zfcp_adapter *, | 199 | extern void zfcp_scsi_dbf_event_abort(const char *, struct zfcp_adapter *, |
199 | struct scsi_cmnd *, | 200 | struct scsi_cmnd *, struct zfcp_fsf_req *, |
200 | struct zfcp_fsf_req *); | 201 | struct zfcp_fsf_req *); |
201 | extern void zfcp_scsi_dbf_event_devreset(const char *, u8, struct zfcp_unit *, | 202 | extern void zfcp_scsi_dbf_event_devreset(const char *, u8, struct zfcp_unit *, |
202 | struct scsi_cmnd *); | 203 | struct scsi_cmnd *); |
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c index 9f0cb3d820c0..662ec571d73b 100644 --- a/drivers/s390/scsi/zfcp_fsf.c +++ b/drivers/s390/scsi/zfcp_fsf.c | |||
@@ -388,6 +388,7 @@ zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *fsf_req) | |||
388 | case FSF_PROT_LINK_DOWN: | 388 | case FSF_PROT_LINK_DOWN: |
389 | zfcp_fsf_link_down_info_eval(adapter, | 389 | zfcp_fsf_link_down_info_eval(adapter, |
390 | &prot_status_qual->link_down_info); | 390 | &prot_status_qual->link_down_info); |
391 | zfcp_erp_adapter_reopen(adapter, 0); | ||
391 | fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; | 392 | fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; |
392 | break; | 393 | break; |
393 | 394 | ||
@@ -558,10 +559,8 @@ zfcp_fsf_link_down_info_eval(struct zfcp_adapter *adapter, | |||
558 | 559 | ||
559 | atomic_set_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, &adapter->status); | 560 | atomic_set_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, &adapter->status); |
560 | 561 | ||
561 | if (link_down == NULL) { | 562 | if (link_down == NULL) |
562 | zfcp_erp_adapter_reopen(adapter, 0); | 563 | goto out; |
563 | return; | ||
564 | } | ||
565 | 564 | ||
566 | switch (link_down->error_code) { | 565 | switch (link_down->error_code) { |
567 | case FSF_PSQ_LINK_NO_LIGHT: | 566 | case FSF_PSQ_LINK_NO_LIGHT: |
@@ -643,16 +642,8 @@ zfcp_fsf_link_down_info_eval(struct zfcp_adapter *adapter, | |||
643 | link_down->explanation_code, | 642 | link_down->explanation_code, |
644 | link_down->vendor_specific_code); | 643 | link_down->vendor_specific_code); |
645 | 644 | ||
646 | switch (link_down->error_code) { | 645 | out: |
647 | case FSF_PSQ_LINK_NO_LIGHT: | 646 | zfcp_erp_adapter_failed(adapter); |
648 | case FSF_PSQ_LINK_WRAP_PLUG: | ||
649 | case FSF_PSQ_LINK_NO_FCP: | ||
650 | case FSF_PSQ_LINK_FIRMWARE_UPDATE: | ||
651 | zfcp_erp_adapter_reopen(adapter, 0); | ||
652 | break; | ||
653 | default: | ||
654 | zfcp_erp_adapter_failed(adapter); | ||
655 | } | ||
656 | } | 647 | } |
657 | 648 | ||
658 | /* | 649 | /* |
@@ -2304,6 +2295,35 @@ zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action, | |||
2304 | return retval; | 2295 | return retval; |
2305 | } | 2296 | } |
2306 | 2297 | ||
2298 | /** | ||
2299 | * zfcp_fsf_exchange_port_evaluate | ||
2300 | * @fsf_req: fsf_req which belongs to xchg port data request | ||
2301 | * @xchg_ok: specifies if xchg port data was incomplete or complete (0/1) | ||
2302 | */ | ||
2303 | static void | ||
2304 | zfcp_fsf_exchange_port_evaluate(struct zfcp_fsf_req *fsf_req, int xchg_ok) | ||
2305 | { | ||
2306 | struct zfcp_adapter *adapter; | ||
2307 | struct fsf_qtcb *qtcb; | ||
2308 | struct fsf_qtcb_bottom_port *bottom, *data; | ||
2309 | struct Scsi_Host *shost; | ||
2310 | |||
2311 | adapter = fsf_req->adapter; | ||
2312 | qtcb = fsf_req->qtcb; | ||
2313 | bottom = &qtcb->bottom.port; | ||
2314 | shost = adapter->scsi_host; | ||
2315 | |||
2316 | data = (struct fsf_qtcb_bottom_port*) fsf_req->data; | ||
2317 | if (data) | ||
2318 | memcpy(data, bottom, sizeof(struct fsf_qtcb_bottom_port)); | ||
2319 | |||
2320 | if (adapter->connection_features & FSF_FEATURE_NPIV_MODE) | ||
2321 | fc_host_permanent_port_name(shost) = bottom->wwpn; | ||
2322 | else | ||
2323 | fc_host_permanent_port_name(shost) = fc_host_port_name(shost); | ||
2324 | fc_host_maxframe_size(shost) = bottom->maximum_frame_size; | ||
2325 | fc_host_supported_speeds(shost) = bottom->supported_speed; | ||
2326 | } | ||
2307 | 2327 | ||
2308 | /** | 2328 | /** |
2309 | * zfcp_fsf_exchange_port_data_handler - handler for exchange_port_data request | 2329 | * zfcp_fsf_exchange_port_data_handler - handler for exchange_port_data request |
@@ -2312,38 +2332,26 @@ zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action, | |||
2312 | static void | 2332 | static void |
2313 | zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *fsf_req) | 2333 | zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *fsf_req) |
2314 | { | 2334 | { |
2315 | struct zfcp_adapter *adapter = fsf_req->adapter; | 2335 | struct zfcp_adapter *adapter; |
2316 | struct Scsi_Host *shost = adapter->scsi_host; | 2336 | struct fsf_qtcb *qtcb; |
2317 | struct fsf_qtcb *qtcb = fsf_req->qtcb; | 2337 | |
2318 | struct fsf_qtcb_bottom_port *bottom, *data; | 2338 | adapter = fsf_req->adapter; |
2339 | qtcb = fsf_req->qtcb; | ||
2319 | 2340 | ||
2320 | if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) | 2341 | if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) |
2321 | return; | 2342 | return; |
2322 | 2343 | ||
2323 | switch (qtcb->header.fsf_status) { | 2344 | switch (qtcb->header.fsf_status) { |
2324 | case FSF_GOOD: | 2345 | case FSF_GOOD: |
2346 | zfcp_fsf_exchange_port_evaluate(fsf_req, 1); | ||
2325 | atomic_set_mask(ZFCP_STATUS_ADAPTER_XPORT_OK, &adapter->status); | 2347 | atomic_set_mask(ZFCP_STATUS_ADAPTER_XPORT_OK, &adapter->status); |
2326 | |||
2327 | bottom = &qtcb->bottom.port; | ||
2328 | data = (struct fsf_qtcb_bottom_port*) fsf_req->data; | ||
2329 | if (data) | ||
2330 | memcpy(data, bottom, sizeof(struct fsf_qtcb_bottom_port)); | ||
2331 | if (adapter->connection_features & FSF_FEATURE_NPIV_MODE) | ||
2332 | fc_host_permanent_port_name(shost) = bottom->wwpn; | ||
2333 | else | ||
2334 | fc_host_permanent_port_name(shost) = | ||
2335 | fc_host_port_name(shost); | ||
2336 | fc_host_maxframe_size(shost) = bottom->maximum_frame_size; | ||
2337 | fc_host_supported_speeds(shost) = bottom->supported_speed; | ||
2338 | break; | 2348 | break; |
2339 | |||
2340 | case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE: | 2349 | case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE: |
2350 | zfcp_fsf_exchange_port_evaluate(fsf_req, 0); | ||
2341 | atomic_set_mask(ZFCP_STATUS_ADAPTER_XPORT_OK, &adapter->status); | 2351 | atomic_set_mask(ZFCP_STATUS_ADAPTER_XPORT_OK, &adapter->status); |
2342 | |||
2343 | zfcp_fsf_link_down_info_eval(adapter, | 2352 | zfcp_fsf_link_down_info_eval(adapter, |
2344 | &qtcb->header.fsf_status_qual.link_down_info); | 2353 | &qtcb->header.fsf_status_qual.link_down_info); |
2345 | break; | 2354 | break; |
2346 | |||
2347 | default: | 2355 | default: |
2348 | debug_text_event(adapter->erp_dbf, 0, "xchg-port-ng"); | 2356 | debug_text_event(adapter->erp_dbf, 0, "xchg-port-ng"); |
2349 | debug_event(adapter->erp_dbf, 0, | 2357 | debug_event(adapter->erp_dbf, 0, |
@@ -4203,11 +4211,11 @@ zfcp_fsf_send_fcp_command_task_handler(struct zfcp_fsf_req *fsf_req) | |||
4203 | ZFCP_LOG_DEBUG("scpnt->result =0x%x\n", scpnt->result); | 4211 | ZFCP_LOG_DEBUG("scpnt->result =0x%x\n", scpnt->result); |
4204 | 4212 | ||
4205 | if (scpnt->result != 0) | 4213 | if (scpnt->result != 0) |
4206 | zfcp_scsi_dbf_event_result("erro", 3, fsf_req->adapter, scpnt); | 4214 | zfcp_scsi_dbf_event_result("erro", 3, fsf_req->adapter, scpnt, fsf_req); |
4207 | else if (scpnt->retries > 0) | 4215 | else if (scpnt->retries > 0) |
4208 | zfcp_scsi_dbf_event_result("retr", 4, fsf_req->adapter, scpnt); | 4216 | zfcp_scsi_dbf_event_result("retr", 4, fsf_req->adapter, scpnt, fsf_req); |
4209 | else | 4217 | else |
4210 | zfcp_scsi_dbf_event_result("norm", 6, fsf_req->adapter, scpnt); | 4218 | zfcp_scsi_dbf_event_result("norm", 6, fsf_req->adapter, scpnt, fsf_req); |
4211 | 4219 | ||
4212 | /* cleanup pointer (need this especially for abort) */ | 4220 | /* cleanup pointer (need this especially for abort) */ |
4213 | scpnt->host_scribble = NULL; | 4221 | scpnt->host_scribble = NULL; |
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c index e0803757c0fa..9f6b4d7a46f3 100644 --- a/drivers/s390/scsi/zfcp_scsi.c +++ b/drivers/s390/scsi/zfcp_scsi.c | |||
@@ -242,7 +242,7 @@ zfcp_scsi_command_fail(struct scsi_cmnd *scpnt, int result) | |||
242 | if ((scpnt->device != NULL) && (scpnt->device->host != NULL)) | 242 | if ((scpnt->device != NULL) && (scpnt->device->host != NULL)) |
243 | zfcp_scsi_dbf_event_result("fail", 4, | 243 | zfcp_scsi_dbf_event_result("fail", 4, |
244 | (struct zfcp_adapter*) scpnt->device->host->hostdata[0], | 244 | (struct zfcp_adapter*) scpnt->device->host->hostdata[0], |
245 | scpnt); | 245 | scpnt, NULL); |
246 | /* return directly */ | 246 | /* return directly */ |
247 | scpnt->scsi_done(scpnt); | 247 | scpnt->scsi_done(scpnt); |
248 | } | 248 | } |
@@ -446,7 +446,7 @@ zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt) | |||
446 | old_fsf_req = (struct zfcp_fsf_req *) scpnt->host_scribble; | 446 | old_fsf_req = (struct zfcp_fsf_req *) scpnt->host_scribble; |
447 | if (!old_fsf_req) { | 447 | if (!old_fsf_req) { |
448 | write_unlock_irqrestore(&adapter->abort_lock, flags); | 448 | write_unlock_irqrestore(&adapter->abort_lock, flags); |
449 | zfcp_scsi_dbf_event_abort("lte1", adapter, scpnt, new_fsf_req); | 449 | zfcp_scsi_dbf_event_abort("lte1", adapter, scpnt, NULL, NULL); |
450 | retval = SUCCESS; | 450 | retval = SUCCESS; |
451 | goto out; | 451 | goto out; |
452 | } | 452 | } |
@@ -460,6 +460,8 @@ zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt) | |||
460 | adapter, unit, 0); | 460 | adapter, unit, 0); |
461 | if (!new_fsf_req) { | 461 | if (!new_fsf_req) { |
462 | ZFCP_LOG_INFO("error: initiation of Abort FCP Cmnd failed\n"); | 462 | ZFCP_LOG_INFO("error: initiation of Abort FCP Cmnd failed\n"); |
463 | zfcp_scsi_dbf_event_abort("nres", adapter, scpnt, NULL, | ||
464 | old_fsf_req); | ||
463 | retval = FAILED; | 465 | retval = FAILED; |
464 | goto out; | 466 | goto out; |
465 | } | 467 | } |
@@ -470,13 +472,16 @@ zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt) | |||
470 | 472 | ||
471 | /* status should be valid since signals were not permitted */ | 473 | /* status should be valid since signals were not permitted */ |
472 | if (new_fsf_req->status & ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED) { | 474 | if (new_fsf_req->status & ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED) { |
473 | zfcp_scsi_dbf_event_abort("okay", adapter, scpnt, new_fsf_req); | 475 | zfcp_scsi_dbf_event_abort("okay", adapter, scpnt, new_fsf_req, |
476 | NULL); | ||
474 | retval = SUCCESS; | 477 | retval = SUCCESS; |
475 | } else if (new_fsf_req->status & ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED) { | 478 | } else if (new_fsf_req->status & ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED) { |
476 | zfcp_scsi_dbf_event_abort("lte2", adapter, scpnt, new_fsf_req); | 479 | zfcp_scsi_dbf_event_abort("lte2", adapter, scpnt, new_fsf_req, |
480 | NULL); | ||
477 | retval = SUCCESS; | 481 | retval = SUCCESS; |
478 | } else { | 482 | } else { |
479 | zfcp_scsi_dbf_event_abort("fail", adapter, scpnt, new_fsf_req); | 483 | zfcp_scsi_dbf_event_abort("fail", adapter, scpnt, new_fsf_req, |
484 | NULL); | ||
480 | retval = FAILED; | 485 | retval = FAILED; |
481 | } | 486 | } |
482 | zfcp_fsf_req_free(new_fsf_req); | 487 | zfcp_fsf_req_free(new_fsf_req); |
diff --git a/drivers/s390/scsi/zfcp_sysfs_adapter.c b/drivers/s390/scsi/zfcp_sysfs_adapter.c index dfc07370f412..b29ac25e07f3 100644 --- a/drivers/s390/scsi/zfcp_sysfs_adapter.c +++ b/drivers/s390/scsi/zfcp_sysfs_adapter.c | |||
@@ -55,8 +55,6 @@ ZFCP_DEFINE_ADAPTER_ATTR(status, "0x%08x\n", atomic_read(&adapter->status)); | |||
55 | ZFCP_DEFINE_ADAPTER_ATTR(peer_wwnn, "0x%016llx\n", adapter->peer_wwnn); | 55 | ZFCP_DEFINE_ADAPTER_ATTR(peer_wwnn, "0x%016llx\n", adapter->peer_wwnn); |
56 | ZFCP_DEFINE_ADAPTER_ATTR(peer_wwpn, "0x%016llx\n", adapter->peer_wwpn); | 56 | ZFCP_DEFINE_ADAPTER_ATTR(peer_wwpn, "0x%016llx\n", adapter->peer_wwpn); |
57 | ZFCP_DEFINE_ADAPTER_ATTR(peer_d_id, "0x%06x\n", adapter->peer_d_id); | 57 | ZFCP_DEFINE_ADAPTER_ATTR(peer_d_id, "0x%06x\n", adapter->peer_d_id); |
58 | ZFCP_DEFINE_ADAPTER_ATTR(physical_wwpn, "0x%016llx\n", adapter->physical_wwpn); | ||
59 | ZFCP_DEFINE_ADAPTER_ATTR(physical_s_id, "0x%06x\n", adapter->physical_s_id); | ||
60 | ZFCP_DEFINE_ADAPTER_ATTR(card_version, "0x%04x\n", adapter->hydra_version); | 58 | ZFCP_DEFINE_ADAPTER_ATTR(card_version, "0x%04x\n", adapter->hydra_version); |
61 | ZFCP_DEFINE_ADAPTER_ATTR(lic_version, "0x%08x\n", adapter->fsf_lic_version); | 59 | ZFCP_DEFINE_ADAPTER_ATTR(lic_version, "0x%08x\n", adapter->fsf_lic_version); |
62 | ZFCP_DEFINE_ADAPTER_ATTR(hardware_version, "0x%08x\n", | 60 | ZFCP_DEFINE_ADAPTER_ATTR(hardware_version, "0x%08x\n", |
@@ -241,8 +239,6 @@ static struct attribute *zfcp_adapter_attrs[] = { | |||
241 | &dev_attr_peer_wwnn.attr, | 239 | &dev_attr_peer_wwnn.attr, |
242 | &dev_attr_peer_wwpn.attr, | 240 | &dev_attr_peer_wwpn.attr, |
243 | &dev_attr_peer_d_id.attr, | 241 | &dev_attr_peer_d_id.attr, |
244 | &dev_attr_physical_wwpn.attr, | ||
245 | &dev_attr_physical_s_id.attr, | ||
246 | &dev_attr_card_version.attr, | 242 | &dev_attr_card_version.attr, |
247 | &dev_attr_lic_version.attr, | 243 | &dev_attr_lic_version.attr, |
248 | &dev_attr_status.attr, | 244 | &dev_attr_status.attr, |