Diffstat (limited to 'drivers/s390')
53 files changed, 3138 insertions, 821 deletions
diff --git a/drivers/s390/Kconfig b/drivers/s390/Kconfig
index 721787cc5a1c..4d36208ff8de 100644
--- a/drivers/s390/Kconfig
+++ b/drivers/s390/Kconfig
@@ -183,7 +183,13 @@ config S390_TAPE_34XX | |||
183 | tape subsystems and 100% compatibles. | 183 | tape subsystems and 100% compatibles. |
184 | It is safe to say "Y" here. | 184 | It is safe to say "Y" here. |
185 | 185 | ||
186 | 186 | config S390_TAPE_3590 | |
187 | tristate "Support for 3590 tape hardware" | ||
188 | depends on S390_TAPE | ||
189 | help | ||
190 | Select this option if you want to access IBM 3590 magnetic | ||
191 | tape subsystems and 100% compatibles. | ||
192 | It is safe to say "Y" here. | ||
187 | 193 | ||
188 | config VMLOGRDR | 194 | config VMLOGRDR |
189 | tristate "Support for the z/VM recording system services (VM only)" | 195 | tristate "Support for the z/VM recording system services (VM only)" |
diff --git a/drivers/s390/block/Kconfig b/drivers/s390/block/Kconfig
index 6f50cc9323d9..929d6fff6152 100644
--- a/drivers/s390/block/Kconfig
+++ b/drivers/s390/block/Kconfig
@@ -49,20 +49,18 @@ config DASD_FBA | |||
49 | 49 | ||
50 | config DASD_DIAG | 50 | config DASD_DIAG |
51 | tristate "Support for DIAG access to Disks" | 51 | tristate "Support for DIAG access to Disks" |
52 | depends on DASD && ( 64BIT = 'n' || EXPERIMENTAL) | 52 | depends on DASD |
53 | help | 53 | help |
54 | Select this option if you want to use Diagnose250 command to access | 54 | Select this option if you want to use Diagnose250 command to access |
55 | Disks under VM. If you are not running under VM or unsure what it is, | 55 | Disks under VM. If you are not running under VM or unsure what it is, |
56 | say "N". | 56 | say "N". |
57 | 57 | ||
58 | config DASD_CMB | 58 | config DASD_EER |
59 | tristate "Compatibility interface for DASD channel measurement blocks" | 59 | bool "Extended error reporting (EER)" |
60 | depends on DASD | 60 | depends on DASD |
61 | help | 61 | help |
62 | This driver provides an additional interface to the channel measurement | 62 | This driver provides a character device interface to the |
63 | facility, which is normally accessed though sysfs, with a set of | 63 | DASD extended error reporting. This is only needed if you want to |
64 | ioctl functions specific to the dasd driver. | 64 | use applications written for the EER facility. |
65 | This is only needed if you want to use applications written for | ||
66 | linux-2.4 dasd channel measurement facility interface. | ||
67 | 65 | ||
68 | endif | 66 | endif |
diff --git a/drivers/s390/block/Makefile b/drivers/s390/block/Makefile
index 58c6780134f7..be9f22d52fd8 100644
--- a/drivers/s390/block/Makefile
+++ b/drivers/s390/block/Makefile
@@ -7,11 +7,13 @@ dasd_fba_mod-objs := dasd_fba.o dasd_3370_erp.o dasd_9336_erp.o | |||
7 | dasd_diag_mod-objs := dasd_diag.o | 7 | dasd_diag_mod-objs := dasd_diag.o |
8 | dasd_mod-objs := dasd.o dasd_ioctl.o dasd_proc.o dasd_devmap.o \ | 8 | dasd_mod-objs := dasd.o dasd_ioctl.o dasd_proc.o dasd_devmap.o \ |
9 | dasd_genhd.o dasd_erp.o | 9 | dasd_genhd.o dasd_erp.o |
10 | ifdef CONFIG_DASD_EER | ||
11 | dasd_mod-objs += dasd_eer.o | ||
12 | endif | ||
10 | 13 | ||
11 | obj-$(CONFIG_DASD) += dasd_mod.o | 14 | obj-$(CONFIG_DASD) += dasd_mod.o |
12 | obj-$(CONFIG_DASD_DIAG) += dasd_diag_mod.o | 15 | obj-$(CONFIG_DASD_DIAG) += dasd_diag_mod.o |
13 | obj-$(CONFIG_DASD_ECKD) += dasd_eckd_mod.o | 16 | obj-$(CONFIG_DASD_ECKD) += dasd_eckd_mod.o |
14 | obj-$(CONFIG_DASD_FBA) += dasd_fba_mod.o | 17 | obj-$(CONFIG_DASD_FBA) += dasd_fba_mod.o |
15 | obj-$(CONFIG_DASD_CMB) += dasd_cmb.o | ||
16 | obj-$(CONFIG_BLK_DEV_XPRAM) += xpram.o | 18 | obj-$(CONFIG_BLK_DEV_XPRAM) += xpram.o |
17 | obj-$(CONFIG_DCSSBLK) += dcssblk.o | 19 | obj-$(CONFIG_DCSSBLK) += dcssblk.o |
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 33157c84d1d3..0a9f12c4e911 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -43,7 +43,6 @@ MODULE_AUTHOR("Holger Smolinski <Holger.Smolinski@de.ibm.com>"); | |||
43 | MODULE_DESCRIPTION("Linux on S/390 DASD device driver," | 43 | MODULE_DESCRIPTION("Linux on S/390 DASD device driver," |
44 | " Copyright 2000 IBM Corporation"); | 44 | " Copyright 2000 IBM Corporation"); |
45 | MODULE_SUPPORTED_DEVICE("dasd"); | 45 | MODULE_SUPPORTED_DEVICE("dasd"); |
46 | MODULE_PARM(dasd, "1-" __MODULE_STRING(256) "s"); | ||
47 | MODULE_LICENSE("GPL"); | 46 | MODULE_LICENSE("GPL"); |
48 | 47 | ||
49 | /* | 48 | /* |
@@ -71,10 +70,9 @@ dasd_alloc_device(void) | |||
71 | { | 70 | { |
72 | struct dasd_device *device; | 71 | struct dasd_device *device; |
73 | 72 | ||
74 | device = kmalloc(sizeof (struct dasd_device), GFP_ATOMIC); | 73 | device = kzalloc(sizeof (struct dasd_device), GFP_ATOMIC); |
75 | if (device == NULL) | 74 | if (device == NULL) |
76 | return ERR_PTR(-ENOMEM); | 75 | return ERR_PTR(-ENOMEM); |
77 | memset(device, 0, sizeof (struct dasd_device)); | ||
78 | /* open_count = 0 means device online but not in use */ | 76 | /* open_count = 0 means device online but not in use */ |
79 | atomic_set(&device->open_count, -1); | 77 | atomic_set(&device->open_count, -1); |
80 | 78 | ||
@@ -151,6 +149,8 @@ dasd_state_new_to_known(struct dasd_device *device) | |||
151 | static inline void | 149 | static inline void |
152 | dasd_state_known_to_new(struct dasd_device * device) | 150 | dasd_state_known_to_new(struct dasd_device * device) |
153 | { | 151 | { |
152 | /* Disable extended error reporting for this device. */ | ||
153 | dasd_eer_disable(device); | ||
154 | /* Forget the discipline information. */ | 154 | /* Forget the discipline information. */ |
155 | if (device->discipline) | 155 | if (device->discipline) |
156 | module_put(device->discipline->owner); | 156 | module_put(device->discipline->owner); |
@@ -541,33 +541,29 @@ dasd_kmalloc_request(char *magic, int cplength, int datasize, | |||
541 | struct dasd_ccw_req *cqr; | 541 | struct dasd_ccw_req *cqr; |
542 | 542 | ||
543 | /* Sanity checks */ | 543 | /* Sanity checks */ |
544 | if ( magic == NULL || datasize > PAGE_SIZE || | 544 | BUG_ON( magic == NULL || datasize > PAGE_SIZE || |
545 | (cplength*sizeof(struct ccw1)) > PAGE_SIZE) | 545 | (cplength*sizeof(struct ccw1)) > PAGE_SIZE); |
546 | BUG(); | ||
547 | 546 | ||
548 | cqr = kmalloc(sizeof(struct dasd_ccw_req), GFP_ATOMIC); | 547 | cqr = kzalloc(sizeof(struct dasd_ccw_req), GFP_ATOMIC); |
549 | if (cqr == NULL) | 548 | if (cqr == NULL) |
550 | return ERR_PTR(-ENOMEM); | 549 | return ERR_PTR(-ENOMEM); |
551 | memset(cqr, 0, sizeof(struct dasd_ccw_req)); | ||
552 | cqr->cpaddr = NULL; | 550 | cqr->cpaddr = NULL; |
553 | if (cplength > 0) { | 551 | if (cplength > 0) { |
554 | cqr->cpaddr = kmalloc(cplength*sizeof(struct ccw1), | 552 | cqr->cpaddr = kcalloc(cplength, sizeof(struct ccw1), |
555 | GFP_ATOMIC | GFP_DMA); | 553 | GFP_ATOMIC | GFP_DMA); |
556 | if (cqr->cpaddr == NULL) { | 554 | if (cqr->cpaddr == NULL) { |
557 | kfree(cqr); | 555 | kfree(cqr); |
558 | return ERR_PTR(-ENOMEM); | 556 | return ERR_PTR(-ENOMEM); |
559 | } | 557 | } |
560 | memset(cqr->cpaddr, 0, cplength*sizeof(struct ccw1)); | ||
561 | } | 558 | } |
562 | cqr->data = NULL; | 559 | cqr->data = NULL; |
563 | if (datasize > 0) { | 560 | if (datasize > 0) { |
564 | cqr->data = kmalloc(datasize, GFP_ATOMIC | GFP_DMA); | 561 | cqr->data = kzalloc(datasize, GFP_ATOMIC | GFP_DMA); |
565 | if (cqr->data == NULL) { | 562 | if (cqr->data == NULL) { |
566 | kfree(cqr->cpaddr); | 563 | kfree(cqr->cpaddr); |
567 | kfree(cqr); | 564 | kfree(cqr); |
568 | return ERR_PTR(-ENOMEM); | 565 | return ERR_PTR(-ENOMEM); |
569 | } | 566 | } |
570 | memset(cqr->data, 0, datasize); | ||
571 | } | 567 | } |
572 | strncpy((char *) &cqr->magic, magic, 4); | 568 | strncpy((char *) &cqr->magic, magic, 4); |
573 | ASCEBC((char *) &cqr->magic, 4); | 569 | ASCEBC((char *) &cqr->magic, 4); |
@@ -586,9 +582,8 @@ dasd_smalloc_request(char *magic, int cplength, int datasize, | |||
586 | int size; | 582 | int size; |
587 | 583 | ||
588 | /* Sanity checks */ | 584 | /* Sanity checks */ |
589 | if ( magic == NULL || datasize > PAGE_SIZE || | 585 | BUG_ON( magic == NULL || datasize > PAGE_SIZE || |
590 | (cplength*sizeof(struct ccw1)) > PAGE_SIZE) | 586 | (cplength*sizeof(struct ccw1)) > PAGE_SIZE); |
591 | BUG(); | ||
592 | 587 | ||
593 | size = (sizeof(struct dasd_ccw_req) + 7L) & -8L; | 588 | size = (sizeof(struct dasd_ccw_req) + 7L) & -8L; |
594 | if (cplength > 0) | 589 | if (cplength > 0) |
@@ -892,6 +887,9 @@ dasd_handle_state_change_pending(struct dasd_device *device) | |||
892 | struct dasd_ccw_req *cqr; | 887 | struct dasd_ccw_req *cqr; |
893 | struct list_head *l, *n; | 888 | struct list_head *l, *n; |
894 | 889 | ||
890 | /* First of all start sense subsystem status request. */ | ||
891 | dasd_eer_snss(device); | ||
892 | |||
895 | device->stopped &= ~DASD_STOPPED_PENDING; | 893 | device->stopped &= ~DASD_STOPPED_PENDING; |
896 | 894 | ||
897 | /* restart all 'running' IO on queue */ | 895 | /* restart all 'running' IO on queue */ |
@@ -1111,6 +1109,19 @@ restart: | |||
1111 | } | 1109 | } |
1112 | goto restart; | 1110 | goto restart; |
1113 | } | 1111 | } |
1112 | |||
1113 | /* First of all call extended error reporting. */ | ||
1114 | if (dasd_eer_enabled(device) && | ||
1115 | cqr->status == DASD_CQR_FAILED) { | ||
1116 | dasd_eer_write(device, cqr, DASD_EER_FATALERROR); | ||
1117 | |||
1118 | /* restart request */ | ||
1119 | cqr->status = DASD_CQR_QUEUED; | ||
1120 | cqr->retries = 255; | ||
1121 | device->stopped |= DASD_STOPPED_QUIESCE; | ||
1122 | goto restart; | ||
1123 | } | ||
1124 | |||
1114 | /* Process finished ERP request. */ | 1125 | /* Process finished ERP request. */ |
1115 | if (cqr->refers) { | 1126 | if (cqr->refers) { |
1116 | __dasd_process_erp(device, cqr); | 1127 | __dasd_process_erp(device, cqr); |
@@ -1248,7 +1259,8 @@ __dasd_start_head(struct dasd_device * device) | |||
1248 | cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, list); | 1259 | cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, list); |
1249 | /* check FAILFAST */ | 1260 | /* check FAILFAST */ |
1250 | if (device->stopped & ~DASD_STOPPED_PENDING && | 1261 | if (device->stopped & ~DASD_STOPPED_PENDING && |
1251 | test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags)) { | 1262 | test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) && |
1263 | (!dasd_eer_enabled(device))) { | ||
1252 | cqr->status = DASD_CQR_FAILED; | 1264 | cqr->status = DASD_CQR_FAILED; |
1253 | dasd_schedule_bh(device); | 1265 | dasd_schedule_bh(device); |
1254 | } | 1266 | } |
@@ -1807,7 +1819,7 @@ dasd_exit(void) | |||
1807 | #ifdef CONFIG_PROC_FS | 1819 | #ifdef CONFIG_PROC_FS |
1808 | dasd_proc_exit(); | 1820 | dasd_proc_exit(); |
1809 | #endif | 1821 | #endif |
1810 | dasd_ioctl_exit(); | 1822 | dasd_eer_exit(); |
1811 | if (dasd_page_cache != NULL) { | 1823 | if (dasd_page_cache != NULL) { |
1812 | kmem_cache_destroy(dasd_page_cache); | 1824 | kmem_cache_destroy(dasd_page_cache); |
1813 | dasd_page_cache = NULL; | 1825 | dasd_page_cache = NULL; |
@@ -2004,6 +2016,9 @@ dasd_generic_notify(struct ccw_device *cdev, int event) | |||
2004 | switch (event) { | 2016 | switch (event) { |
2005 | case CIO_GONE: | 2017 | case CIO_GONE: |
2006 | case CIO_NO_PATH: | 2018 | case CIO_NO_PATH: |
2019 | /* First of all call extended error reporting. */ | ||
2020 | dasd_eer_write(device, NULL, DASD_EER_NOPATH); | ||
2021 | |||
2007 | if (device->state < DASD_STATE_BASIC) | 2022 | if (device->state < DASD_STATE_BASIC) |
2008 | break; | 2023 | break; |
2009 | /* Device is active. We want to keep it. */ | 2024 | /* Device is active. We want to keep it. */ |
@@ -2061,6 +2076,7 @@ dasd_generic_auto_online (struct ccw_driver *dasd_discipline_driver) | |||
2061 | put_driver(drv); | 2076 | put_driver(drv); |
2062 | } | 2077 | } |
2063 | 2078 | ||
2079 | |||
2064 | static int __init | 2080 | static int __init |
2065 | dasd_init(void) | 2081 | dasd_init(void) |
2066 | { | 2082 | { |
@@ -2093,7 +2109,7 @@ dasd_init(void) | |||
2093 | rc = dasd_parse(); | 2109 | rc = dasd_parse(); |
2094 | if (rc) | 2110 | if (rc) |
2095 | goto failed; | 2111 | goto failed; |
2096 | rc = dasd_ioctl_init(); | 2112 | rc = dasd_eer_init(); |
2097 | if (rc) | 2113 | if (rc) |
2098 | goto failed; | 2114 | goto failed; |
2099 | #ifdef CONFIG_PROC_FS | 2115 | #ifdef CONFIG_PROC_FS |
diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c
index 4ee0f934e325..2ed51562319e 100644
--- a/drivers/s390/block/dasd_3990_erp.c
+++ b/drivers/s390/block/dasd_3990_erp.c
@@ -1108,6 +1108,9 @@ dasd_3990_handle_env_data(struct dasd_ccw_req * erp, char *sense) | |||
1108 | case 0x0B: | 1108 | case 0x0B: |
1109 | DEV_MESSAGE(KERN_WARNING, device, "%s", | 1109 | DEV_MESSAGE(KERN_WARNING, device, "%s", |
1110 | "FORMAT F - Volume is suspended duplex"); | 1110 | "FORMAT F - Volume is suspended duplex"); |
1111 | /* call extended error reporting (EER) */ | ||
1112 | dasd_eer_write(device, erp->refers, | ||
1113 | DASD_EER_PPRCSUSPEND); | ||
1111 | break; | 1114 | break; |
1112 | case 0x0C: | 1115 | case 0x0C: |
1113 | DEV_MESSAGE(KERN_WARNING, device, "%s", | 1116 | DEV_MESSAGE(KERN_WARNING, device, "%s", |
diff --git a/drivers/s390/block/dasd_cmb.c b/drivers/s390/block/dasd_cmb.c
deleted file mode 100644
index e88f73ee72ce..000000000000
--- a/drivers/s390/block/dasd_cmb.c
+++ /dev/null
@@ -1,128 +0,0 @@ | |||
1 | /* | ||
2 | * Linux on zSeries Channel Measurement Facility support | ||
3 | * (dasd device driver interface) | ||
4 | * | ||
5 | * Copyright 2000,2003 IBM Corporation | ||
6 | * | ||
7 | * Author: Arnd Bergmann <arndb@de.ibm.com> | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or modify | ||
10 | * it under the terms of the GNU General Public License as published by | ||
11 | * the Free Software Foundation; either version 2, or (at your option) | ||
12 | * any later version. | ||
13 | * | ||
14 | * This program is distributed in the hope that it will be useful, | ||
15 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
16 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
17 | * GNU General Public License for more details. | ||
18 | * | ||
19 | * You should have received a copy of the GNU General Public License | ||
20 | * along with this program; if not, write to the Free Software | ||
21 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
22 | */ | ||
23 | #include <linux/init.h> | ||
24 | #include <linux/module.h> | ||
25 | #include <asm/ccwdev.h> | ||
26 | #include <asm/cmb.h> | ||
27 | |||
28 | #include "dasd_int.h" | ||
29 | |||
30 | static int | ||
31 | dasd_ioctl_cmf_enable(struct block_device *bdev, int no, long args) | ||
32 | { | ||
33 | struct dasd_device *device; | ||
34 | |||
35 | device = bdev->bd_disk->private_data; | ||
36 | if (!device) | ||
37 | return -EINVAL; | ||
38 | |||
39 | return enable_cmf(device->cdev); | ||
40 | } | ||
41 | |||
42 | static int | ||
43 | dasd_ioctl_cmf_disable(struct block_device *bdev, int no, long args) | ||
44 | { | ||
45 | struct dasd_device *device; | ||
46 | |||
47 | device = bdev->bd_disk->private_data; | ||
48 | if (!device) | ||
49 | return -EINVAL; | ||
50 | |||
51 | return disable_cmf(device->cdev); | ||
52 | } | ||
53 | |||
54 | static int | ||
55 | dasd_ioctl_readall_cmb(struct block_device *bdev, int no, long args) | ||
56 | { | ||
57 | struct dasd_device *device; | ||
58 | struct cmbdata __user *udata; | ||
59 | struct cmbdata data; | ||
60 | size_t size; | ||
61 | int ret; | ||
62 | |||
63 | device = bdev->bd_disk->private_data; | ||
64 | if (!device) | ||
65 | return -EINVAL; | ||
66 | udata = (void __user *) args; | ||
67 | size = _IOC_SIZE(no); | ||
68 | |||
69 | if (!access_ok(VERIFY_WRITE, udata, size)) | ||
70 | return -EFAULT; | ||
71 | ret = cmf_readall(device->cdev, &data); | ||
72 | if (ret) | ||
73 | return ret; | ||
74 | if (copy_to_user(udata, &data, min(size, sizeof(*udata)))) | ||
75 | return -EFAULT; | ||
76 | return 0; | ||
77 | } | ||
78 | |||
79 | /* module initialization below here. dasd already provides a mechanism | ||
80 | * to dynamically register ioctl functions, so we simply use this. */ | ||
81 | static inline int | ||
82 | ioctl_reg(unsigned int no, dasd_ioctl_fn_t handler) | ||
83 | { | ||
84 | return dasd_ioctl_no_register(THIS_MODULE, no, handler); | ||
85 | } | ||
86 | |||
87 | static inline void | ||
88 | ioctl_unreg(unsigned int no, dasd_ioctl_fn_t handler) | ||
89 | { | ||
90 | dasd_ioctl_no_unregister(THIS_MODULE, no, handler); | ||
91 | } | ||
92 | |||
93 | static void | ||
94 | dasd_cmf_exit(void) | ||
95 | { | ||
96 | ioctl_unreg(BIODASDCMFENABLE, dasd_ioctl_cmf_enable); | ||
97 | ioctl_unreg(BIODASDCMFDISABLE, dasd_ioctl_cmf_disable); | ||
98 | ioctl_unreg(BIODASDREADALLCMB, dasd_ioctl_readall_cmb); | ||
99 | } | ||
100 | |||
101 | static int __init | ||
102 | dasd_cmf_init(void) | ||
103 | { | ||
104 | int ret; | ||
105 | ret = ioctl_reg (BIODASDCMFENABLE, dasd_ioctl_cmf_enable); | ||
106 | if (ret) | ||
107 | goto err; | ||
108 | ret = ioctl_reg (BIODASDCMFDISABLE, dasd_ioctl_cmf_disable); | ||
109 | if (ret) | ||
110 | goto err; | ||
111 | ret = ioctl_reg (BIODASDREADALLCMB, dasd_ioctl_readall_cmb); | ||
112 | if (ret) | ||
113 | goto err; | ||
114 | |||
115 | return 0; | ||
116 | err: | ||
117 | dasd_cmf_exit(); | ||
118 | |||
119 | return ret; | ||
120 | } | ||
121 | |||
122 | module_init(dasd_cmf_init); | ||
123 | module_exit(dasd_cmf_exit); | ||
124 | |||
125 | MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>"); | ||
126 | MODULE_LICENSE("GPL"); | ||
127 | MODULE_DESCRIPTION("channel measurement facility interface for dasd\n" | ||
128 | "Copyright 2003 IBM Corporation\n"); | ||
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c
index 1629b27c48ab..c1c6f1381150 100644
--- a/drivers/s390/block/dasd_devmap.c
+++ b/drivers/s390/block/dasd_devmap.c
@@ -16,6 +16,7 @@ | |||
16 | #include <linux/config.h> | 16 | #include <linux/config.h> |
17 | #include <linux/ctype.h> | 17 | #include <linux/ctype.h> |
18 | #include <linux/init.h> | 18 | #include <linux/init.h> |
19 | #include <linux/module.h> | ||
19 | 20 | ||
20 | #include <asm/debug.h> | 21 | #include <asm/debug.h> |
21 | #include <asm/uaccess.h> | 22 | #include <asm/uaccess.h> |
@@ -69,6 +70,8 @@ int dasd_autodetect = 0; /* is true, when autodetection is active */ | |||
69 | * strings when running as a module. | 70 | * strings when running as a module. |
70 | */ | 71 | */ |
71 | static char *dasd[256]; | 72 | static char *dasd[256]; |
73 | module_param_array(dasd, charp, NULL, 0); | ||
74 | |||
72 | /* | 75 | /* |
73 | * Single spinlock to protect devmap structures and lists. | 76 | * Single spinlock to protect devmap structures and lists. |
74 | */ | 77 | */ |
@@ -434,8 +437,7 @@ dasd_forget_ranges(void) | |||
434 | spin_lock(&dasd_devmap_lock); | 437 | spin_lock(&dasd_devmap_lock); |
435 | for (i = 0; i < 256; i++) { | 438 | for (i = 0; i < 256; i++) { |
436 | list_for_each_entry_safe(devmap, n, &dasd_hashlists[i], list) { | 439 | list_for_each_entry_safe(devmap, n, &dasd_hashlists[i], list) { |
437 | if (devmap->device != NULL) | 440 | BUG_ON(devmap->device != NULL); |
438 | BUG(); | ||
439 | list_del(&devmap->list); | 441 | list_del(&devmap->list); |
440 | kfree(devmap); | 442 | kfree(devmap); |
441 | } | 443 | } |
@@ -544,8 +546,7 @@ dasd_delete_device(struct dasd_device *device) | |||
544 | 546 | ||
545 | /* First remove device pointer from devmap. */ | 547 | /* First remove device pointer from devmap. */ |
546 | devmap = dasd_find_busid(device->cdev->dev.bus_id); | 548 | devmap = dasd_find_busid(device->cdev->dev.bus_id); |
547 | if (IS_ERR(devmap)) | 549 | BUG_ON(IS_ERR(devmap)); |
548 | BUG(); | ||
549 | spin_lock(&dasd_devmap_lock); | 550 | spin_lock(&dasd_devmap_lock); |
550 | if (devmap->device != device) { | 551 | if (devmap->device != device) { |
551 | spin_unlock(&dasd_devmap_lock); | 552 | spin_unlock(&dasd_devmap_lock); |
@@ -715,10 +716,51 @@ dasd_discipline_show(struct device *dev, struct device_attribute *attr, char *bu | |||
715 | 716 | ||
716 | static DEVICE_ATTR(discipline, 0444, dasd_discipline_show, NULL); | 717 | static DEVICE_ATTR(discipline, 0444, dasd_discipline_show, NULL); |
717 | 718 | ||
719 | /* | ||
720 | * extended error-reporting | ||
721 | */ | ||
722 | static ssize_t | ||
723 | dasd_eer_show(struct device *dev, struct device_attribute *attr, char *buf) | ||
724 | { | ||
725 | struct dasd_devmap *devmap; | ||
726 | int eer_flag; | ||
727 | |||
728 | devmap = dasd_find_busid(dev->bus_id); | ||
729 | if (!IS_ERR(devmap) && devmap->device) | ||
730 | eer_flag = dasd_eer_enabled(devmap->device); | ||
731 | else | ||
732 | eer_flag = 0; | ||
733 | return snprintf(buf, PAGE_SIZE, eer_flag ? "1\n" : "0\n"); | ||
734 | } | ||
735 | |||
736 | static ssize_t | ||
737 | dasd_eer_store(struct device *dev, struct device_attribute *attr, | ||
738 | const char *buf, size_t count) | ||
739 | { | ||
740 | struct dasd_devmap *devmap; | ||
741 | int rc; | ||
742 | |||
743 | devmap = dasd_devmap_from_cdev(to_ccwdev(dev)); | ||
744 | if (IS_ERR(devmap)) | ||
745 | return PTR_ERR(devmap); | ||
746 | if (!devmap->device) | ||
747 | return count; | ||
748 | if (buf[0] == '1') { | ||
749 | rc = dasd_eer_enable(devmap->device); | ||
750 | if (rc) | ||
751 | return rc; | ||
752 | } else | ||
753 | dasd_eer_disable(devmap->device); | ||
754 | return count; | ||
755 | } | ||
756 | |||
757 | static DEVICE_ATTR(eer_enabled, 0644, dasd_eer_show, dasd_eer_store); | ||
758 | |||
718 | static struct attribute * dasd_attrs[] = { | 759 | static struct attribute * dasd_attrs[] = { |
719 | &dev_attr_readonly.attr, | 760 | &dev_attr_readonly.attr, |
720 | &dev_attr_discipline.attr, | 761 | &dev_attr_discipline.attr, |
721 | &dev_attr_use_diag.attr, | 762 | &dev_attr_use_diag.attr, |
763 | &dev_attr_eer_enabled.attr, | ||
722 | NULL, | 764 | NULL, |
723 | }; | 765 | }; |
724 | 766 | ||
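The hunk above wires a new eer_enabled attribute into dasd_attrs, so every DASD ccw device gains a writable sysfs switch: dasd_eer_store() treats a leading '1' as a request to call dasd_eer_enable() and anything else as dasd_eer_disable(). A minimal user-space sketch of flipping that switch follows; the /sys/bus/ccw/devices/<busid>/ path and the bus ID 0.0.4711 are illustrative assumptions, not taken from this diff.

/* Sketch: turn on DASD extended error reporting for one device.
 * The sysfs location and the bus ID below are assumptions for the
 * example; the attribute itself is the eer_enabled file added above. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	const char *attr = "/sys/bus/ccw/devices/0.0.4711/eer_enabled";
	int fd;

	fd = open(attr, O_WRONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* dasd_eer_store() only looks at the first character: '1' enables. */
	if (write(fd, "1", 1) != 1)
		perror("write");
	close(fd);
	return 0;
}

Reading the attribute back returns "1\n" or "0\n", as formatted by dasd_eer_show().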
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 822e2a265578..ee09ef33d08d 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -1227,19 +1227,14 @@ dasd_eckd_fill_info(struct dasd_device * device, | |||
1227 | * (see dasd_eckd_reserve) device. | 1227 | * (see dasd_eckd_reserve) device. |
1228 | */ | 1228 | */ |
1229 | static int | 1229 | static int |
1230 | dasd_eckd_release(struct block_device *bdev, int no, long args) | 1230 | dasd_eckd_release(struct dasd_device *device) |
1231 | { | 1231 | { |
1232 | struct dasd_device *device; | ||
1233 | struct dasd_ccw_req *cqr; | 1232 | struct dasd_ccw_req *cqr; |
1234 | int rc; | 1233 | int rc; |
1235 | 1234 | ||
1236 | if (!capable(CAP_SYS_ADMIN)) | 1235 | if (!capable(CAP_SYS_ADMIN)) |
1237 | return -EACCES; | 1236 | return -EACCES; |
1238 | 1237 | ||
1239 | device = bdev->bd_disk->private_data; | ||
1240 | if (device == NULL) | ||
1241 | return -ENODEV; | ||
1242 | |||
1243 | cqr = dasd_smalloc_request(dasd_eckd_discipline.name, | 1238 | cqr = dasd_smalloc_request(dasd_eckd_discipline.name, |
1244 | 1, 32, device); | 1239 | 1, 32, device); |
1245 | if (IS_ERR(cqr)) { | 1240 | if (IS_ERR(cqr)) { |
@@ -1272,19 +1267,14 @@ dasd_eckd_release(struct block_device *bdev, int no, long args) | |||
1272 | * the interrupt is outstanding for a certain time. | 1267 | * the interrupt is outstanding for a certain time. |
1273 | */ | 1268 | */ |
1274 | static int | 1269 | static int |
1275 | dasd_eckd_reserve(struct block_device *bdev, int no, long args) | 1270 | dasd_eckd_reserve(struct dasd_device *device) |
1276 | { | 1271 | { |
1277 | struct dasd_device *device; | ||
1278 | struct dasd_ccw_req *cqr; | 1272 | struct dasd_ccw_req *cqr; |
1279 | int rc; | 1273 | int rc; |
1280 | 1274 | ||
1281 | if (!capable(CAP_SYS_ADMIN)) | 1275 | if (!capable(CAP_SYS_ADMIN)) |
1282 | return -EACCES; | 1276 | return -EACCES; |
1283 | 1277 | ||
1284 | device = bdev->bd_disk->private_data; | ||
1285 | if (device == NULL) | ||
1286 | return -ENODEV; | ||
1287 | |||
1288 | cqr = dasd_smalloc_request(dasd_eckd_discipline.name, | 1278 | cqr = dasd_smalloc_request(dasd_eckd_discipline.name, |
1289 | 1, 32, device); | 1279 | 1, 32, device); |
1290 | if (IS_ERR(cqr)) { | 1280 | if (IS_ERR(cqr)) { |
@@ -1316,19 +1306,14 @@ dasd_eckd_reserve(struct block_device *bdev, int no, long args) | |||
1316 | * (unconditional reserve) | 1306 | * (unconditional reserve) |
1317 | */ | 1307 | */ |
1318 | static int | 1308 | static int |
1319 | dasd_eckd_steal_lock(struct block_device *bdev, int no, long args) | 1309 | dasd_eckd_steal_lock(struct dasd_device *device) |
1320 | { | 1310 | { |
1321 | struct dasd_device *device; | ||
1322 | struct dasd_ccw_req *cqr; | 1311 | struct dasd_ccw_req *cqr; |
1323 | int rc; | 1312 | int rc; |
1324 | 1313 | ||
1325 | if (!capable(CAP_SYS_ADMIN)) | 1314 | if (!capable(CAP_SYS_ADMIN)) |
1326 | return -EACCES; | 1315 | return -EACCES; |
1327 | 1316 | ||
1328 | device = bdev->bd_disk->private_data; | ||
1329 | if (device == NULL) | ||
1330 | return -ENODEV; | ||
1331 | |||
1332 | cqr = dasd_smalloc_request(dasd_eckd_discipline.name, | 1317 | cqr = dasd_smalloc_request(dasd_eckd_discipline.name, |
1333 | 1, 32, device); | 1318 | 1, 32, device); |
1334 | if (IS_ERR(cqr)) { | 1319 | if (IS_ERR(cqr)) { |
@@ -1358,19 +1343,14 @@ dasd_eckd_steal_lock(struct block_device *bdev, int no, long args) | |||
1358 | * Read performance statistics | 1343 | * Read performance statistics |
1359 | */ | 1344 | */ |
1360 | static int | 1345 | static int |
1361 | dasd_eckd_performance(struct block_device *bdev, int no, long args) | 1346 | dasd_eckd_performance(struct dasd_device *device, void __user *argp) |
1362 | { | 1347 | { |
1363 | struct dasd_device *device; | ||
1364 | struct dasd_psf_prssd_data *prssdp; | 1348 | struct dasd_psf_prssd_data *prssdp; |
1365 | struct dasd_rssd_perf_stats_t *stats; | 1349 | struct dasd_rssd_perf_stats_t *stats; |
1366 | struct dasd_ccw_req *cqr; | 1350 | struct dasd_ccw_req *cqr; |
1367 | struct ccw1 *ccw; | 1351 | struct ccw1 *ccw; |
1368 | int rc; | 1352 | int rc; |
1369 | 1353 | ||
1370 | device = bdev->bd_disk->private_data; | ||
1371 | if (device == NULL) | ||
1372 | return -ENODEV; | ||
1373 | |||
1374 | cqr = dasd_smalloc_request(dasd_eckd_discipline.name, | 1354 | cqr = dasd_smalloc_request(dasd_eckd_discipline.name, |
1375 | 1 /* PSF */ + 1 /* RSSD */ , | 1355 | 1 /* PSF */ + 1 /* RSSD */ , |
1376 | (sizeof (struct dasd_psf_prssd_data) + | 1356 | (sizeof (struct dasd_psf_prssd_data) + |
@@ -1414,8 +1394,9 @@ dasd_eckd_performance(struct block_device *bdev, int no, long args) | |||
1414 | /* Prepare for Read Subsystem Data */ | 1394 | /* Prepare for Read Subsystem Data */ |
1415 | prssdp = (struct dasd_psf_prssd_data *) cqr->data; | 1395 | prssdp = (struct dasd_psf_prssd_data *) cqr->data; |
1416 | stats = (struct dasd_rssd_perf_stats_t *) (prssdp + 1); | 1396 | stats = (struct dasd_rssd_perf_stats_t *) (prssdp + 1); |
1417 | rc = copy_to_user((long __user *) args, (long *) stats, | 1397 | if (copy_to_user(argp, stats, |
1418 | sizeof(struct dasd_rssd_perf_stats_t)); | 1398 | sizeof(struct dasd_rssd_perf_stats_t))) |
1399 | rc = -EFAULT; | ||
1419 | } | 1400 | } |
1420 | dasd_sfree_request(cqr, cqr->device); | 1401 | dasd_sfree_request(cqr, cqr->device); |
1421 | return rc; | 1402 | return rc; |
@@ -1426,27 +1407,22 @@ dasd_eckd_performance(struct block_device *bdev, int no, long args) | |||
1426 | * Returnes the cache attributes used in Define Extend (DE). | 1407 | * Returnes the cache attributes used in Define Extend (DE). |
1427 | */ | 1408 | */ |
1428 | static int | 1409 | static int |
1429 | dasd_eckd_get_attrib (struct block_device *bdev, int no, long args) | 1410 | dasd_eckd_get_attrib(struct dasd_device *device, void __user *argp) |
1430 | { | 1411 | { |
1431 | struct dasd_device *device; | 1412 | struct dasd_eckd_private *private = |
1432 | struct dasd_eckd_private *private; | 1413 | (struct dasd_eckd_private *)device->private; |
1433 | struct attrib_data_t attrib; | 1414 | struct attrib_data_t attrib = private->attrib; |
1434 | int rc; | 1415 | int rc; |
1435 | 1416 | ||
1436 | if (!capable(CAP_SYS_ADMIN)) | 1417 | if (!capable(CAP_SYS_ADMIN)) |
1437 | return -EACCES; | 1418 | return -EACCES; |
1438 | if (!args) | 1419 | if (!argp) |
1439 | return -EINVAL; | 1420 | return -EINVAL; |
1440 | 1421 | ||
1441 | device = bdev->bd_disk->private_data; | 1422 | rc = 0; |
1442 | if (device == NULL) | 1423 | if (copy_to_user(argp, (long *) &attrib, |
1443 | return -ENODEV; | 1424 | sizeof (struct attrib_data_t))) |
1444 | 1425 | rc = -EFAULT; | |
1445 | private = (struct dasd_eckd_private *) device->private; | ||
1446 | attrib = private->attrib; | ||
1447 | |||
1448 | rc = copy_to_user((long __user *) args, (long *) &attrib, | ||
1449 | sizeof (struct attrib_data_t)); | ||
1450 | 1426 | ||
1451 | return rc; | 1427 | return rc; |
1452 | } | 1428 | } |
@@ -1456,26 +1432,19 @@ dasd_eckd_get_attrib (struct block_device *bdev, int no, long args) | |||
1456 | * Stores the attributes for cache operation to be used in Define Extend (DE). | 1432 | * Stores the attributes for cache operation to be used in Define Extend (DE). |
1457 | */ | 1433 | */ |
1458 | static int | 1434 | static int |
1459 | dasd_eckd_set_attrib(struct block_device *bdev, int no, long args) | 1435 | dasd_eckd_set_attrib(struct dasd_device *device, void __user *argp) |
1460 | { | 1436 | { |
1461 | struct dasd_device *device; | 1437 | struct dasd_eckd_private *private = |
1462 | struct dasd_eckd_private *private; | 1438 | (struct dasd_eckd_private *)device->private; |
1463 | struct attrib_data_t attrib; | 1439 | struct attrib_data_t attrib; |
1464 | 1440 | ||
1465 | if (!capable(CAP_SYS_ADMIN)) | 1441 | if (!capable(CAP_SYS_ADMIN)) |
1466 | return -EACCES; | 1442 | return -EACCES; |
1467 | if (!args) | 1443 | if (!argp) |
1468 | return -EINVAL; | 1444 | return -EINVAL; |
1469 | 1445 | ||
1470 | device = bdev->bd_disk->private_data; | 1446 | if (copy_from_user(&attrib, argp, sizeof(struct attrib_data_t))) |
1471 | if (device == NULL) | ||
1472 | return -ENODEV; | ||
1473 | |||
1474 | if (copy_from_user(&attrib, (void __user *) args, | ||
1475 | sizeof (struct attrib_data_t))) { | ||
1476 | return -EFAULT; | 1447 | return -EFAULT; |
1477 | } | ||
1478 | private = (struct dasd_eckd_private *) device->private; | ||
1479 | private->attrib = attrib; | 1448 | private->attrib = attrib; |
1480 | 1449 | ||
1481 | DEV_MESSAGE(KERN_INFO, device, | 1450 | DEV_MESSAGE(KERN_INFO, device, |
@@ -1484,6 +1453,27 @@ dasd_eckd_set_attrib(struct block_device *bdev, int no, long args) | |||
1484 | return 0; | 1453 | return 0; |
1485 | } | 1454 | } |
1486 | 1455 | ||
1456 | static int | ||
1457 | dasd_eckd_ioctl(struct dasd_device *device, unsigned int cmd, void __user *argp) | ||
1458 | { | ||
1459 | switch (cmd) { | ||
1460 | case BIODASDGATTR: | ||
1461 | return dasd_eckd_get_attrib(device, argp); | ||
1462 | case BIODASDSATTR: | ||
1463 | return dasd_eckd_set_attrib(device, argp); | ||
1464 | case BIODASDPSRD: | ||
1465 | return dasd_eckd_performance(device, argp); | ||
1466 | case BIODASDRLSE: | ||
1467 | return dasd_eckd_release(device); | ||
1468 | case BIODASDRSRV: | ||
1469 | return dasd_eckd_reserve(device); | ||
1470 | case BIODASDSLCK: | ||
1471 | return dasd_eckd_steal_lock(device); | ||
1472 | default: | ||
1473 | return -ENOIOCTLCMD; | ||
1474 | } | ||
1475 | } | ||
1476 | |||
1487 | /* | 1477 | /* |
1488 | * Print sense data and related channel program. | 1478 | * Print sense data and related channel program. |
1489 | * Parts are printed because printk buffer is only 1024 bytes. | 1479 | * Parts are printed because printk buffer is only 1024 bytes. |
@@ -1642,6 +1632,7 @@ static struct dasd_discipline dasd_eckd_discipline = { | |||
1642 | .free_cp = dasd_eckd_free_cp, | 1632 | .free_cp = dasd_eckd_free_cp, |
1643 | .dump_sense = dasd_eckd_dump_sense, | 1633 | .dump_sense = dasd_eckd_dump_sense, |
1644 | .fill_info = dasd_eckd_fill_info, | 1634 | .fill_info = dasd_eckd_fill_info, |
1635 | .ioctl = dasd_eckd_ioctl, | ||
1645 | }; | 1636 | }; |
1646 | 1637 | ||
1647 | static int __init | 1638 | static int __init |
@@ -1649,59 +1640,18 @@ dasd_eckd_init(void) | |||
1649 | { | 1640 | { |
1650 | int ret; | 1641 | int ret; |
1651 | 1642 | ||
1652 | dasd_ioctl_no_register(THIS_MODULE, BIODASDGATTR, | ||
1653 | dasd_eckd_get_attrib); | ||
1654 | dasd_ioctl_no_register(THIS_MODULE, BIODASDSATTR, | ||
1655 | dasd_eckd_set_attrib); | ||
1656 | dasd_ioctl_no_register(THIS_MODULE, BIODASDPSRD, | ||
1657 | dasd_eckd_performance); | ||
1658 | dasd_ioctl_no_register(THIS_MODULE, BIODASDRLSE, | ||
1659 | dasd_eckd_release); | ||
1660 | dasd_ioctl_no_register(THIS_MODULE, BIODASDRSRV, | ||
1661 | dasd_eckd_reserve); | ||
1662 | dasd_ioctl_no_register(THIS_MODULE, BIODASDSLCK, | ||
1663 | dasd_eckd_steal_lock); | ||
1664 | |||
1665 | ASCEBC(dasd_eckd_discipline.ebcname, 4); | 1643 | ASCEBC(dasd_eckd_discipline.ebcname, 4); |
1666 | 1644 | ||
1667 | ret = ccw_driver_register(&dasd_eckd_driver); | 1645 | ret = ccw_driver_register(&dasd_eckd_driver); |
1668 | if (ret) { | 1646 | if (!ret) |
1669 | dasd_ioctl_no_unregister(THIS_MODULE, BIODASDGATTR, | 1647 | dasd_generic_auto_online(&dasd_eckd_driver); |
1670 | dasd_eckd_get_attrib); | 1648 | return ret; |
1671 | dasd_ioctl_no_unregister(THIS_MODULE, BIODASDSATTR, | ||
1672 | dasd_eckd_set_attrib); | ||
1673 | dasd_ioctl_no_unregister(THIS_MODULE, BIODASDPSRD, | ||
1674 | dasd_eckd_performance); | ||
1675 | dasd_ioctl_no_unregister(THIS_MODULE, BIODASDRLSE, | ||
1676 | dasd_eckd_release); | ||
1677 | dasd_ioctl_no_unregister(THIS_MODULE, BIODASDRSRV, | ||
1678 | dasd_eckd_reserve); | ||
1679 | dasd_ioctl_no_unregister(THIS_MODULE, BIODASDSLCK, | ||
1680 | dasd_eckd_steal_lock); | ||
1681 | return ret; | ||
1682 | } | ||
1683 | |||
1684 | dasd_generic_auto_online(&dasd_eckd_driver); | ||
1685 | return 0; | ||
1686 | } | 1649 | } |
1687 | 1650 | ||
1688 | static void __exit | 1651 | static void __exit |
1689 | dasd_eckd_cleanup(void) | 1652 | dasd_eckd_cleanup(void) |
1690 | { | 1653 | { |
1691 | ccw_driver_unregister(&dasd_eckd_driver); | 1654 | ccw_driver_unregister(&dasd_eckd_driver); |
1692 | |||
1693 | dasd_ioctl_no_unregister(THIS_MODULE, BIODASDGATTR, | ||
1694 | dasd_eckd_get_attrib); | ||
1695 | dasd_ioctl_no_unregister(THIS_MODULE, BIODASDSATTR, | ||
1696 | dasd_eckd_set_attrib); | ||
1697 | dasd_ioctl_no_unregister(THIS_MODULE, BIODASDPSRD, | ||
1698 | dasd_eckd_performance); | ||
1699 | dasd_ioctl_no_unregister(THIS_MODULE, BIODASDRLSE, | ||
1700 | dasd_eckd_release); | ||
1701 | dasd_ioctl_no_unregister(THIS_MODULE, BIODASDRSRV, | ||
1702 | dasd_eckd_reserve); | ||
1703 | dasd_ioctl_no_unregister(THIS_MODULE, BIODASDSLCK, | ||
1704 | dasd_eckd_steal_lock); | ||
1705 | } | 1655 | } |
1706 | 1656 | ||
1707 | module_init(dasd_eckd_init); | 1657 | module_init(dasd_eckd_init); |
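With this change dasd_eckd.c no longer registers its ioctls one by one; it exports a single dasd_eckd_ioctl() dispatcher through the discipline's new .ioctl hook and returns -ENOIOCTLCMD for commands it does not know. The matching change to the generic ioctl code in dasd_ioctl.c is not part of the hunks shown here, so the following is only a sketch of the dispatch it implies, with hypothetical function and variable names:

/* Illustrative only: how a generic dasd ioctl path can defer to the
 * discipline callback set above (.ioctl = dasd_eckd_ioctl).  Types come
 * from dasd_int.h; the function name is made up for this sketch. */
static int dasd_do_discipline_ioctl(struct dasd_device *device,
				    unsigned int cmd, void __user *argp)
{
	if (!device->discipline || !device->discipline->ioctl)
		return -EINVAL;

	/* -ENOIOCTLCMD means "not mine"; the caller may try other handlers. */
	return device->discipline->ioctl(device, cmd, argp);
}

This keeps the ECKD-only commands (BIODASDGATTR, BIODASDSATTR, BIODASDPSRD, BIODASDRLSE, BIODASDRSRV and BIODASDSLCK) inside the discipline and removes the register/unregister bookkeeping that previously dominated dasd_eckd_init() and dasd_eckd_cleanup().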
diff --git a/drivers/s390/block/dasd_eckd.h b/drivers/s390/block/dasd_eckd.h
index bc3823d35223..ad8524bb7bb3 100644
--- a/drivers/s390/block/dasd_eckd.h
+++ b/drivers/s390/block/dasd_eckd.h
@@ -29,6 +29,7 @@ | |||
29 | #define DASD_ECKD_CCW_PSF 0x27 | 29 | #define DASD_ECKD_CCW_PSF 0x27 |
30 | #define DASD_ECKD_CCW_RSSD 0x3e | 30 | #define DASD_ECKD_CCW_RSSD 0x3e |
31 | #define DASD_ECKD_CCW_LOCATE_RECORD 0x47 | 31 | #define DASD_ECKD_CCW_LOCATE_RECORD 0x47 |
32 | #define DASD_ECKD_CCW_SNSS 0x54 | ||
32 | #define DASD_ECKD_CCW_DEFINE_EXTENT 0x63 | 33 | #define DASD_ECKD_CCW_DEFINE_EXTENT 0x63 |
33 | #define DASD_ECKD_CCW_WRITE_MT 0x85 | 34 | #define DASD_ECKD_CCW_WRITE_MT 0x85 |
34 | #define DASD_ECKD_CCW_READ_MT 0x86 | 35 | #define DASD_ECKD_CCW_READ_MT 0x86 |
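The new dasd_eer.c below implements the character device advertised by CONFIG_DASD_EER. Every record delivered to a reader starts with a struct dasd_eer_header, is followed by any available sense data, and is terminated by the string "EOR". A rough sketch of a consumer is given here; the /dev/dasd_eer node name and the user-space copy of the header layout are assumptions, since the misc device registration is outside the lines shown in this section.

/* Sketch: read one extended error reporting record from the EER
 * character device.  The node name and this copy of the header layout
 * are assumptions for illustration; see struct dasd_eer_header below. */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

struct eer_header {			/* mirrors struct dasd_eer_header */
	uint32_t total_size;
	uint32_t trigger;
	uint64_t tv_sec;
	uint64_t tv_usec;
	char busid[10];			/* DASD_EER_BUSID_SIZE */
};

int main(void)
{
	char record[4096];
	struct eer_header hdr;
	ssize_t len;
	int fd;

	fd = open("/dev/dasd_eer", O_RDONLY);	/* assumed node name */
	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* dasd_eer_read() hands out at most one record per read, so a
	 * sufficiently large buffer receives header, sense data and "EOR". */
	len = read(fd, record, sizeof(record));
	if (len >= (ssize_t) sizeof(hdr)) {
		memcpy(&hdr, record, sizeof(hdr));
		printf("trigger %u from %s, %u bytes total\n",
		       (unsigned int) hdr.trigger, hdr.busid,
		       (unsigned int) hdr.total_size);
	}
	close(fd);
	return 0;
}

The trigger values correspond to DASD_EER_FATALERROR, DASD_EER_NOPATH, DASD_EER_PPRCSUSPEND and DASD_EER_STATECHANGE, which are described in the comment block near the top of dasd_eer.c.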
diff --git a/drivers/s390/block/dasd_eer.c b/drivers/s390/block/dasd_eer.c
new file mode 100644
index 000000000000..2d946b6ca074
--- /dev/null
+++ b/drivers/s390/block/dasd_eer.c
@@ -0,0 +1,682 @@ | |||
1 | /* | ||
2 | * Character device driver for extended error reporting. | ||
3 | * | ||
4 | * Copyright (C) 2005 IBM Corporation | ||
5 | * extended error reporting for DASD ECKD devices | ||
6 | * Author(s): Stefan Weinhuber <wein@de.ibm.com> | ||
7 | */ | ||
8 | |||
9 | #include <linux/init.h> | ||
10 | #include <linux/fs.h> | ||
11 | #include <linux/kernel.h> | ||
12 | #include <linux/miscdevice.h> | ||
13 | #include <linux/module.h> | ||
14 | #include <linux/moduleparam.h> | ||
15 | #include <linux/device.h> | ||
16 | #include <linux/poll.h> | ||
17 | |||
18 | #include <asm/uaccess.h> | ||
19 | #include <asm/semaphore.h> | ||
20 | #include <asm/atomic.h> | ||
21 | #include <asm/ebcdic.h> | ||
22 | |||
23 | #include "dasd_int.h" | ||
24 | #include "dasd_eckd.h" | ||
25 | |||
26 | #ifdef PRINTK_HEADER | ||
27 | #undef PRINTK_HEADER | ||
28 | #endif /* PRINTK_HEADER */ | ||
29 | #define PRINTK_HEADER "dasd(eer):" | ||
30 | |||
31 | /* | ||
32 | * SECTION: the internal buffer | ||
33 | */ | ||
34 | |||
35 | /* | ||
36 | * The internal buffer is meant to store opaque blobs of data, so it does | ||
37 | * not know of higher level concepts like triggers. | ||
38 | * It consists of a number of pages that are used as a ringbuffer. Each data | ||
39 | * blob is stored in a simple record that consists of an integer, which | ||
40 | * contains the size of the following data, and the data bytes themselves. | ||
41 | * | ||
42 | * To allow for multiple independent readers we create one internal buffer | ||
43 | * each time the device is opened and destroy the buffer when the file is | ||
44 | * closed again. The number of pages used for this buffer is determined by | ||
45 | * the module parameter eer_pages. | ||
46 | * | ||
47 | * One record can be written to a buffer by using the functions | ||
48 | * - dasd_eer_start_record (one time per record to write the size to the | ||
49 | * buffer and reserve the space for the data) | ||
50 | * - dasd_eer_write_buffer (one or more times per record to write the data) | ||
51 | * The data can be written in several steps but you will have to compute | ||
52 | * the total size up front for the invocation of dasd_eer_start_record. | ||
53 | * If the ringbuffer is full, dasd_eer_start_record will remove the required | ||
54 | * number of old records. | ||
55 | * | ||
56 | * A record is typically read in two steps, first read the integer that | ||
57 | * specifies the size of the following data, then read the data. | ||
58 | * Both can be done by | ||
59 | * - dasd_eer_read_buffer | ||
60 | * | ||
61 | * For all mentioned functions you need to get the bufferlock first and keep | ||
62 | * it until a complete record is written or read. | ||
63 | * | ||
64 | * All information necessary to keep track of an internal buffer is kept in | ||
65 | * a struct eerbuffer. The buffer specific to a file pointer is stored in | ||
66 | * the private_data field of that file. To be able to write data to all | ||
67 | * existing buffers, each buffer is also added to the bufferlist. | ||
68 | * If the user does not want to read a complete record in one go, we have to | ||
69 | * keep track of the rest of the record. residual stores the number of bytes | ||
70 | * that are still to deliver. If the rest of the record is invalidated between | ||
71 | * two reads then residual will be set to -1 so that the next read will fail. | ||
72 | * All entries in the eerbuffer structure are protected with the bufferlock. | ||
73 | * To avoid races between writing to a buffer on the one side and creating | ||
74 | * and destroying buffers on the other side, the bufferlock must also be used | ||
75 | * to protect the bufferlist. | ||
76 | */ | ||
77 | */ | ||
78 | static int eer_pages = 5; | ||
79 | module_param(eer_pages, int, S_IRUGO|S_IWUSR); | ||
80 | |||
81 | struct eerbuffer { | ||
82 | struct list_head list; | ||
83 | char **buffer; | ||
84 | int buffersize; | ||
85 | int buffer_page_count; | ||
86 | int head; | ||
87 | int tail; | ||
88 | int residual; | ||
89 | }; | ||
90 | |||
91 | static LIST_HEAD(bufferlist); | ||
92 | static spinlock_t bufferlock = SPIN_LOCK_UNLOCKED; | ||
93 | static DECLARE_WAIT_QUEUE_HEAD(dasd_eer_read_wait_queue); | ||
94 | |||
95 | /* | ||
96 | * How many free bytes are available on the buffer. | ||
97 | * Needs to be called with bufferlock held. | ||
98 | */ | ||
99 | static int dasd_eer_get_free_bytes(struct eerbuffer *eerb) | ||
100 | { | ||
101 | if (eerb->head < eerb->tail) | ||
102 | return eerb->tail - eerb->head - 1; | ||
103 | return eerb->buffersize - eerb->head + eerb->tail -1; | ||
104 | } | ||
105 | |||
106 | /* | ||
107 | * How many bytes of buffer space are used. | ||
108 | * Needs to be called with bufferlock held. | ||
109 | */ | ||
110 | static int dasd_eer_get_filled_bytes(struct eerbuffer *eerb) | ||
111 | { | ||
112 | |||
113 | if (eerb->head >= eerb->tail) | ||
114 | return eerb->head - eerb->tail; | ||
115 | return eerb->buffersize - eerb->tail + eerb->head; | ||
116 | } | ||
117 | |||
118 | /* | ||
119 | * The dasd_eer_write_buffer function just copies count bytes of data | ||
120 | * to the buffer. Make sure to call dasd_eer_start_record first, to | ||
121 | * make sure that enough free space is available. | ||
122 | * Needs to be called with bufferlock held. | ||
123 | */ | ||
124 | static void dasd_eer_write_buffer(struct eerbuffer *eerb, | ||
125 | char *data, int count) | ||
126 | { | ||
127 | |||
128 | unsigned long headindex,localhead; | ||
129 | unsigned long rest, len; | ||
130 | char *nextdata; | ||
131 | |||
132 | nextdata = data; | ||
133 | rest = count; | ||
134 | while (rest > 0) { | ||
135 | headindex = eerb->head / PAGE_SIZE; | ||
136 | localhead = eerb->head % PAGE_SIZE; | ||
137 | len = min(rest, PAGE_SIZE - localhead); | ||
138 | memcpy(eerb->buffer[headindex]+localhead, nextdata, len); | ||
139 | nextdata += len; | ||
140 | rest -= len; | ||
141 | eerb->head += len; | ||
142 | if (eerb->head == eerb->buffersize) | ||
143 | eerb->head = 0; /* wrap around */ | ||
144 | BUG_ON(eerb->head > eerb->buffersize); | ||
145 | } | ||
146 | } | ||
147 | |||
148 | /* | ||
149 | * Needs to be called with bufferlock held. | ||
150 | */ | ||
151 | static int dasd_eer_read_buffer(struct eerbuffer *eerb, char *data, int count) | ||
152 | { | ||
153 | |||
154 | unsigned long tailindex,localtail; | ||
155 | unsigned long rest, len, finalcount; | ||
156 | char *nextdata; | ||
157 | |||
158 | finalcount = min(count, dasd_eer_get_filled_bytes(eerb)); | ||
159 | nextdata = data; | ||
160 | rest = finalcount; | ||
161 | while (rest > 0) { | ||
162 | tailindex = eerb->tail / PAGE_SIZE; | ||
163 | localtail = eerb->tail % PAGE_SIZE; | ||
164 | len = min(rest, PAGE_SIZE - localtail); | ||
165 | memcpy(nextdata, eerb->buffer[tailindex] + localtail, len); | ||
166 | nextdata += len; | ||
167 | rest -= len; | ||
168 | eerb->tail += len; | ||
169 | if (eerb->tail == eerb->buffersize) | ||
170 | eerb->tail = 0; /* wrap around */ | ||
171 | BUG_ON(eerb->tail > eerb->buffersize); | ||
172 | } | ||
173 | return finalcount; | ||
174 | } | ||
175 | |||
176 | /* | ||
177 | * Whenever you want to write a blob of data to the internal buffer you | ||
178 | * have to start by using this function first. It will write the number | ||
179 | * of bytes that will be written to the buffer. If necessary it will remove | ||
180 | * old records to make room for the new one. | ||
181 | * Needs to be called with bufferlock held. | ||
182 | */ | ||
183 | static int dasd_eer_start_record(struct eerbuffer *eerb, int count) | ||
184 | { | ||
185 | int tailcount; | ||
186 | |||
187 | if (count + sizeof(count) > eerb->buffersize) | ||
188 | return -ENOMEM; | ||
189 | while (dasd_eer_get_free_bytes(eerb) < count + sizeof(count)) { | ||
190 | if (eerb->residual > 0) { | ||
191 | eerb->tail += eerb->residual; | ||
192 | if (eerb->tail >= eerb->buffersize) | ||
193 | eerb->tail -= eerb->buffersize; | ||
194 | eerb->residual = -1; | ||
195 | } | ||
196 | dasd_eer_read_buffer(eerb, (char *) &tailcount, | ||
197 | sizeof(tailcount)); | ||
198 | eerb->tail += tailcount; | ||
199 | if (eerb->tail >= eerb->buffersize) | ||
200 | eerb->tail -= eerb->buffersize; | ||
201 | } | ||
202 | dasd_eer_write_buffer(eerb, (char*) &count, sizeof(count)); | ||
203 | |||
204 | return 0; | ||
205 | }; | ||
206 | |||
207 | /* | ||
208 | * Release pages that are not used anymore. | ||
209 | */ | ||
210 | static void dasd_eer_free_buffer_pages(char **buf, int no_pages) | ||
211 | { | ||
212 | int i; | ||
213 | |||
214 | for (i = 0; i < no_pages; i++) | ||
215 | free_page((unsigned long) buf[i]); | ||
216 | } | ||
217 | |||
218 | /* | ||
219 | * Allocate a new set of memory pages. | ||
220 | */ | ||
221 | static int dasd_eer_allocate_buffer_pages(char **buf, int no_pages) | ||
222 | { | ||
223 | int i; | ||
224 | |||
225 | for (i = 0; i < no_pages; i++) { | ||
226 | buf[i] = (char *) get_zeroed_page(GFP_KERNEL); | ||
227 | if (!buf[i]) { | ||
228 | dasd_eer_free_buffer_pages(buf, i); | ||
229 | return -ENOMEM; | ||
230 | } | ||
231 | } | ||
232 | return 0; | ||
233 | } | ||
234 | |||
235 | /* | ||
236 | * SECTION: The extended error reporting functionality | ||
237 | */ | ||
238 | |||
239 | /* | ||
240 | * When a DASD device driver wants to report an error, it calls the | ||
241 | * function dasd_eer_write and gives the respective trigger ID as | ||
242 | * parameter. Currently there are four kinds of triggers: | ||
243 | * | ||
244 | * DASD_EER_FATALERROR: all kinds of unrecoverable I/O problems | ||
245 | * DASD_EER_PPRCSUSPEND: PPRC was suspended | ||
246 | * DASD_EER_NOPATH: There is no path to the device left. | ||
247 | * DASD_EER_STATECHANGE: The state of the device has changed. | ||
248 | * | ||
249 | * For the first three triggers all required information can be supplied by | ||
250 | * the caller. For these triggers a record is written by the function | ||
251 | * dasd_eer_write_standard_trigger. | ||
252 | * | ||
253 | * The DASD_EER_STATECHANGE trigger is special since a sense subsystem | ||
254 | * status ccw needs to be executed to gather the necessary sense data first. | ||
255 | * The dasd_eer_snss function will queue the SNSS request and the request | ||
256 | * callback will then call dasd_eer_write with the DASD_EER_STATECHANGE | ||
257 | * trigger. | ||
258 | * | ||
259 | * To avoid memory allocations at runtime, the necessary memory is allocated | ||
260 | * when the extended error reporting is enabled for a device (by | ||
261 | * dasd_eer_enable). There is one sense subsystem status request for each | ||
262 | * eer enabled DASD device. The presence of the cqr in device->eer_cqr | ||
263 | * indicates that eer is enabled for the device. The use of the snss request | ||
264 | * is protected by the DASD_FLAG_EER_IN_USE bit. When this flag indicates | ||
265 | * that the cqr is currently in use, dasd_eer_snss cannot start a second | ||
266 | * request but sets the DASD_FLAG_EER_SNSS flag instead. The callback of | ||
267 | * the SNSS request will check the bit and call dasd_eer_snss again. | ||
268 | */ | ||
269 | |||
270 | #define SNSS_DATA_SIZE 44 | ||
271 | |||
272 | #define DASD_EER_BUSID_SIZE 10 | ||
273 | struct dasd_eer_header { | ||
274 | __u32 total_size; | ||
275 | __u32 trigger; | ||
276 | __u64 tv_sec; | ||
277 | __u64 tv_usec; | ||
278 | char busid[DASD_EER_BUSID_SIZE]; | ||
279 | }; | ||
280 | |||
281 | /* | ||
282 | * The following function can be used for those triggers that have | ||
283 | * all necessary data available when the function is called. | ||
284 | * If the parameter cqr is not NULL, the chain of requests will be searched | ||
285 | * for valid sense data, and all valid sense data sets will be added to | ||
286 | * the triggers data. | ||
287 | */ | ||
288 | static void dasd_eer_write_standard_trigger(struct dasd_device *device, | ||
289 | struct dasd_ccw_req *cqr, | ||
290 | int trigger) | ||
291 | { | ||
292 | struct dasd_ccw_req *temp_cqr; | ||
293 | int data_size; | ||
294 | struct timeval tv; | ||
295 | struct dasd_eer_header header; | ||
296 | unsigned long flags; | ||
297 | struct eerbuffer *eerb; | ||
298 | |||
299 | /* go through cqr chain and count the valid sense data sets */ | ||
300 | data_size = 0; | ||
301 | for (temp_cqr = cqr; temp_cqr; temp_cqr = temp_cqr->refers) | ||
302 | if (temp_cqr->irb.esw.esw0.erw.cons) | ||
303 | data_size += 32; | ||
304 | |||
305 | header.total_size = sizeof(header) + data_size + 4; /* "EOR" */ | ||
306 | header.trigger = trigger; | ||
307 | do_gettimeofday(&tv); | ||
308 | header.tv_sec = tv.tv_sec; | ||
309 | header.tv_usec = tv.tv_usec; | ||
310 | strncpy(header.busid, device->cdev->dev.bus_id, DASD_EER_BUSID_SIZE); | ||
311 | |||
312 | spin_lock_irqsave(&bufferlock, flags); | ||
313 | list_for_each_entry(eerb, &bufferlist, list) { | ||
314 | dasd_eer_start_record(eerb, header.total_size); | ||
315 | dasd_eer_write_buffer(eerb, (char *) &header, sizeof(header)); | ||
316 | for (temp_cqr = cqr; temp_cqr; temp_cqr = temp_cqr->refers) | ||
317 | if (temp_cqr->irb.esw.esw0.erw.cons) | ||
318 | dasd_eer_write_buffer(eerb, temp_cqr->irb.ecw, 32); | ||
319 | dasd_eer_write_buffer(eerb, "EOR", 4); | ||
320 | } | ||
321 | spin_unlock_irqrestore(&bufferlock, flags); | ||
322 | wake_up_interruptible(&dasd_eer_read_wait_queue); | ||
323 | } | ||
324 | |||
325 | /* | ||
326 | * This function writes a DASD_EER_STATECHANGE trigger. | ||
327 | */ | ||
328 | static void dasd_eer_write_snss_trigger(struct dasd_device *device, | ||
329 | struct dasd_ccw_req *cqr, | ||
330 | int trigger) | ||
331 | { | ||
332 | int data_size; | ||
333 | int snss_rc; | ||
334 | struct timeval tv; | ||
335 | struct dasd_eer_header header; | ||
336 | unsigned long flags; | ||
337 | struct eerbuffer *eerb; | ||
338 | |||
339 | snss_rc = (cqr->status == DASD_CQR_FAILED) ? -EIO : 0; | ||
340 | if (snss_rc) | ||
341 | data_size = 0; | ||
342 | else | ||
343 | data_size = SNSS_DATA_SIZE; | ||
344 | |||
345 | header.total_size = sizeof(header) + data_size + 4; /* "EOR" */ | ||
346 | header.trigger = DASD_EER_STATECHANGE; | ||
347 | do_gettimeofday(&tv); | ||
348 | header.tv_sec = tv.tv_sec; | ||
349 | header.tv_usec = tv.tv_usec; | ||
350 | strncpy(header.busid, device->cdev->dev.bus_id, DASD_EER_BUSID_SIZE); | ||
351 | |||
352 | spin_lock_irqsave(&bufferlock, flags); | ||
353 | list_for_each_entry(eerb, &bufferlist, list) { | ||
354 | dasd_eer_start_record(eerb, header.total_size); | ||
355 | dasd_eer_write_buffer(eerb, (char *) &header , sizeof(header)); | ||
356 | if (!snss_rc) | ||
357 | dasd_eer_write_buffer(eerb, cqr->data, SNSS_DATA_SIZE); | ||
358 | dasd_eer_write_buffer(eerb, "EOR", 4); | ||
359 | } | ||
360 | spin_unlock_irqrestore(&bufferlock, flags); | ||
361 | wake_up_interruptible(&dasd_eer_read_wait_queue); | ||
362 | } | ||
363 | |||
364 | /* | ||
365 | * This function is called for all triggers. It calls the appropriate | ||
366 | * function that writes the actual trigger records. | ||
367 | */ | ||
368 | void dasd_eer_write(struct dasd_device *device, struct dasd_ccw_req *cqr, | ||
369 | unsigned int id) | ||
370 | { | ||
371 | if (!device->eer_cqr) | ||
372 | return; | ||
373 | switch (id) { | ||
374 | case DASD_EER_FATALERROR: | ||
375 | case DASD_EER_PPRCSUSPEND: | ||
376 | dasd_eer_write_standard_trigger(device, cqr, id); | ||
377 | break; | ||
378 | case DASD_EER_NOPATH: | ||
379 | dasd_eer_write_standard_trigger(device, NULL, id); | ||
380 | break; | ||
381 | case DASD_EER_STATECHANGE: | ||
382 | dasd_eer_write_snss_trigger(device, cqr, id); | ||
383 | break; | ||
384 | default: /* unknown trigger, so we write it without any sense data */ | ||
385 | dasd_eer_write_standard_trigger(device, NULL, id); | ||
386 | break; | ||
387 | } | ||
388 | } | ||
389 | EXPORT_SYMBOL(dasd_eer_write); | ||
390 | |||
391 | /* | ||
392 | * Start a sense subsystem status request. | ||
393 | * Needs to be called with the device held. | ||
394 | */ | ||
395 | void dasd_eer_snss(struct dasd_device *device) | ||
396 | { | ||
397 | struct dasd_ccw_req *cqr; | ||
398 | |||
399 | cqr = device->eer_cqr; | ||
400 | if (!cqr) /* Device not eer enabled. */ | ||
401 | return; | ||
402 | if (test_and_set_bit(DASD_FLAG_EER_IN_USE, &device->flags)) { | ||
403 | /* Sense subsystem status request in use. */ | ||
404 | set_bit(DASD_FLAG_EER_SNSS, &device->flags); | ||
405 | return; | ||
406 | } | ||
407 | clear_bit(DASD_FLAG_EER_SNSS, &device->flags); | ||
408 | cqr->status = DASD_CQR_QUEUED; | ||
409 | list_add(&cqr->list, &device->ccw_queue); | ||
410 | dasd_schedule_bh(device); | ||
411 | } | ||
412 | |||
413 | /* | ||
414 | * Callback function for use with sense subsystem status request. | ||
415 | */ | ||
416 | static void dasd_eer_snss_cb(struct dasd_ccw_req *cqr, void *data) | ||
417 | { | ||
418 | struct dasd_device *device = cqr->device; | ||
419 | unsigned long flags; | ||
420 | |||
421 | dasd_eer_write(device, cqr, DASD_EER_STATECHANGE); | ||
422 | spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); | ||
423 | if (device->eer_cqr == cqr) { | ||
424 | clear_bit(DASD_FLAG_EER_IN_USE, &device->flags); | ||
425 | if (test_bit(DASD_FLAG_EER_SNSS, &device->flags)) | ||
426 | /* Another SNSS has been requested in the meantime. */ | ||
427 | dasd_eer_snss(device); | ||
428 | cqr = NULL; | ||
429 | } | ||
430 | spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); | ||
431 | if (cqr) | ||
432 | /* | ||
433 | * Extended error recovery has been switched off while | ||
434 | * the SNSS request was running. It could even have | ||
435 | * been switched off and on again in which case there | ||
436 | * is a new ccw in device->eer_cqr. Free the "old" | ||
437 | * snss request now. | ||
438 | */ | ||
439 | dasd_kfree_request(cqr, device); | ||
440 | } | ||
441 | |||
442 | /* | ||
443 | * Enable error reporting on a given device. | ||
444 | */ | ||
445 | int dasd_eer_enable(struct dasd_device *device) | ||
446 | { | ||
447 | struct dasd_ccw_req *cqr; | ||
448 | unsigned long flags; | ||
449 | |||
450 | if (device->eer_cqr) | ||
451 | return 0; | ||
452 | |||
453 | if (!device->discipline || strcmp(device->discipline->name, "ECKD")) | ||
454 | return -EPERM; /* FIXME: -EMEDIUMTYPE ? */ | ||
455 | |||
456 | cqr = dasd_kmalloc_request("ECKD", 1 /* SNSS */, | ||
457 | SNSS_DATA_SIZE, device); | ||
458 | if (IS_ERR(cqr)) | ||
459 | return -ENOMEM; | ||
460 | |||
461 | cqr->device = device; | ||
462 | cqr->retries = 255; | ||
463 | cqr->expires = 10 * HZ; | ||
464 | |||
465 | cqr->cpaddr->cmd_code = DASD_ECKD_CCW_SNSS; | ||
466 | cqr->cpaddr->count = SNSS_DATA_SIZE; | ||
467 | cqr->cpaddr->flags = 0; | ||
468 | cqr->cpaddr->cda = (__u32)(addr_t) cqr->data; | ||
469 | |||
470 | cqr->buildclk = get_clock(); | ||
471 | cqr->status = DASD_CQR_FILLED; | ||
472 | cqr->callback = dasd_eer_snss_cb; | ||
473 | |||
474 | spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); | ||
475 | if (!device->eer_cqr) { | ||
476 | device->eer_cqr = cqr; | ||
477 | cqr = NULL; | ||
478 | } | ||
479 | spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); | ||
480 | if (cqr) | ||
481 | dasd_kfree_request(cqr, device); | ||
482 | return 0; | ||
483 | } | ||
484 | |||
485 | /* | ||
486 | * Disable error reporting on a given device. | ||
487 | */ | ||
488 | void dasd_eer_disable(struct dasd_device *device) | ||
489 | { | ||
490 | struct dasd_ccw_req *cqr; | ||
491 | unsigned long flags; | ||
492 | int in_use; | ||
493 | |||
494 | if (!device->eer_cqr) | ||
495 | return; | ||
496 | spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); | ||
497 | cqr = device->eer_cqr; | ||
498 | device->eer_cqr = NULL; | ||
499 | clear_bit(DASD_FLAG_EER_SNSS, &device->flags); | ||
500 | in_use = test_and_clear_bit(DASD_FLAG_EER_IN_USE, &device->flags); | ||
501 | spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); | ||
502 | if (cqr && !in_use) | ||
503 | dasd_kfree_request(cqr, device); | ||
504 | } | ||
505 | |||
506 | /* | ||
507 | * SECTION: the device operations | ||
508 | */ | ||
509 | |||
510 | /* | ||
511 | * On the one side we need a lock to access our internal buffer, on the | ||
512 | * other side a copy_to_user can sleep. So we need to copy the data we have | ||
513 | * to transfer in a readbuffer, which is protected by the readbuffer_mutex. | ||
514 | */ | ||
515 | static char readbuffer[PAGE_SIZE]; | ||
516 | static DECLARE_MUTEX(readbuffer_mutex); | ||
517 | |||
518 | static int dasd_eer_open(struct inode *inp, struct file *filp) | ||
519 | { | ||
520 | struct eerbuffer *eerb; | ||
521 | unsigned long flags; | ||
522 | |||
523 | eerb = kzalloc(sizeof(struct eerbuffer), GFP_KERNEL); | ||
524 | eerb->buffer_page_count = eer_pages; | ||
525 | if (eerb->buffer_page_count < 1 || | ||
526 | eerb->buffer_page_count > INT_MAX / PAGE_SIZE) { | ||
527 | kfree(eerb); | ||
528 | MESSAGE(KERN_WARNING, "can't open device since module " | ||
529 | "parameter eer_pages is smaller then 1 or" | ||
530 | " bigger then %d", (int)(INT_MAX / PAGE_SIZE)); | ||
531 | return -EINVAL; | ||
532 | } | ||
533 | eerb->buffersize = eerb->buffer_page_count * PAGE_SIZE; | ||
534 | eerb->buffer = kmalloc(eerb->buffer_page_count * sizeof(char *), | ||
535 | GFP_KERNEL); | ||
536 | if (!eerb->buffer) { | ||
537 | kfree(eerb); | ||
538 | return -ENOMEM; | ||
539 | } | ||
540 | if (dasd_eer_allocate_buffer_pages(eerb->buffer, | ||
541 | eerb->buffer_page_count)) { | ||
542 | kfree(eerb->buffer); | ||
543 | kfree(eerb); | ||
544 | return -ENOMEM; | ||
545 | } | ||
546 | filp->private_data = eerb; | ||
547 | spin_lock_irqsave(&bufferlock, flags); | ||
548 | list_add(&eerb->list, &bufferlist); | ||
549 | spin_unlock_irqrestore(&bufferlock, flags); | ||
550 | |||
551 | return nonseekable_open(inp,filp); | ||
552 | } | ||
553 | |||
554 | static int dasd_eer_close(struct inode *inp, struct file *filp) | ||
555 | { | ||
556 | struct eerbuffer *eerb; | ||
557 | unsigned long flags; | ||
558 | |||
559 | eerb = (struct eerbuffer *) filp->private_data; | ||
560 | spin_lock_irqsave(&bufferlock, flags); | ||
561 | list_del(&eerb->list); | ||
562 | spin_unlock_irqrestore(&bufferlock, flags); | ||
563 | dasd_eer_free_buffer_pages(eerb->buffer, eerb->buffer_page_count); | ||
564 | kfree(eerb->buffer); | ||
565 | kfree(eerb); | ||
566 | |||
567 | return 0; | ||
568 | } | ||
569 | |||
570 | static ssize_t dasd_eer_read(struct file *filp, char __user *buf, | ||
571 | size_t count, loff_t *ppos) | ||
572 | { | ||
573 | int tc, rc; | ||
574 | int tailcount, effective_count; | ||
575 | unsigned long flags; | ||
576 | struct eerbuffer *eerb; | ||
577 | |||
578 | eerb = (struct eerbuffer *) filp->private_data; | ||
579 | if (down_interruptible(&readbuffer_mutex)) | ||
580 | return -ERESTARTSYS; | ||
581 | |||
582 | spin_lock_irqsave(&bufferlock, flags); | ||
583 | |||
584 | if (eerb->residual < 0) { /* the remainder of this record */ | ||
585 | /* has been deleted */ | ||
586 | eerb->residual = 0; | ||
587 | spin_unlock_irqrestore(&bufferlock, flags); | ||
588 | up(&readbuffer_mutex); | ||
589 | return -EIO; | ||
590 | } else if (eerb->residual > 0) { | ||
591 | /* OK we still have a second half of a record to deliver */ | ||
592 | effective_count = min(eerb->residual, (int) count); | ||
593 | eerb->residual -= effective_count; | ||
594 | } else { | ||
595 | tc = 0; | ||
596 | while (!tc) { | ||
597 | tc = dasd_eer_read_buffer(eerb, (char *) &tailcount, | ||
598 | sizeof(tailcount)); | ||
599 | if (!tc) { | ||
600 | /* no data available */ | ||
601 | spin_unlock_irqrestore(&bufferlock, flags); | ||
602 | up(&readbuffer_mutex); | ||
603 | if (filp->f_flags & O_NONBLOCK) | ||
604 | return -EAGAIN; | ||
605 | rc = wait_event_interruptible( | ||
606 | dasd_eer_read_wait_queue, | ||
607 | eerb->head != eerb->tail); | ||
608 | if (rc) | ||
609 | return rc; | ||
610 | if (down_interruptible(&readbuffer_mutex)) | ||
611 | return -ERESTARTSYS; | ||
612 | spin_lock_irqsave(&bufferlock, flags); | ||
613 | } | ||
614 | } | ||
615 | WARN_ON(tc != sizeof(tailcount)); | ||
616 | effective_count = min(tailcount, (int) count); | ||
617 | eerb->residual = tailcount - effective_count; | ||
618 | } | ||
619 | |||
620 | tc = dasd_eer_read_buffer(eerb, readbuffer, effective_count); | ||
621 | WARN_ON(tc != effective_count); | ||
622 | |||
623 | spin_unlock_irqrestore(&bufferlock, flags); | ||
624 | |||
625 | if (copy_to_user(buf, readbuffer, effective_count)) { | ||
626 | up(&readbuffer_mutex); | ||
627 | return -EFAULT; | ||
628 | } | ||
629 | |||
630 | up(&readbuffer_mutex); | ||
631 | return effective_count; | ||
632 | } | ||
633 | |||
634 | static unsigned int dasd_eer_poll(struct file *filp, poll_table *ptable) | ||
635 | { | ||
636 | unsigned int mask; | ||
637 | unsigned long flags; | ||
638 | struct eerbuffer *eerb; | ||
639 | |||
640 | eerb = (struct eerbuffer *) filp->private_data; | ||
641 | poll_wait(filp, &dasd_eer_read_wait_queue, ptable); | ||
642 | spin_lock_irqsave(&bufferlock, flags); | ||
643 | if (eerb->head != eerb->tail) | ||
644 | mask = POLLIN | POLLRDNORM; | ||
645 | else | ||
646 | mask = 0; | ||
647 | spin_unlock_irqrestore(&bufferlock, flags); | ||
648 | return mask; | ||
649 | } | ||
650 | |||
651 | static struct file_operations dasd_eer_fops = { | ||
652 | .open = &dasd_eer_open, | ||
653 | .release = &dasd_eer_close, | ||
654 | .read = &dasd_eer_read, | ||
655 | .poll = &dasd_eer_poll, | ||
656 | .owner = THIS_MODULE, | ||
657 | }; | ||
658 | |||
659 | static struct miscdevice dasd_eer_dev = { | ||
660 | .minor = MISC_DYNAMIC_MINOR, | ||
661 | .name = "dasd_eer", | ||
662 | .fops = &dasd_eer_fops, | ||
663 | }; | ||
664 | |||
665 | int __init dasd_eer_init(void) | ||
666 | { | ||
667 | int rc; | ||
668 | |||
669 | rc = misc_register(&dasd_eer_dev); | ||
670 | if (rc) { | ||
671 | MESSAGE(KERN_ERR, "%s", "dasd_eer_init could not " | ||
672 | "register misc device"); | ||
673 | return rc; | ||
674 | } | ||
675 | |||
676 | return 0; | ||
677 | } | ||
678 | |||
679 | void __exit dasd_eer_exit(void) | ||
680 | { | ||
681 | WARN_ON(misc_deregister(&dasd_eer_dev) != 0); | ||
682 | } | ||
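The misc device registered above ("dasd_eer", dynamic minor) is the only user-visible entry point of the facility: records are kept in per-reader ring buffers with an internal integer length prefix, a single read() returns at most one record, and a residual is carried over when the user buffer was too small. For illustration only, a minimal blocking consumer of /dev/dasd_eer could be sketched as below; the buffer size and the printing are arbitrary, and nothing beyond the open/poll/read behaviour shown in this file is assumed.

	#include <fcntl.h>
	#include <poll.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		char buf[4096];
		ssize_t n;
		int fd;

		fd = open("/dev/dasd_eer", O_RDONLY);	/* misc device "dasd_eer" */
		if (fd < 0) {
			perror("open /dev/dasd_eer");
			return 1;
		}
		for (;;) {
			struct pollfd pfd = { .fd = fd, .events = POLLIN };

			if (poll(&pfd, 1, -1) < 0)
				break;
			n = read(fd, buf, sizeof(buf));	/* at most one record per read */
			if (n < 0)
				break;
			printf("EER record: %zd bytes\n", n);
		}
		close(fd);
		return 0;
	}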
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h index 7cb0b9e78a6a..4293ba827523 100644 --- a/drivers/s390/block/dasd_int.h +++ b/drivers/s390/block/dasd_int.h | |||
@@ -69,15 +69,6 @@ | |||
69 | */ | 69 | */ |
70 | struct dasd_device; | 70 | struct dasd_device; |
71 | 71 | ||
72 | typedef int (*dasd_ioctl_fn_t) (struct block_device *bdev, int no, long args); | ||
73 | |||
74 | struct dasd_ioctl { | ||
75 | struct list_head list; | ||
76 | struct module *owner; | ||
77 | int no; | ||
78 | dasd_ioctl_fn_t handler; | ||
79 | }; | ||
80 | |||
81 | typedef enum { | 72 | typedef enum { |
82 | dasd_era_fatal = -1, /* no chance to recover */ | 73 | dasd_era_fatal = -1, /* no chance to recover */ |
83 | dasd_era_none = 0, /* don't recover, everything alright */ | 74 | dasd_era_none = 0, /* don't recover, everything alright */ |
@@ -272,10 +263,28 @@ struct dasd_discipline { | |||
272 | /* i/o control functions. */ | 263 | /* i/o control functions. */ |
273 | int (*fill_geometry) (struct dasd_device *, struct hd_geometry *); | 264 | int (*fill_geometry) (struct dasd_device *, struct hd_geometry *); |
274 | int (*fill_info) (struct dasd_device *, struct dasd_information2_t *); | 265 | int (*fill_info) (struct dasd_device *, struct dasd_information2_t *); |
266 | int (*ioctl) (struct dasd_device *, unsigned int, void __user *); | ||
275 | }; | 267 | }; |
276 | 268 | ||
277 | extern struct dasd_discipline *dasd_diag_discipline_pointer; | 269 | extern struct dasd_discipline *dasd_diag_discipline_pointer; |
278 | 270 | ||
271 | |||
272 | /* | ||
273 | * Notification numbers for extended error reporting notifications: | ||
274 | * The DASD_EER_DISABLE notification is sent before a dasd_device (and its | ||
275 | * eer pointer) is freed. The error reporting module needs to do all necessary | ||
276 | * cleanup steps. | ||
277 | * The DASD_EER_TRIGGER notification sends the actual error reports (triggers). | ||
278 | */ | ||
279 | #define DASD_EER_DISABLE 0 | ||
280 | #define DASD_EER_TRIGGER 1 | ||
281 | |||
282 | /* Trigger IDs for extended error reporting DASD_EER_TRIGGER notification */ | ||
283 | #define DASD_EER_FATALERROR 1 | ||
284 | #define DASD_EER_NOPATH 2 | ||
285 | #define DASD_EER_STATECHANGE 3 | ||
286 | #define DASD_EER_PPRCSUSPEND 4 | ||
287 | |||
279 | struct dasd_device { | 288 | struct dasd_device { |
280 | /* Block device stuff. */ | 289 | /* Block device stuff. */ |
281 | struct gendisk *gdp; | 290 | struct gendisk *gdp; |
@@ -289,6 +298,9 @@ struct dasd_device { | |||
289 | unsigned long flags; /* per device flags */ | 298 | unsigned long flags; /* per device flags */ |
290 | unsigned short features; /* copy of devmap-features (read-only!) */ | 299 | unsigned short features; /* copy of devmap-features (read-only!) */ |
291 | 300 | ||
301 | /* extended error reporting stuff (eer) */ | ||
302 | struct dasd_ccw_req *eer_cqr; | ||
303 | |||
292 | /* Device discipline stuff. */ | 304 | /* Device discipline stuff. */ |
293 | struct dasd_discipline *discipline; | 305 | struct dasd_discipline *discipline; |
294 | struct dasd_discipline *base_discipline; | 306 | struct dasd_discipline *base_discipline; |
@@ -334,6 +346,8 @@ struct dasd_device { | |||
334 | /* per device flags */ | 346 | /* per device flags */ |
335 | #define DASD_FLAG_DSC_ERROR 2 /* return -EIO when disconnected */ | 347 | #define DASD_FLAG_DSC_ERROR 2 /* return -EIO when disconnected */ |
336 | #define DASD_FLAG_OFFLINE 3 /* device is in offline processing */ | 348 | #define DASD_FLAG_OFFLINE 3 /* device is in offline processing */ |
349 | #define DASD_FLAG_EER_SNSS 4 /* A SNSS is required */ | ||
350 | #define DASD_FLAG_EER_IN_USE 5 /* A SNSS request is running */ | ||
337 | 351 | ||
338 | void dasd_put_device_wake(struct dasd_device *); | 352 | void dasd_put_device_wake(struct dasd_device *); |
339 | 353 | ||
@@ -523,10 +537,6 @@ int dasd_scan_partitions(struct dasd_device *); | |||
523 | void dasd_destroy_partitions(struct dasd_device *); | 537 | void dasd_destroy_partitions(struct dasd_device *); |
524 | 538 | ||
525 | /* externals in dasd_ioctl.c */ | 539 | /* externals in dasd_ioctl.c */ |
526 | int dasd_ioctl_init(void); | ||
527 | void dasd_ioctl_exit(void); | ||
528 | int dasd_ioctl_no_register(struct module *, int, dasd_ioctl_fn_t); | ||
529 | int dasd_ioctl_no_unregister(struct module *, int, dasd_ioctl_fn_t); | ||
530 | int dasd_ioctl(struct inode *, struct file *, unsigned int, unsigned long); | 540 | int dasd_ioctl(struct inode *, struct file *, unsigned int, unsigned long); |
531 | long dasd_compat_ioctl(struct file *, unsigned int, unsigned long); | 541 | long dasd_compat_ioctl(struct file *, unsigned int, unsigned long); |
532 | 542 | ||
@@ -557,6 +567,30 @@ dasd_era_t dasd_9336_erp_examine(struct dasd_ccw_req *, struct irb *); | |||
557 | dasd_era_t dasd_9343_erp_examine(struct dasd_ccw_req *, struct irb *); | 567 | dasd_era_t dasd_9343_erp_examine(struct dasd_ccw_req *, struct irb *); |
558 | struct dasd_ccw_req *dasd_9343_erp_action(struct dasd_ccw_req *); | 568 | struct dasd_ccw_req *dasd_9343_erp_action(struct dasd_ccw_req *); |
559 | 569 | ||
570 | /* externals in dasd_eer.c */ | ||
571 | #ifdef CONFIG_DASD_EER | ||
572 | int dasd_eer_init(void); | ||
573 | void dasd_eer_exit(void); | ||
574 | int dasd_eer_enable(struct dasd_device *); | ||
575 | void dasd_eer_disable(struct dasd_device *); | ||
576 | void dasd_eer_write(struct dasd_device *, struct dasd_ccw_req *cqr, | ||
577 | unsigned int id); | ||
578 | void dasd_eer_snss(struct dasd_device *); | ||
579 | |||
580 | static inline int dasd_eer_enabled(struct dasd_device *device) | ||
581 | { | ||
582 | return device->eer_cqr != NULL; | ||
583 | } | ||
584 | #else | ||
585 | #define dasd_eer_init() (0) | ||
586 | #define dasd_eer_exit() do { } while (0) | ||
587 | #define dasd_eer_enable(d) (0) | ||
588 | #define dasd_eer_disable(d) do { } while (0) | ||
589 | #define dasd_eer_write(d,c,i) do { } while (0) | ||
590 | #define dasd_eer_snss(d) do { } while (0) | ||
591 | #define dasd_eer_enabled(d) (0) | ||
592 | #endif /* CONFIG_DASD_EER */ | ||
593 | |||
560 | #endif /* __KERNEL__ */ | 594 | #endif /* __KERNEL__ */ |
561 | 595 | ||
562 | #endif /* DASD_H */ | 596 | #endif /* DASD_H */ |
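The header wires the new facility into the rest of the driver: when CONFIG_DASD_EER is not set, every dasd_eer_* call collapses into a no-op, so callers in the dasd core can invoke them unconditionally. A sketch of the intended call pattern follows; only dasd_eer_enabled(), dasd_eer_write() and the DASD_EER_FATALERROR trigger ID come from the declarations above, while the surrounding function is hypothetical.

	/* Hypothetical call site inside a dasd core error path. */
	static void example_report_fatal_error(struct dasd_device *device,
					       struct dasd_ccw_req *cqr)
	{
		if (!dasd_eer_enabled(device))
			return;
		/* Queue an extended error record for this request. */
		dasd_eer_write(device, cqr, DASD_EER_FATALERROR);
	}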
diff --git a/drivers/s390/block/dasd_ioctl.c b/drivers/s390/block/dasd_ioctl.c index fafeeae52675..b8c80d28df41 100644 --- a/drivers/s390/block/dasd_ioctl.c +++ b/drivers/s390/block/dasd_ioctl.c | |||
@@ -16,6 +16,7 @@ | |||
16 | #include <linux/blkpg.h> | 16 | #include <linux/blkpg.h> |
17 | 17 | ||
18 | #include <asm/ccwdev.h> | 18 | #include <asm/ccwdev.h> |
19 | #include <asm/cmb.h> | ||
19 | #include <asm/uaccess.h> | 20 | #include <asm/uaccess.h> |
20 | 21 | ||
21 | /* This is ugly... */ | 22 | /* This is ugly... */ |
@@ -23,116 +24,12 @@ | |||
23 | 24 | ||
24 | #include "dasd_int.h" | 25 | #include "dasd_int.h" |
25 | 26 | ||
26 | /* | ||
27 | * SECTION: ioctl functions. | ||
28 | */ | ||
29 | static struct list_head dasd_ioctl_list = LIST_HEAD_INIT(dasd_ioctl_list); | ||
30 | |||
31 | /* | ||
32 | * Find the ioctl with number no. | ||
33 | */ | ||
34 | static struct dasd_ioctl * | ||
35 | dasd_find_ioctl(int no) | ||
36 | { | ||
37 | struct dasd_ioctl *ioctl; | ||
38 | |||
39 | list_for_each_entry (ioctl, &dasd_ioctl_list, list) | ||
40 | if (ioctl->no == no) | ||
41 | return ioctl; | ||
42 | return NULL; | ||
43 | } | ||
44 | |||
45 | /* | ||
46 | * Register ioctl with number no. | ||
47 | */ | ||
48 | int | ||
49 | dasd_ioctl_no_register(struct module *owner, int no, dasd_ioctl_fn_t handler) | ||
50 | { | ||
51 | struct dasd_ioctl *new; | ||
52 | if (dasd_find_ioctl(no)) | ||
53 | return -EBUSY; | ||
54 | new = kmalloc(sizeof (struct dasd_ioctl), GFP_KERNEL); | ||
55 | if (new == NULL) | ||
56 | return -ENOMEM; | ||
57 | new->owner = owner; | ||
58 | new->no = no; | ||
59 | new->handler = handler; | ||
60 | list_add(&new->list, &dasd_ioctl_list); | ||
61 | return 0; | ||
62 | } | ||
63 | |||
64 | /* | ||
65 | * Deregister ioctl with number no. | ||
66 | */ | ||
67 | int | ||
68 | dasd_ioctl_no_unregister(struct module *owner, int no, dasd_ioctl_fn_t handler) | ||
69 | { | ||
70 | struct dasd_ioctl *old = dasd_find_ioctl(no); | ||
71 | if (old == NULL) | ||
72 | return -ENOENT; | ||
73 | if (old->no != no || old->handler != handler || owner != old->owner) | ||
74 | return -EINVAL; | ||
75 | list_del(&old->list); | ||
76 | kfree(old); | ||
77 | return 0; | ||
78 | } | ||
79 | |||
80 | int | ||
81 | dasd_ioctl(struct inode *inp, struct file *filp, | ||
82 | unsigned int no, unsigned long data) | ||
83 | { | ||
84 | struct block_device *bdev = inp->i_bdev; | ||
85 | struct dasd_device *device = bdev->bd_disk->private_data; | ||
86 | struct dasd_ioctl *ioctl; | ||
87 | const char *dir; | ||
88 | int rc; | ||
89 | |||
90 | if ((_IOC_DIR(no) != _IOC_NONE) && (data == 0)) { | ||
91 | PRINT_DEBUG("empty data ptr"); | ||
92 | return -EINVAL; | ||
93 | } | ||
94 | dir = _IOC_DIR (no) == _IOC_NONE ? "0" : | ||
95 | _IOC_DIR (no) == _IOC_READ ? "r" : | ||
96 | _IOC_DIR (no) == _IOC_WRITE ? "w" : | ||
97 | _IOC_DIR (no) == (_IOC_READ | _IOC_WRITE) ? "rw" : "u"; | ||
98 | DBF_DEV_EVENT(DBF_DEBUG, device, | ||
99 | "ioctl 0x%08x %s'0x%x'%d(%d) with data %8lx", no, | ||
100 | dir, _IOC_TYPE(no), _IOC_NR(no), _IOC_SIZE(no), data); | ||
101 | /* Search for ioctl no in the ioctl list. */ | ||
102 | list_for_each_entry(ioctl, &dasd_ioctl_list, list) { | ||
103 | if (ioctl->no == no) { | ||
104 | /* Found a matching ioctl. Call it. */ | ||
105 | if (!try_module_get(ioctl->owner)) | ||
106 | continue; | ||
107 | rc = ioctl->handler(bdev, no, data); | ||
108 | module_put(ioctl->owner); | ||
109 | return rc; | ||
110 | } | ||
111 | } | ||
112 | /* No ioctl with number no. */ | ||
113 | DBF_DEV_EVENT(DBF_INFO, device, | ||
114 | "unknown ioctl 0x%08x=%s'0x%x'%d(%d) data %8lx", no, | ||
115 | dir, _IOC_TYPE(no), _IOC_NR(no), _IOC_SIZE(no), data); | ||
116 | return -EINVAL; | ||
117 | } | ||
118 | |||
119 | long | ||
120 | dasd_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) | ||
121 | { | ||
122 | int rval; | ||
123 | |||
124 | lock_kernel(); | ||
125 | rval = dasd_ioctl(filp->f_dentry->d_inode, filp, cmd, arg); | ||
126 | unlock_kernel(); | ||
127 | |||
128 | return (rval == -EINVAL) ? -ENOIOCTLCMD : rval; | ||
129 | } | ||
130 | 27 | ||
131 | static int | 28 | static int |
132 | dasd_ioctl_api_version(struct block_device *bdev, int no, long args) | 29 | dasd_ioctl_api_version(void __user *argp) |
133 | { | 30 | { |
134 | int ver = DASD_API_VERSION; | 31 | int ver = DASD_API_VERSION; |
135 | return put_user(ver, (int __user *) args); | 32 | return put_user(ver, (int __user *)argp); |
136 | } | 33 | } |
137 | 34 | ||
138 | /* | 35 | /* |
@@ -140,20 +37,18 @@ dasd_ioctl_api_version(struct block_device *bdev, int no, long args) | |||
140 | * used by dasdfmt after BIODASDDISABLE to retrigger blocksize detection | 37 | * used by dasdfmt after BIODASDDISABLE to retrigger blocksize detection |
141 | */ | 38 | */ |
142 | static int | 39 | static int |
143 | dasd_ioctl_enable(struct block_device *bdev, int no, long args) | 40 | dasd_ioctl_enable(struct block_device *bdev) |
144 | { | 41 | { |
145 | struct dasd_device *device; | 42 | struct dasd_device *device = bdev->bd_disk->private_data; |
146 | 43 | ||
147 | if (!capable(CAP_SYS_ADMIN)) | 44 | if (!capable(CAP_SYS_ADMIN)) |
148 | return -EACCES; | 45 | return -EACCES; |
149 | device = bdev->bd_disk->private_data; | 46 | |
150 | if (device == NULL) | ||
151 | return -ENODEV; | ||
152 | dasd_enable_device(device); | 47 | dasd_enable_device(device); |
153 | /* Formatting the dasd device can change the capacity. */ | 48 | /* Formatting the dasd device can change the capacity. */ |
154 | down(&bdev->bd_sem); | 49 | mutex_lock(&bdev->bd_mutex); |
155 | i_size_write(bdev->bd_inode, (loff_t)get_capacity(device->gdp) << 9); | 50 | i_size_write(bdev->bd_inode, (loff_t)get_capacity(device->gdp) << 9); |
156 | up(&bdev->bd_sem); | 51 | mutex_unlock(&bdev->bd_mutex); |
157 | return 0; | 52 | return 0; |
158 | } | 53 | } |
159 | 54 | ||
@@ -162,15 +57,13 @@ dasd_ioctl_enable(struct block_device *bdev, int no, long args) | |||
162 | * Used by dasdfmt. Disable I/O operations but allow ioctls. | 57 | * Used by dasdfmt. Disable I/O operations but allow ioctls. |
163 | */ | 58 | */ |
164 | static int | 59 | static int |
165 | dasd_ioctl_disable(struct block_device *bdev, int no, long args) | 60 | dasd_ioctl_disable(struct block_device *bdev) |
166 | { | 61 | { |
167 | struct dasd_device *device; | 62 | struct dasd_device *device = bdev->bd_disk->private_data; |
168 | 63 | ||
169 | if (!capable(CAP_SYS_ADMIN)) | 64 | if (!capable(CAP_SYS_ADMIN)) |
170 | return -EACCES; | 65 | return -EACCES; |
171 | device = bdev->bd_disk->private_data; | 66 | |
172 | if (device == NULL) | ||
173 | return -ENODEV; | ||
174 | /* | 67 | /* |
175 | * Man this is sick. We don't do a real disable but only downgrade | 68 | * Man this is sick. We don't do a real disable but only downgrade |
176 | * the device to DASD_STATE_BASIC. The reason is that dasdfmt uses | 69 | * the device to DASD_STATE_BASIC. The reason is that dasdfmt uses |
@@ -184,9 +77,9 @@ dasd_ioctl_disable(struct block_device *bdev, int no, long args) | |||
184 | * Set i_size to zero, since read, write, etc. check against this | 77 | * Set i_size to zero, since read, write, etc. check against this |
185 | * value. | 78 | * value. |
186 | */ | 79 | */ |
187 | down(&bdev->bd_sem); | 80 | mutex_lock(&bdev->bd_mutex); |
188 | i_size_write(bdev->bd_inode, 0); | 81 | i_size_write(bdev->bd_inode, 0); |
189 | up(&bdev->bd_sem); | 82 | mutex_unlock(&bdev->bd_mutex); |
190 | return 0; | 83 | return 0; |
191 | } | 84 | } |
192 | 85 | ||
@@ -194,18 +87,13 @@ dasd_ioctl_disable(struct block_device *bdev, int no, long args) | |||
194 | * Quiesce device. | 87 | * Quiesce device. |
195 | */ | 88 | */ |
196 | static int | 89 | static int |
197 | dasd_ioctl_quiesce(struct block_device *bdev, int no, long args) | 90 | dasd_ioctl_quiesce(struct dasd_device *device) |
198 | { | 91 | { |
199 | struct dasd_device *device; | ||
200 | unsigned long flags; | 92 | unsigned long flags; |
201 | 93 | ||
202 | if (!capable (CAP_SYS_ADMIN)) | 94 | if (!capable (CAP_SYS_ADMIN)) |
203 | return -EACCES; | 95 | return -EACCES; |
204 | 96 | ||
205 | device = bdev->bd_disk->private_data; | ||
206 | if (device == NULL) | ||
207 | return -ENODEV; | ||
208 | |||
209 | DEV_MESSAGE (KERN_DEBUG, device, "%s", | 97 | DEV_MESSAGE (KERN_DEBUG, device, "%s", |
210 | "Quiesce IO on device"); | 98 | "Quiesce IO on device"); |
211 | spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); | 99 | spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); |
@@ -219,18 +107,13 @@ dasd_ioctl_quiesce(struct block_device *bdev, int no, long args) | |||
219 | * Resume device. | 107 | * Resume device. |
220 | */ | 108 | */ |
221 | static int | 109 | static int |
222 | dasd_ioctl_resume(struct block_device *bdev, int no, long args) | 110 | dasd_ioctl_resume(struct dasd_device *device) |
223 | { | 111 | { |
224 | struct dasd_device *device; | ||
225 | unsigned long flags; | 112 | unsigned long flags; |
226 | 113 | ||
227 | if (!capable (CAP_SYS_ADMIN)) | 114 | if (!capable (CAP_SYS_ADMIN)) |
228 | return -EACCES; | 115 | return -EACCES; |
229 | 116 | ||
230 | device = bdev->bd_disk->private_data; | ||
231 | if (device == NULL) | ||
232 | return -ENODEV; | ||
233 | |||
234 | DEV_MESSAGE (KERN_DEBUG, device, "%s", | 117 | DEV_MESSAGE (KERN_DEBUG, device, "%s", |
235 | "resume IO on device"); | 118 | "resume IO on device"); |
236 | 119 | ||
@@ -302,25 +185,19 @@ dasd_format(struct dasd_device * device, struct format_data_t * fdata) | |||
302 | * Format device. | 185 | * Format device. |
303 | */ | 186 | */ |
304 | static int | 187 | static int |
305 | dasd_ioctl_format(struct block_device *bdev, int no, long args) | 188 | dasd_ioctl_format(struct block_device *bdev, void __user *argp) |
306 | { | 189 | { |
307 | struct dasd_device *device; | 190 | struct dasd_device *device = bdev->bd_disk->private_data; |
308 | struct format_data_t fdata; | 191 | struct format_data_t fdata; |
309 | 192 | ||
310 | if (!capable(CAP_SYS_ADMIN)) | 193 | if (!capable(CAP_SYS_ADMIN)) |
311 | return -EACCES; | 194 | return -EACCES; |
312 | if (!args) | 195 | if (!argp) |
313 | return -EINVAL; | 196 | return -EINVAL; |
314 | /* fdata == NULL is no longer a valid arg to dasd_format ! */ | ||
315 | device = bdev->bd_disk->private_data; | ||
316 | |||
317 | if (device == NULL) | ||
318 | return -ENODEV; | ||
319 | 197 | ||
320 | if (device->features & DASD_FEATURE_READONLY) | 198 | if (device->features & DASD_FEATURE_READONLY) |
321 | return -EROFS; | 199 | return -EROFS; |
322 | if (copy_from_user(&fdata, (void __user *) args, | 200 | if (copy_from_user(&fdata, argp, sizeof(struct format_data_t))) |
323 | sizeof (struct format_data_t))) | ||
324 | return -EFAULT; | 201 | return -EFAULT; |
325 | if (bdev != bdev->bd_contains) { | 202 | if (bdev != bdev->bd_contains) { |
326 | DEV_MESSAGE(KERN_WARNING, device, "%s", | 203 | DEV_MESSAGE(KERN_WARNING, device, "%s", |
@@ -335,17 +212,8 @@ dasd_ioctl_format(struct block_device *bdev, int no, long args) | |||
335 | * Reset device profile information | 212 | * Reset device profile information |
336 | */ | 213 | */ |
337 | static int | 214 | static int |
338 | dasd_ioctl_reset_profile(struct block_device *bdev, int no, long args) | 215 | dasd_ioctl_reset_profile(struct dasd_device *device) |
339 | { | 216 | { |
340 | struct dasd_device *device; | ||
341 | |||
342 | if (!capable(CAP_SYS_ADMIN)) | ||
343 | return -EACCES; | ||
344 | |||
345 | device = bdev->bd_disk->private_data; | ||
346 | if (device == NULL) | ||
347 | return -ENODEV; | ||
348 | |||
349 | memset(&device->profile, 0, sizeof (struct dasd_profile_info_t)); | 217 | memset(&device->profile, 0, sizeof (struct dasd_profile_info_t)); |
350 | return 0; | 218 | return 0; |
351 | } | 219 | } |
@@ -354,31 +222,24 @@ dasd_ioctl_reset_profile(struct block_device *bdev, int no, long args) | |||
354 | * Return device profile information | 222 | * Return device profile information |
355 | */ | 223 | */ |
356 | static int | 224 | static int |
357 | dasd_ioctl_read_profile(struct block_device *bdev, int no, long args) | 225 | dasd_ioctl_read_profile(struct dasd_device *device, void __user *argp) |
358 | { | 226 | { |
359 | struct dasd_device *device; | ||
360 | |||
361 | device = bdev->bd_disk->private_data; | ||
362 | if (device == NULL) | ||
363 | return -ENODEV; | ||
364 | |||
365 | if (dasd_profile_level == DASD_PROFILE_OFF) | 227 | if (dasd_profile_level == DASD_PROFILE_OFF) |
366 | return -EIO; | 228 | return -EIO; |
367 | 229 | if (copy_to_user(argp, &device->profile, | |
368 | if (copy_to_user((long __user *) args, (long *) &device->profile, | ||
369 | sizeof (struct dasd_profile_info_t))) | 230 | sizeof (struct dasd_profile_info_t))) |
370 | return -EFAULT; | 231 | return -EFAULT; |
371 | return 0; | 232 | return 0; |
372 | } | 233 | } |
373 | #else | 234 | #else |
374 | static int | 235 | static int |
375 | dasd_ioctl_reset_profile(struct block_device *bdev, int no, long args) | 236 | dasd_ioctl_reset_profile(struct dasd_device *device) |
376 | { | 237 | { |
377 | return -ENOSYS; | 238 | return -ENOSYS; |
378 | } | 239 | } |
379 | 240 | ||
380 | static int | 241 | static int |
381 | dasd_ioctl_read_profile(struct block_device *bdev, int no, long args) | 242 | dasd_ioctl_read_profile(struct dasd_device *device, void __user *argp) |
382 | { | 243 | { |
383 | return -ENOSYS; | 244 | return -ENOSYS; |
384 | } | 245 | } |
@@ -388,22 +249,18 @@ dasd_ioctl_read_profile(struct block_device *bdev, int no, long args) | |||
388 | * Return dasd information. Used for BIODASDINFO and BIODASDINFO2. | 249 | * Return dasd information. Used for BIODASDINFO and BIODASDINFO2. |
389 | */ | 250 | */ |
390 | static int | 251 | static int |
391 | dasd_ioctl_information(struct block_device *bdev, int no, long args) | 252 | dasd_ioctl_information(struct dasd_device *device, |
253 | unsigned int cmd, void __user *argp) | ||
392 | { | 254 | { |
393 | struct dasd_device *device; | ||
394 | struct dasd_information2_t *dasd_info; | 255 | struct dasd_information2_t *dasd_info; |
395 | unsigned long flags; | 256 | unsigned long flags; |
396 | int rc; | 257 | int rc; |
397 | struct ccw_device *cdev; | 258 | struct ccw_device *cdev; |
398 | 259 | ||
399 | device = bdev->bd_disk->private_data; | ||
400 | if (device == NULL) | ||
401 | return -ENODEV; | ||
402 | |||
403 | if (!device->discipline->fill_info) | 260 | if (!device->discipline->fill_info) |
404 | return -EINVAL; | 261 | return -EINVAL; |
405 | 262 | ||
406 | dasd_info = kmalloc(sizeof(struct dasd_information2_t), GFP_KERNEL); | 263 | dasd_info = kzalloc(sizeof(struct dasd_information2_t), GFP_KERNEL); |
407 | if (dasd_info == NULL) | 264 | if (dasd_info == NULL) |
408 | return -ENOMEM; | 265 | return -ENOMEM; |
409 | 266 | ||
@@ -446,8 +303,7 @@ dasd_ioctl_information(struct block_device *bdev, int no, long args) | |||
446 | memcpy(dasd_info->type, device->discipline->name, 4); | 303 | memcpy(dasd_info->type, device->discipline->name, 4); |
447 | else | 304 | else |
448 | memcpy(dasd_info->type, "none", 4); | 305 | memcpy(dasd_info->type, "none", 4); |
449 | dasd_info->req_queue_len = 0; | 306 | |
450 | dasd_info->chanq_len = 0; | ||
451 | if (device->request_queue->request_fn) { | 307 | if (device->request_queue->request_fn) { |
452 | struct list_head *l; | 308 | struct list_head *l; |
453 | #ifdef DASD_EXTENDED_PROFILING | 309 | #ifdef DASD_EXTENDED_PROFILING |
@@ -467,8 +323,8 @@ dasd_ioctl_information(struct block_device *bdev, int no, long args) | |||
467 | } | 323 | } |
468 | 324 | ||
469 | rc = 0; | 325 | rc = 0; |
470 | if (copy_to_user((long __user *) args, (long *) dasd_info, | 326 | if (copy_to_user(argp, dasd_info, |
471 | ((no == (unsigned int) BIODASDINFO2) ? | 327 | ((cmd == (unsigned int) BIODASDINFO2) ? |
472 | sizeof (struct dasd_information2_t) : | 328 | sizeof (struct dasd_information2_t) : |
473 | sizeof (struct dasd_information_t)))) | 329 | sizeof (struct dasd_information_t)))) |
474 | rc = -EFAULT; | 330 | rc = -EFAULT; |
@@ -480,69 +336,103 @@ dasd_ioctl_information(struct block_device *bdev, int no, long args) | |||
480 | * Set read only | 336 | * Set read only |
481 | */ | 337 | */ |
482 | static int | 338 | static int |
483 | dasd_ioctl_set_ro(struct block_device *bdev, int no, long args) | 339 | dasd_ioctl_set_ro(struct block_device *bdev, void __user *argp) |
484 | { | 340 | { |
485 | struct dasd_device *device; | 341 | struct dasd_device *device = bdev->bd_disk->private_data; |
486 | int intval, rc; | 342 | int intval; |
487 | 343 | ||
488 | if (!capable(CAP_SYS_ADMIN)) | 344 | if (!capable(CAP_SYS_ADMIN)) |
489 | return -EACCES; | 345 | return -EACCES; |
490 | if (bdev != bdev->bd_contains) | 346 | if (bdev != bdev->bd_contains) |
491 | // ro setting is not allowed for partitions | 347 | // ro setting is not allowed for partitions |
492 | return -EINVAL; | 348 | return -EINVAL; |
493 | if (get_user(intval, (int __user *) args)) | 349 | if (get_user(intval, (int *)argp)) |
494 | return -EFAULT; | 350 | return -EFAULT; |
495 | device = bdev->bd_disk->private_data; | ||
496 | if (device == NULL) | ||
497 | return -ENODEV; | ||
498 | 351 | ||
499 | set_disk_ro(bdev->bd_disk, intval); | 352 | set_disk_ro(bdev->bd_disk, intval); |
500 | rc = dasd_set_feature(device->cdev, DASD_FEATURE_READONLY, intval); | 353 | return dasd_set_feature(device->cdev, DASD_FEATURE_READONLY, intval); |
501 | |||
502 | return rc; | ||
503 | } | 354 | } |
504 | 355 | ||
505 | /* | 356 | static int |
506 | * List of static ioctls. | 357 | dasd_ioctl_readall_cmb(struct dasd_device *device, unsigned int cmd, |
507 | */ | 358 | unsigned long arg) |
508 | static struct { int no; dasd_ioctl_fn_t fn; } dasd_ioctls[] = | ||
509 | { | 359 | { |
510 | { BIODASDDISABLE, dasd_ioctl_disable }, | 360 | struct cmbdata __user *argp = (void __user *) arg; |
511 | { BIODASDENABLE, dasd_ioctl_enable }, | 361 | size_t size = _IOC_SIZE(cmd); |
512 | { BIODASDQUIESCE, dasd_ioctl_quiesce }, | 362 | struct cmbdata data; |
513 | { BIODASDRESUME, dasd_ioctl_resume }, | 363 | int ret; |
514 | { BIODASDFMT, dasd_ioctl_format }, | 364 | |
515 | { BIODASDINFO, dasd_ioctl_information }, | 365 | ret = cmf_readall(device->cdev, &data); |
516 | { BIODASDINFO2, dasd_ioctl_information }, | 366 | if (!ret && copy_to_user(argp, &data, min(size, sizeof(*argp)))) |
517 | { BIODASDPRRD, dasd_ioctl_read_profile }, | 367 | return -EFAULT; |
518 | { BIODASDPRRST, dasd_ioctl_reset_profile }, | 368 | return ret; |
519 | { BLKROSET, dasd_ioctl_set_ro }, | 369 | } |
520 | { DASDAPIVER, dasd_ioctl_api_version }, | ||
521 | { -1, NULL } | ||
522 | }; | ||
523 | 370 | ||
524 | int | 371 | int |
525 | dasd_ioctl_init(void) | 372 | dasd_ioctl(struct inode *inode, struct file *file, |
373 | unsigned int cmd, unsigned long arg) | ||
526 | { | 374 | { |
527 | int i; | 375 | struct block_device *bdev = inode->i_bdev; |
376 | struct dasd_device *device = bdev->bd_disk->private_data; | ||
377 | void __user *argp = (void __user *)arg; | ||
528 | 378 | ||
529 | for (i = 0; dasd_ioctls[i].no != -1; i++) | 379 | if (!device) |
530 | dasd_ioctl_no_register(NULL, dasd_ioctls[i].no, | 380 | return -ENODEV; |
531 | dasd_ioctls[i].fn); | 381 | |
532 | return 0; | 382 | if ((_IOC_DIR(cmd) != _IOC_NONE) && !arg) { |
383 | PRINT_DEBUG("empty data ptr"); | ||
384 | return -EINVAL; | ||
385 | } | ||
533 | 386 | ||
387 | switch (cmd) { | ||
388 | case BIODASDDISABLE: | ||
389 | return dasd_ioctl_disable(bdev); | ||
390 | case BIODASDENABLE: | ||
391 | return dasd_ioctl_enable(bdev); | ||
392 | case BIODASDQUIESCE: | ||
393 | return dasd_ioctl_quiesce(device); | ||
394 | case BIODASDRESUME: | ||
395 | return dasd_ioctl_resume(device); | ||
396 | case BIODASDFMT: | ||
397 | return dasd_ioctl_format(bdev, argp); | ||
398 | case BIODASDINFO: | ||
399 | return dasd_ioctl_information(device, cmd, argp); | ||
400 | case BIODASDINFO2: | ||
401 | return dasd_ioctl_information(device, cmd, argp); | ||
402 | case BIODASDPRRD: | ||
403 | return dasd_ioctl_read_profile(device, argp); | ||
404 | case BIODASDPRRST: | ||
405 | return dasd_ioctl_reset_profile(device); | ||
406 | case BLKROSET: | ||
407 | return dasd_ioctl_set_ro(bdev, argp); | ||
408 | case DASDAPIVER: | ||
409 | return dasd_ioctl_api_version(argp); | ||
410 | case BIODASDCMFENABLE: | ||
411 | return enable_cmf(device->cdev); | ||
412 | case BIODASDCMFDISABLE: | ||
413 | return disable_cmf(device->cdev); | ||
414 | case BIODASDREADALLCMB: | ||
415 | return dasd_ioctl_readall_cmb(device, cmd, arg); | ||
416 | default: | ||
417 | /* if the discipline has an ioctl method try it. */ | ||
418 | if (device->discipline->ioctl) { | ||
419 | int rval = device->discipline->ioctl(device, cmd, argp); | ||
420 | if (rval != -ENOIOCTLCMD) | ||
421 | return rval; | ||
422 | } | ||
423 | |||
424 | return -EINVAL; | ||
425 | } | ||
534 | } | 426 | } |
535 | 427 | ||
536 | void | 428 | long |
537 | dasd_ioctl_exit(void) | 429 | dasd_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) |
538 | { | 430 | { |
539 | int i; | 431 | int rval; |
540 | 432 | ||
541 | for (i = 0; dasd_ioctls[i].no != -1; i++) | 433 | lock_kernel(); |
542 | dasd_ioctl_no_unregister(NULL, dasd_ioctls[i].no, | 434 | rval = dasd_ioctl(filp->f_dentry->d_inode, filp, cmd, arg); |
543 | dasd_ioctls[i].fn); | 435 | unlock_kernel(); |
544 | 436 | ||
437 | return (rval == -EINVAL) ? -ENOIOCTLCMD : rval; | ||
545 | } | 438 | } |
546 | |||
547 | EXPORT_SYMBOL(dasd_ioctl_no_register); | ||
548 | EXPORT_SYMBOL(dasd_ioctl_no_unregister); | ||
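With the registration list gone, dasd_ioctl() dispatches the known commands directly and falls back to the new discipline->ioctl hook for anything else; a hook signals "not mine" with -ENOIOCTLCMD so the core can still return -EINVAL for truly unknown commands. A sketch of such a hook, with a made-up command number purely for illustration:

	/* Illustrative discipline hook; BIODASDEXAMPLE is not a real ioctl. */
	static int example_discipline_ioctl(struct dasd_device *device,
					    unsigned int cmd, void __user *argp)
	{
		switch (cmd) {
		case BIODASDEXAMPLE:
			/* discipline-specific handling of the argument in argp */
			return 0;
		default:
			return -ENOIOCTLCMD;	/* let dasd_ioctl() handle the rest */
		}
	}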
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c index 2e727f49ad19..be9b05347b4f 100644 --- a/drivers/s390/block/dcssblk.c +++ b/drivers/s390/block/dcssblk.c | |||
@@ -273,7 +273,7 @@ removeseg: | |||
273 | list_del(&dev_info->lh); | 273 | list_del(&dev_info->lh); |
274 | 274 | ||
275 | del_gendisk(dev_info->gd); | 275 | del_gendisk(dev_info->gd); |
276 | blk_put_queue(dev_info->dcssblk_queue); | 276 | blk_cleanup_queue(dev_info->dcssblk_queue); |
277 | dev_info->gd->queue = NULL; | 277 | dev_info->gd->queue = NULL; |
278 | put_disk(dev_info->gd); | 278 | put_disk(dev_info->gd); |
279 | device_unregister(dev); | 279 | device_unregister(dev); |
@@ -388,12 +388,11 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char | |||
388 | /* | 388 | /* |
389 | * get a struct dcssblk_dev_info | 389 | * get a struct dcssblk_dev_info |
390 | */ | 390 | */ |
391 | dev_info = kmalloc(sizeof(struct dcssblk_dev_info), GFP_KERNEL); | 391 | dev_info = kzalloc(sizeof(struct dcssblk_dev_info), GFP_KERNEL); |
392 | if (dev_info == NULL) { | 392 | if (dev_info == NULL) { |
393 | rc = -ENOMEM; | 393 | rc = -ENOMEM; |
394 | goto out; | 394 | goto out; |
395 | } | 395 | } |
396 | memset(dev_info, 0, sizeof(struct dcssblk_dev_info)); | ||
397 | 396 | ||
398 | strcpy(dev_info->segment_name, local_buf); | 397 | strcpy(dev_info->segment_name, local_buf); |
399 | strlcpy(dev_info->dev.bus_id, local_buf, BUS_ID_SIZE); | 398 | strlcpy(dev_info->dev.bus_id, local_buf, BUS_ID_SIZE); |
@@ -491,7 +490,7 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char | |||
491 | unregister_dev: | 490 | unregister_dev: |
492 | PRINT_ERR("device_create_file() failed!\n"); | 491 | PRINT_ERR("device_create_file() failed!\n"); |
493 | list_del(&dev_info->lh); | 492 | list_del(&dev_info->lh); |
494 | blk_put_queue(dev_info->dcssblk_queue); | 493 | blk_cleanup_queue(dev_info->dcssblk_queue); |
495 | dev_info->gd->queue = NULL; | 494 | dev_info->gd->queue = NULL; |
496 | put_disk(dev_info->gd); | 495 | put_disk(dev_info->gd); |
497 | device_unregister(&dev_info->dev); | 496 | device_unregister(&dev_info->dev); |
@@ -505,7 +504,7 @@ list_del: | |||
505 | unload_seg: | 504 | unload_seg: |
506 | segment_unload(local_buf); | 505 | segment_unload(local_buf); |
507 | dealloc_gendisk: | 506 | dealloc_gendisk: |
508 | blk_put_queue(dev_info->dcssblk_queue); | 507 | blk_cleanup_queue(dev_info->dcssblk_queue); |
509 | dev_info->gd->queue = NULL; | 508 | dev_info->gd->queue = NULL; |
510 | put_disk(dev_info->gd); | 509 | put_disk(dev_info->gd); |
511 | free_dev_info: | 510 | free_dev_info: |
@@ -562,7 +561,7 @@ dcssblk_remove_store(struct device *dev, struct device_attribute *attr, const ch | |||
562 | list_del(&dev_info->lh); | 561 | list_del(&dev_info->lh); |
563 | 562 | ||
564 | del_gendisk(dev_info->gd); | 563 | del_gendisk(dev_info->gd); |
565 | blk_put_queue(dev_info->dcssblk_queue); | 564 | blk_cleanup_queue(dev_info->dcssblk_queue); |
566 | dev_info->gd->queue = NULL; | 565 | dev_info->gd->queue = NULL; |
567 | put_disk(dev_info->gd); | 566 | put_disk(dev_info->gd); |
568 | device_unregister(&dev_info->dev); | 567 | device_unregister(&dev_info->dev); |
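This is the first of several hunks in the patch (dcssblk, fs3270, keyboard, monreader, raw3270) that replace the kmalloc-plus-memset pair with kzalloc, which returns zero-initialized memory in a single call. The pattern in isolation, with a hypothetical structure and helper name:

	/* Hypothetical allocation helper showing the conversion. */
	static struct example_info *example_alloc(void)
	{
		struct example_info *info;

		/* Previously: kmalloc() followed by memset(info, 0, sizeof(*info)). */
		info = kzalloc(sizeof(*info), GFP_KERNEL);
		return info;	/* NULL on allocation failure */
	}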
diff --git a/drivers/s390/char/Makefile b/drivers/s390/char/Makefile index 6377a96735df..0c0162ff6c0c 100644 --- a/drivers/s390/char/Makefile +++ b/drivers/s390/char/Makefile | |||
@@ -26,4 +26,5 @@ tape-$(CONFIG_PROC_FS) += tape_proc.o | |||
26 | tape-objs := tape_core.o tape_std.o tape_char.o $(tape-y) | 26 | tape-objs := tape_core.o tape_std.o tape_char.o $(tape-y) |
27 | obj-$(CONFIG_S390_TAPE) += tape.o tape_class.o | 27 | obj-$(CONFIG_S390_TAPE) += tape.o tape_class.o |
28 | obj-$(CONFIG_S390_TAPE_34XX) += tape_34xx.o | 28 | obj-$(CONFIG_S390_TAPE_34XX) += tape_34xx.o |
29 | obj-$(CONFIG_S390_TAPE_3590) += tape_3590.o | ||
29 | obj-$(CONFIG_MONREADER) += monreader.o | 30 | obj-$(CONFIG_MONREADER) += monreader.o |
diff --git a/drivers/s390/char/fs3270.c b/drivers/s390/char/fs3270.c index 5f6fa4c67843..a6415377bc73 100644 --- a/drivers/s390/char/fs3270.c +++ b/drivers/s390/char/fs3270.c | |||
@@ -368,10 +368,9 @@ fs3270_alloc_view(void) | |||
368 | { | 368 | { |
369 | struct fs3270 *fp; | 369 | struct fs3270 *fp; |
370 | 370 | ||
371 | fp = (struct fs3270 *) kmalloc(sizeof(struct fs3270),GFP_KERNEL); | 371 | fp = kzalloc(sizeof(struct fs3270),GFP_KERNEL); |
372 | if (!fp) | 372 | if (!fp) |
373 | return ERR_PTR(-ENOMEM); | 373 | return ERR_PTR(-ENOMEM); |
374 | memset(fp, 0, sizeof(struct fs3270)); | ||
375 | fp->init = raw3270_request_alloc(0); | 374 | fp->init = raw3270_request_alloc(0); |
376 | if (IS_ERR(fp->init)) { | 375 | if (IS_ERR(fp->init)) { |
377 | kfree(fp); | 376 | kfree(fp); |
diff --git a/drivers/s390/char/keyboard.c b/drivers/s390/char/keyboard.c index a317a123daba..6badd8403409 100644 --- a/drivers/s390/char/keyboard.c +++ b/drivers/s390/char/keyboard.c | |||
@@ -50,14 +50,12 @@ kbd_alloc(void) { | |||
50 | struct kbd_data *kbd; | 50 | struct kbd_data *kbd; |
51 | int i, len; | 51 | int i, len; |
52 | 52 | ||
53 | kbd = kmalloc(sizeof(struct kbd_data), GFP_KERNEL); | 53 | kbd = kzalloc(sizeof(struct kbd_data), GFP_KERNEL); |
54 | if (!kbd) | 54 | if (!kbd) |
55 | goto out; | 55 | goto out; |
56 | memset(kbd, 0, sizeof(struct kbd_data)); | 56 | kbd->key_maps = kzalloc(sizeof(key_maps), GFP_KERNEL); |
57 | kbd->key_maps = kmalloc(sizeof(key_maps), GFP_KERNEL); | ||
58 | if (!key_maps) | 57 | if (!key_maps) |
59 | goto out_kbd; | 58 | goto out_kbd; |
60 | memset(kbd->key_maps, 0, sizeof(key_maps)); | ||
61 | for (i = 0; i < ARRAY_SIZE(key_maps); i++) { | 59 | for (i = 0; i < ARRAY_SIZE(key_maps); i++) { |
62 | if (key_maps[i]) { | 60 | if (key_maps[i]) { |
63 | kbd->key_maps[i] = | 61 | kbd->key_maps[i] = |
@@ -68,10 +66,9 @@ kbd_alloc(void) { | |||
68 | sizeof(u_short)*NR_KEYS); | 66 | sizeof(u_short)*NR_KEYS); |
69 | } | 67 | } |
70 | } | 68 | } |
71 | kbd->func_table = kmalloc(sizeof(func_table), GFP_KERNEL); | 69 | kbd->func_table = kzalloc(sizeof(func_table), GFP_KERNEL); |
72 | if (!kbd->func_table) | 70 | if (!kbd->func_table) |
73 | goto out_maps; | 71 | goto out_maps; |
74 | memset(kbd->func_table, 0, sizeof(func_table)); | ||
75 | for (i = 0; i < ARRAY_SIZE(func_table); i++) { | 72 | for (i = 0; i < ARRAY_SIZE(func_table); i++) { |
76 | if (func_table[i]) { | 73 | if (func_table[i]) { |
77 | len = strlen(func_table[i]) + 1; | 74 | len = strlen(func_table[i]) + 1; |
@@ -82,10 +79,9 @@ kbd_alloc(void) { | |||
82 | } | 79 | } |
83 | } | 80 | } |
84 | kbd->fn_handler = | 81 | kbd->fn_handler = |
85 | kmalloc(sizeof(fn_handler_fn *) * NR_FN_HANDLER, GFP_KERNEL); | 82 | kzalloc(sizeof(fn_handler_fn *) * NR_FN_HANDLER, GFP_KERNEL); |
86 | if (!kbd->fn_handler) | 83 | if (!kbd->fn_handler) |
87 | goto out_func; | 84 | goto out_func; |
88 | memset(kbd->fn_handler, 0, sizeof(fn_handler_fn *) * NR_FN_HANDLER); | ||
89 | kbd->accent_table = | 85 | kbd->accent_table = |
90 | kmalloc(sizeof(struct kbdiacr)*MAX_DIACR, GFP_KERNEL); | 86 | kmalloc(sizeof(struct kbdiacr)*MAX_DIACR, GFP_KERNEL); |
91 | if (!kbd->accent_table) | 87 | if (!kbd->accent_table) |
diff --git a/drivers/s390/char/monreader.c b/drivers/s390/char/monreader.c index 5fd3ad867386..fb7bc9e5eebc 100644 --- a/drivers/s390/char/monreader.c +++ b/drivers/s390/char/monreader.c | |||
@@ -257,14 +257,13 @@ mon_alloc_mem(void) | |||
257 | int i,j; | 257 | int i,j; |
258 | struct mon_private *monpriv; | 258 | struct mon_private *monpriv; |
259 | 259 | ||
260 | monpriv = kmalloc(sizeof(struct mon_private), GFP_KERNEL); | 260 | monpriv = kzalloc(sizeof(struct mon_private), GFP_KERNEL); |
261 | if (!monpriv) { | 261 | if (!monpriv) { |
262 | P_ERROR("no memory for monpriv\n"); | 262 | P_ERROR("no memory for monpriv\n"); |
263 | return NULL; | 263 | return NULL; |
264 | } | 264 | } |
265 | memset(monpriv, 0, sizeof(struct mon_private)); | ||
266 | for (i = 0; i < MON_MSGLIM; i++) { | 265 | for (i = 0; i < MON_MSGLIM; i++) { |
267 | monpriv->msg_array[i] = kmalloc(sizeof(struct mon_msg), | 266 | monpriv->msg_array[i] = kzalloc(sizeof(struct mon_msg), |
268 | GFP_KERNEL); | 267 | GFP_KERNEL); |
269 | if (!monpriv->msg_array[i]) { | 268 | if (!monpriv->msg_array[i]) { |
270 | P_ERROR("open, no memory for msg_array\n"); | 269 | P_ERROR("open, no memory for msg_array\n"); |
@@ -272,7 +271,6 @@ mon_alloc_mem(void) | |||
272 | kfree(monpriv->msg_array[j]); | 271 | kfree(monpriv->msg_array[j]); |
273 | return NULL; | 272 | return NULL; |
274 | } | 273 | } |
275 | memset(monpriv->msg_array[i], 0, sizeof(struct mon_msg)); | ||
276 | } | 274 | } |
277 | return monpriv; | 275 | return monpriv; |
278 | } | 276 | } |
diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c index 1026f2bc3185..eecb2afad5c2 100644 --- a/drivers/s390/char/raw3270.c +++ b/drivers/s390/char/raw3270.c | |||
@@ -28,6 +28,7 @@ | |||
28 | #include <linux/major.h> | 28 | #include <linux/major.h> |
29 | #include <linux/kdev_t.h> | 29 | #include <linux/kdev_t.h> |
30 | #include <linux/device.h> | 30 | #include <linux/device.h> |
31 | #include <linux/mutex.h> | ||
31 | 32 | ||
32 | struct class *class3270; | 33 | struct class *class3270; |
33 | 34 | ||
@@ -59,7 +60,7 @@ struct raw3270 { | |||
59 | #define RAW3270_FLAGS_CONSOLE 8 /* Device is the console. */ | 60 | #define RAW3270_FLAGS_CONSOLE 8 /* Device is the console. */ |
60 | 61 | ||
61 | /* Semaphore to protect global data of raw3270 (devices, views, etc). */ | 62 | /* Semaphore to protect global data of raw3270 (devices, views, etc). */ |
62 | static DECLARE_MUTEX(raw3270_sem); | 63 | static DEFINE_MUTEX(raw3270_mutex); |
63 | 64 | ||
64 | /* List of 3270 devices. */ | 65 | /* List of 3270 devices. */ |
65 | static struct list_head raw3270_devices = LIST_HEAD_INIT(raw3270_devices); | 66 | static struct list_head raw3270_devices = LIST_HEAD_INIT(raw3270_devices); |
@@ -115,10 +116,9 @@ raw3270_request_alloc(size_t size) | |||
115 | struct raw3270_request *rq; | 116 | struct raw3270_request *rq; |
116 | 117 | ||
117 | /* Allocate request structure */ | 118 | /* Allocate request structure */ |
118 | rq = kmalloc(sizeof(struct raw3270_request), GFP_KERNEL | GFP_DMA); | 119 | rq = kzalloc(sizeof(struct raw3270_request), GFP_KERNEL | GFP_DMA); |
119 | if (!rq) | 120 | if (!rq) |
120 | return ERR_PTR(-ENOMEM); | 121 | return ERR_PTR(-ENOMEM); |
121 | memset(rq, 0, sizeof(struct raw3270_request)); | ||
122 | 122 | ||
123 | /* alloc output buffer. */ | 123 | /* alloc output buffer. */ |
124 | if (size > 0) { | 124 | if (size > 0) { |
@@ -816,7 +816,7 @@ raw3270_setup_device(struct ccw_device *cdev, struct raw3270 *rp, char *ascebc) | |||
816 | * number for it. Note: there is no device with minor 0, | 816 | * number for it. Note: there is no device with minor 0, |
817 | * see special case for fs3270.c:fs3270_open(). | 817 | * see special case for fs3270.c:fs3270_open(). |
818 | */ | 818 | */ |
819 | down(&raw3270_sem); | 819 | mutex_lock(&raw3270_mutex); |
820 | /* Keep the list sorted. */ | 820 | /* Keep the list sorted. */ |
821 | minor = RAW3270_FIRSTMINOR; | 821 | minor = RAW3270_FIRSTMINOR; |
822 | rp->minor = -1; | 822 | rp->minor = -1; |
@@ -833,7 +833,7 @@ raw3270_setup_device(struct ccw_device *cdev, struct raw3270 *rp, char *ascebc) | |||
833 | rp->minor = minor; | 833 | rp->minor = minor; |
834 | list_add_tail(&rp->list, &raw3270_devices); | 834 | list_add_tail(&rp->list, &raw3270_devices); |
835 | } | 835 | } |
836 | up(&raw3270_sem); | 836 | mutex_unlock(&raw3270_mutex); |
837 | /* No free minor number? Then give up. */ | 837 | /* No free minor number? Then give up. */ |
838 | if (rp->minor == -1) | 838 | if (rp->minor == -1) |
839 | return -EUSERS; | 839 | return -EUSERS; |
@@ -1004,7 +1004,7 @@ raw3270_add_view(struct raw3270_view *view, struct raw3270_fn *fn, int minor) | |||
1004 | 1004 | ||
1005 | if (minor <= 0) | 1005 | if (minor <= 0) |
1006 | return -ENODEV; | 1006 | return -ENODEV; |
1007 | down(&raw3270_sem); | 1007 | mutex_lock(&raw3270_mutex); |
1008 | rc = -ENODEV; | 1008 | rc = -ENODEV; |
1009 | list_for_each_entry(rp, &raw3270_devices, list) { | 1009 | list_for_each_entry(rp, &raw3270_devices, list) { |
1010 | if (rp->minor != minor) | 1010 | if (rp->minor != minor) |
@@ -1025,7 +1025,7 @@ raw3270_add_view(struct raw3270_view *view, struct raw3270_fn *fn, int minor) | |||
1025 | spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags); | 1025 | spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags); |
1026 | break; | 1026 | break; |
1027 | } | 1027 | } |
1028 | up(&raw3270_sem); | 1028 | mutex_unlock(&raw3270_mutex); |
1029 | return rc; | 1029 | return rc; |
1030 | } | 1030 | } |
1031 | 1031 | ||
@@ -1039,7 +1039,7 @@ raw3270_find_view(struct raw3270_fn *fn, int minor) | |||
1039 | struct raw3270_view *view, *tmp; | 1039 | struct raw3270_view *view, *tmp; |
1040 | unsigned long flags; | 1040 | unsigned long flags; |
1041 | 1041 | ||
1042 | down(&raw3270_sem); | 1042 | mutex_lock(&raw3270_mutex); |
1043 | view = ERR_PTR(-ENODEV); | 1043 | view = ERR_PTR(-ENODEV); |
1044 | list_for_each_entry(rp, &raw3270_devices, list) { | 1044 | list_for_each_entry(rp, &raw3270_devices, list) { |
1045 | if (rp->minor != minor) | 1045 | if (rp->minor != minor) |
@@ -1058,7 +1058,7 @@ raw3270_find_view(struct raw3270_fn *fn, int minor) | |||
1058 | spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags); | 1058 | spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags); |
1059 | break; | 1059 | break; |
1060 | } | 1060 | } |
1061 | up(&raw3270_sem); | 1061 | mutex_unlock(&raw3270_mutex); |
1062 | return view; | 1062 | return view; |
1063 | } | 1063 | } |
1064 | 1064 | ||
@@ -1105,7 +1105,7 @@ raw3270_delete_device(struct raw3270 *rp) | |||
1105 | struct ccw_device *cdev; | 1105 | struct ccw_device *cdev; |
1106 | 1106 | ||
1107 | /* Remove from device chain. */ | 1107 | /* Remove from device chain. */ |
1108 | down(&raw3270_sem); | 1108 | mutex_lock(&raw3270_mutex); |
1109 | if (rp->clttydev) | 1109 | if (rp->clttydev) |
1110 | class_device_destroy(class3270, | 1110 | class_device_destroy(class3270, |
1111 | MKDEV(IBM_TTY3270_MAJOR, rp->minor)); | 1111 | MKDEV(IBM_TTY3270_MAJOR, rp->minor)); |
@@ -1113,7 +1113,7 @@ raw3270_delete_device(struct raw3270 *rp) | |||
1113 | class_device_destroy(class3270, | 1113 | class_device_destroy(class3270, |
1114 | MKDEV(IBM_FS3270_MAJOR, rp->minor)); | 1114 | MKDEV(IBM_FS3270_MAJOR, rp->minor)); |
1115 | list_del_init(&rp->list); | 1115 | list_del_init(&rp->list); |
1116 | up(&raw3270_sem); | 1116 | mutex_unlock(&raw3270_mutex); |
1117 | 1117 | ||
1118 | /* Disconnect from ccw_device. */ | 1118 | /* Disconnect from ccw_device. */ |
1119 | cdev = rp->cdev; | 1119 | cdev = rp->cdev; |
@@ -1209,13 +1209,13 @@ int raw3270_register_notifier(void (*notifier)(int, int)) | |||
1209 | if (!np) | 1209 | if (!np) |
1210 | return -ENOMEM; | 1210 | return -ENOMEM; |
1211 | np->notifier = notifier; | 1211 | np->notifier = notifier; |
1212 | down(&raw3270_sem); | 1212 | mutex_lock(&raw3270_mutex); |
1213 | list_add_tail(&np->list, &raw3270_notifier); | 1213 | list_add_tail(&np->list, &raw3270_notifier); |
1214 | list_for_each_entry(rp, &raw3270_devices, list) { | 1214 | list_for_each_entry(rp, &raw3270_devices, list) { |
1215 | get_device(&rp->cdev->dev); | 1215 | get_device(&rp->cdev->dev); |
1216 | notifier(rp->minor, 1); | 1216 | notifier(rp->minor, 1); |
1217 | } | 1217 | } |
1218 | up(&raw3270_sem); | 1218 | mutex_unlock(&raw3270_mutex); |
1219 | return 0; | 1219 | return 0; |
1220 | } | 1220 | } |
1221 | 1221 | ||
@@ -1223,14 +1223,14 @@ void raw3270_unregister_notifier(void (*notifier)(int, int)) | |||
1223 | { | 1223 | { |
1224 | struct raw3270_notifier *np; | 1224 | struct raw3270_notifier *np; |
1225 | 1225 | ||
1226 | down(&raw3270_sem); | 1226 | mutex_lock(&raw3270_mutex); |
1227 | list_for_each_entry(np, &raw3270_notifier, list) | 1227 | list_for_each_entry(np, &raw3270_notifier, list) |
1228 | if (np->notifier == notifier) { | 1228 | if (np->notifier == notifier) { |
1229 | list_del(&np->list); | 1229 | list_del(&np->list); |
1230 | kfree(np); | 1230 | kfree(np); |
1231 | break; | 1231 | break; |
1232 | } | 1232 | } |
1233 | up(&raw3270_sem); | 1233 | mutex_unlock(&raw3270_mutex); |
1234 | } | 1234 | } |
1235 | 1235 | ||
1236 | /* | 1236 | /* |
@@ -1257,10 +1257,10 @@ raw3270_set_online (struct ccw_device *cdev) | |||
1257 | goto failure; | 1257 | goto failure; |
1258 | raw3270_create_attributes(rp); | 1258 | raw3270_create_attributes(rp); |
1259 | set_bit(RAW3270_FLAGS_READY, &rp->flags); | 1259 | set_bit(RAW3270_FLAGS_READY, &rp->flags); |
1260 | down(&raw3270_sem); | 1260 | mutex_lock(&raw3270_mutex); |
1261 | list_for_each_entry(np, &raw3270_notifier, list) | 1261 | list_for_each_entry(np, &raw3270_notifier, list) |
1262 | np->notifier(rp->minor, 1); | 1262 | np->notifier(rp->minor, 1); |
1263 | up(&raw3270_sem); | 1263 | mutex_unlock(&raw3270_mutex); |
1264 | return 0; | 1264 | return 0; |
1265 | 1265 | ||
1266 | failure: | 1266 | failure: |
@@ -1308,10 +1308,10 @@ raw3270_remove (struct ccw_device *cdev) | |||
1308 | } | 1308 | } |
1309 | spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); | 1309 | spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); |
1310 | 1310 | ||
1311 | down(&raw3270_sem); | 1311 | mutex_lock(&raw3270_mutex); |
1312 | list_for_each_entry(np, &raw3270_notifier, list) | 1312 | list_for_each_entry(np, &raw3270_notifier, list) |
1313 | np->notifier(rp->minor, 0); | 1313 | np->notifier(rp->minor, 0); |
1314 | up(&raw3270_sem); | 1314 | mutex_unlock(&raw3270_mutex); |
1315 | 1315 | ||
1316 | /* Reset 3270 device. */ | 1316 | /* Reset 3270 device. */ |
1317 | raw3270_reset_device(rp); | 1317 | raw3270_reset_device(rp); |
@@ -1371,13 +1371,13 @@ raw3270_init(void) | |||
1371 | rc = ccw_driver_register(&raw3270_ccw_driver); | 1371 | rc = ccw_driver_register(&raw3270_ccw_driver); |
1372 | if (rc == 0) { | 1372 | if (rc == 0) { |
1373 | /* Create attributes for early (= console) device. */ | 1373 | /* Create attributes for early (= console) device. */ |
1374 | down(&raw3270_sem); | 1374 | mutex_lock(&raw3270_mutex); |
1375 | class3270 = class_create(THIS_MODULE, "3270"); | 1375 | class3270 = class_create(THIS_MODULE, "3270"); |
1376 | list_for_each_entry(rp, &raw3270_devices, list) { | 1376 | list_for_each_entry(rp, &raw3270_devices, list) { |
1377 | get_device(&rp->cdev->dev); | 1377 | get_device(&rp->cdev->dev); |
1378 | raw3270_create_attributes(rp); | 1378 | raw3270_create_attributes(rp); |
1379 | } | 1379 | } |
1380 | up(&raw3270_sem); | 1380 | mutex_unlock(&raw3270_mutex); |
1381 | } | 1381 | } |
1382 | return rc; | 1382 | return rc; |
1383 | } | 1383 | } |
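The raw3270 changes are a straight conversion from the old semaphore-as-mutex idiom (DECLARE_MUTEX plus down()/up()) to the real mutex API from <linux/mutex.h>; the locking scope is unchanged. Reduced to its skeleton, with hypothetical names:

	#include <linux/mutex.h>

	static DEFINE_MUTEX(example_mutex);	/* was: static DECLARE_MUTEX(example_sem); */

	static void example_update_list(void)
	{
		mutex_lock(&example_mutex);	/* was: down(&example_sem); */
		/* ... modify the data the lock protects ... */
		mutex_unlock(&example_mutex);	/* was: up(&example_sem); */
	}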
diff --git a/drivers/s390/char/tape.h b/drivers/s390/char/tape.h index 01d865d93791..cd51ace8b610 100644 --- a/drivers/s390/char/tape.h +++ b/drivers/s390/char/tape.h | |||
@@ -250,6 +250,7 @@ extern void tape_free_request(struct tape_request *); | |||
250 | extern int tape_do_io(struct tape_device *, struct tape_request *); | 250 | extern int tape_do_io(struct tape_device *, struct tape_request *); |
251 | extern int tape_do_io_async(struct tape_device *, struct tape_request *); | 251 | extern int tape_do_io_async(struct tape_device *, struct tape_request *); |
252 | extern int tape_do_io_interruptible(struct tape_device *, struct tape_request *); | 252 | extern int tape_do_io_interruptible(struct tape_device *, struct tape_request *); |
253 | extern int tape_cancel_io(struct tape_device *, struct tape_request *); | ||
253 | void tape_hotplug_event(struct tape_device *, int major, int action); | 254 | void tape_hotplug_event(struct tape_device *, int major, int action); |
254 | 255 | ||
255 | static inline int | 256 | static inline int |
diff --git a/drivers/s390/char/tape_34xx.c b/drivers/s390/char/tape_34xx.c index 682039cac15b..d4f2da738078 100644 --- a/drivers/s390/char/tape_34xx.c +++ b/drivers/s390/char/tape_34xx.c | |||
@@ -2,8 +2,7 @@ | |||
2 | * drivers/s390/char/tape_34xx.c | 2 | * drivers/s390/char/tape_34xx.c |
3 | * tape device discipline for 3480/3490 tapes. | 3 | * tape device discipline for 3480/3490 tapes. |
4 | * | 4 | * |
5 | * S390 and zSeries version | 5 | * Copyright (C) IBM Corp. 2001,2006 |
6 | * Copyright (C) 2001,2002 IBM Deutschland Entwicklung GmbH, IBM Corporation | ||
7 | * Author(s): Carsten Otte <cotte@de.ibm.com> | 6 | * Author(s): Carsten Otte <cotte@de.ibm.com> |
8 | * Tuan Ngo-Anh <ngoanh@de.ibm.com> | 7 | * Tuan Ngo-Anh <ngoanh@de.ibm.com> |
9 | * Martin Schwidefsky <schwidefsky@de.ibm.com> | 8 | * Martin Schwidefsky <schwidefsky@de.ibm.com> |
@@ -28,11 +27,6 @@ | |||
28 | debug_info_t *TAPE_DBF_AREA = NULL; | 27 | debug_info_t *TAPE_DBF_AREA = NULL; |
29 | EXPORT_SYMBOL(TAPE_DBF_AREA); | 28 | EXPORT_SYMBOL(TAPE_DBF_AREA); |
30 | 29 | ||
31 | enum tape_34xx_type { | ||
32 | tape_3480, | ||
33 | tape_3490, | ||
34 | }; | ||
35 | |||
36 | #define TAPE34XX_FMT_3480 0 | 30 | #define TAPE34XX_FMT_3480 0 |
37 | #define TAPE34XX_FMT_3480_2_XF 1 | 31 | #define TAPE34XX_FMT_3480_2_XF 1 |
38 | #define TAPE34XX_FMT_3480_XF 2 | 32 | #define TAPE34XX_FMT_3480_XF 2 |
diff --git a/drivers/s390/char/tape_3590.c b/drivers/s390/char/tape_3590.c new file mode 100644 index 000000000000..c3915f60a3aa --- /dev/null +++ b/drivers/s390/char/tape_3590.c | |||
@@ -0,0 +1,1301 @@ | |||
1 | /* | ||
2 | * drivers/s390/char/tape_3590.c | ||
3 | * tape device discipline for 3590 tapes. | ||
4 | * | ||
5 | * Copyright (C) IBM Corp. 2001,2006 | ||
6 | * Author(s): Stefan Bader <shbader@de.ibm.com> | ||
7 | * Michael Holzheu <holzheu@de.ibm.com> | ||
8 | * Martin Schwidefsky <schwidefsky@de.ibm.com> | ||
9 | */ | ||
10 | |||
11 | #include <linux/config.h> | ||
12 | #include <linux/module.h> | ||
13 | #include <linux/init.h> | ||
14 | #include <linux/bio.h> | ||
15 | |||
16 | #define TAPE_DBF_AREA tape_3590_dbf | ||
17 | |||
18 | #include "tape.h" | ||
19 | #include "tape_std.h" | ||
20 | #include "tape_3590.h" | ||
21 | |||
22 | /* | ||
23 | * Pointer to debug area. | ||
24 | */ | ||
25 | debug_info_t *TAPE_DBF_AREA = NULL; | ||
26 | EXPORT_SYMBOL(TAPE_DBF_AREA); | ||
27 | |||
28 | /******************************************************************* | ||
29 | * Error Recovery functions: | ||
30 | * - Read Opposite: implemented | ||
31 | * - Read Device (buffered) log: BRA | ||
32 | * - Read Library log: BRA | ||
33 | * - Swap Devices: BRA | ||
34 | * - Long Busy: BRA | ||
35 | * - Special Intercept: BRA | ||
36 | * - Read Alternate: implemented | ||
37 | *******************************************************************/ | ||
38 | |||
39 | #define PRINTK_HEADER "TAPE_3590: " | ||
40 | |||
41 | static const char *tape_3590_msg[TAPE_3590_MAX_MSG] = { | ||
42 | [0x00] = "", | ||
43 | [0x10] = "Lost Sense", | ||
44 | [0x11] = "Assigned Elsewhere", | ||
45 | [0x12] = "Allegiance Reset", | ||
46 | [0x13] = "Shared Access Violation", | ||
47 | [0x20] = "Command Reject", | ||
48 | [0x21] = "Configuration Error", | ||
49 | [0x22] = "Protection Exception", | ||
50 | [0x23] = "Write Protect", | ||
51 | [0x24] = "Write Length", | ||
52 | [0x25] = "Read-Only Format", | ||
53 | [0x31] = "Beginning of Partition", | ||
54 | [0x33] = "End of Partition", | ||
55 | [0x34] = "End of Data", | ||
56 | [0x35] = "Block not found", | ||
57 | [0x40] = "Device Intervention", | ||
58 | [0x41] = "Loader Intervention", | ||
59 | [0x42] = "Library Intervention", | ||
60 | [0x50] = "Write Error", | ||
61 | [0x51] = "Erase Error", | ||
62 | [0x52] = "Formatting Error", | ||
63 | [0x53] = "Read Error", | ||
64 | [0x54] = "Unsupported Format", | ||
65 | [0x55] = "No Formatting", | ||
66 | [0x56] = "Positioning lost", | ||
67 | [0x57] = "Read Length", | ||
68 | [0x60] = "Unsupported Medium", | ||
69 | [0x61] = "Medium Length Error", | ||
70 | [0x62] = "Medium removed", | ||
71 | [0x64] = "Load Check", | ||
72 | [0x65] = "Unload Check", | ||
73 | [0x70] = "Equipment Check", | ||
74 | [0x71] = "Bus out Check", | ||
75 | [0x72] = "Protocol Error", | ||
76 | [0x73] = "Interface Error", | ||
77 | [0x74] = "Overrun", | ||
78 | [0x75] = "Halt Signal", | ||
79 | [0x90] = "Device fenced", | ||
80 | [0x91] = "Device Path fenced", | ||
81 | [0xa0] = "Volume misplaced", | ||
82 | [0xa1] = "Volume inaccessible", | ||
83 | [0xa2] = "Volume in input", | ||
84 | [0xa3] = "Volume ejected", | ||
85 | [0xa4] = "All categories reserved", | ||
86 | [0xa5] = "Duplicate Volume", | ||
87 | [0xa6] = "Library Manager Offline", | ||
88 | [0xa7] = "Library Output Station full", | ||
89 | [0xa8] = "Vision System non-operational", | ||
90 | [0xa9] = "Library Manager Equipment Check", | ||
91 | [0xaa] = "Library Equipment Check", | ||
92 | [0xab] = "All Library Cells full", | ||
93 | [0xac] = "No Cleaner Volumes in Library", | ||
94 | [0xad] = "I/O Station door open", | ||
95 | [0xae] = "Subsystem environmental alert", | ||
96 | }; | ||
97 | |||
98 | /* | ||
99 | * 3590 IOCTL Overload | ||
100 | */ | ||
101 | static int | ||
102 | tape_3590_ioctl(struct tape_device *device, unsigned int cmd, unsigned long arg) | ||
103 | { | ||
104 | switch (cmd) { | ||
105 | case TAPE390_DISPLAY: { | ||
106 | struct display_struct disp; | ||
107 | |||
108 | if (copy_from_user(&disp, (char __user *) arg, sizeof(disp))) | ||
109 | return -EFAULT; | ||
110 | |||
111 | return tape_std_display(device, &disp); | ||
112 | } | ||
113 | default: | ||
114 | return -EINVAL; /* no additional ioctls */ | ||
115 | } | ||
116 | } | ||
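[Editorial aside, not part of the patch] A minimal user-space sketch of exercising the TAPE390_DISPLAY ioctl handled above. The device node name and the field layout of struct display_struct (taken from <asm/tape390.h>) are assumptions here; error handling is reduced to the bare minimum.

#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <asm/tape390.h>	/* TAPE390_DISPLAY, struct display_struct (assumed layout) */

/* Put a short text onto the 3590 message display. */
int show_message(const char *text)
{
	struct display_struct ds;
	int fd, rc;

	fd = open("/dev/ntibm0", O_RDWR);	/* node name is an assumption */
	if (fd < 0)
		return -1;
	memset(&ds, 0, sizeof(ds));
	strncpy(ds.message1, text, sizeof(ds.message1));
	rc = ioctl(fd, TAPE390_DISPLAY, &ds);
	close(fd);
	return rc;
}

int main(void)
{
	return show_message("BACKUP") ? 1 : 0;
}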
117 | |||
118 | /* | ||
119 | * SENSE Medium: Get Sense data about medium state | ||
120 | */ | ||
121 | static int | ||
122 | tape_3590_sense_medium(struct tape_device *device) | ||
123 | { | ||
124 | struct tape_request *request; | ||
125 | |||
126 | request = tape_alloc_request(1, 128); | ||
127 | if (IS_ERR(request)) | ||
128 | return PTR_ERR(request); | ||
129 | request->op = TO_MSEN; | ||
130 | tape_ccw_end(request->cpaddr, MEDIUM_SENSE, 128, request->cpdata); | ||
131 | return tape_do_io_free(device, request); | ||
132 | } | ||
133 | |||
134 | /* | ||
135 | * MTTELL: Tell block. Return the number of the block relative to the current file. | ||
136 | */ | ||
137 | static int | ||
138 | tape_3590_mttell(struct tape_device *device, int mt_count) | ||
139 | { | ||
140 | __u64 block_id; | ||
141 | int rc; | ||
142 | |||
143 | rc = tape_std_read_block_id(device, &block_id); | ||
144 | if (rc) | ||
145 | return rc; | ||
146 | return block_id >> 32; | ||
147 | } | ||
148 | |||
149 | /* | ||
150 | * MTSEEK: seek to the specified block. | ||
151 | */ | ||
152 | static int | ||
153 | tape_3590_mtseek(struct tape_device *device, int count) | ||
154 | { | ||
155 | struct tape_request *request; | ||
156 | |||
157 | DBF_EVENT(6, "xsee id: %x\n", count); | ||
158 | request = tape_alloc_request(3, 4); | ||
159 | if (IS_ERR(request)) | ||
160 | return PTR_ERR(request); | ||
161 | request->op = TO_LBL; | ||
162 | tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte); | ||
163 | *(__u32 *) request->cpdata = count; | ||
164 | tape_ccw_cc(request->cpaddr + 1, LOCATE, 4, request->cpdata); | ||
165 | tape_ccw_end(request->cpaddr + 2, NOP, 0, NULL); | ||
166 | return tape_do_io_free(device, request); | ||
167 | } | ||
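[Editorial aside, not part of the patch] MTSEEK and MTTELL above are normally reached through the generic magnetic-tape ioctls of the character front end; a rough user-space sketch follows, assuming the char device routes MTIOCTOP to the mtop array registered below and MTIOCPOS to MTTELL.

#include <sys/ioctl.h>
#include <sys/mtio.h>

/* Seek to a block id, then ask the drive where it ended up. */
long seek_and_tell(int fd, int block)
{
	struct mtop op = { .mt_op = MTSEEK, .mt_count = block };
	struct mtpos pos;

	if (ioctl(fd, MTIOCTOP, &op) < 0)
		return -1;
	if (ioctl(fd, MTIOCPOS, &pos) < 0)
		return -1;
	return pos.mt_blkno;
}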
168 | |||
169 | /* | ||
170 | * Read Opposite Error Recovery Function: | ||
171 | * Used when Read Forward does not work | ||
172 | */ | ||
173 | static void | ||
174 | tape_3590_read_opposite(struct tape_device *device, | ||
175 | struct tape_request *request) | ||
176 | { | ||
177 | struct tape_3590_disc_data *data; | ||
178 | |||
179 | /* | ||
180 | * We have allocated 4 ccws in tape_std_read, so we can now | ||
181 | * transform the request to a read backward, followed by a | ||
182 | * forward space block. | ||
183 | */ | ||
184 | request->op = TO_RBA; | ||
185 | tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte); | ||
186 | data = device->discdata; | ||
187 | tape_ccw_cc_idal(request->cpaddr + 1, data->read_back_op, | ||
188 | device->char_data.idal_buf); | ||
189 | tape_ccw_cc(request->cpaddr + 2, FORSPACEBLOCK, 0, NULL); | ||
190 | tape_ccw_end(request->cpaddr + 3, NOP, 0, NULL); | ||
191 | DBF_EVENT(6, "xrop ccwg\n"); | ||
192 | } | ||
193 | |||
194 | /* | ||
195 | * Read Attention Msg | ||
196 | * This should be done after an interrupt with attention bit (0x80) | ||
197 | * in device state. | ||
198 | * | ||
199 | * After a "read attention message" request there are two possible | ||
200 | * results: | ||
201 | * | ||
202 | * 1. A unit check is presented when attention sense is present (e.g. when | ||
203 | * a medium has been unloaded). The attention sense then arrives | ||
204 | * together with the unit check. The recovery action is either "retry" | ||
205 | * (in case there is an attention message pending) or "permanent error". | ||
206 | * | ||
207 | * 2. The attention msg is written to the "read subsystem data" buffer. | ||
208 | * In this case we probably should print it to the console. | ||
209 | */ | ||
210 | static int | ||
211 | tape_3590_read_attmsg(struct tape_device *device) | ||
212 | { | ||
213 | struct tape_request *request; | ||
214 | char *buf; | ||
215 | |||
216 | request = tape_alloc_request(3, 4096); | ||
217 | if (IS_ERR(request)) | ||
218 | return PTR_ERR(request); | ||
219 | request->op = TO_READ_ATTMSG; | ||
220 | buf = request->cpdata; | ||
221 | buf[0] = PREP_RD_SS_DATA; | ||
222 | buf[6] = RD_ATTMSG; /* read att msg */ | ||
223 | tape_ccw_cc(request->cpaddr, PERFORM_SS_FUNC, 12, buf); | ||
224 | tape_ccw_cc(request->cpaddr + 1, READ_SS_DATA, 4096 - 12, buf + 12); | ||
225 | tape_ccw_end(request->cpaddr + 2, NOP, 0, NULL); | ||
226 | return tape_do_io_free(device, request); | ||
227 | } | ||
228 | |||
229 | /* | ||
230 | * These functions are used to schedule follow-up actions from within an | ||
231 | * interrupt context (like unsolicited interrupts). | ||
232 | */ | ||
233 | static void | ||
234 | tape_3590_work_handler(void *data) | ||
235 | { | ||
236 | struct { | ||
237 | struct tape_device *device; | ||
238 | enum tape_op op; | ||
239 | struct work_struct work; | ||
240 | } *p = data; | ||
241 | |||
242 | switch (p->op) { | ||
243 | case TO_MSEN: | ||
244 | tape_3590_sense_medium(p->device); | ||
245 | break; | ||
246 | case TO_READ_ATTMSG: | ||
247 | tape_3590_read_attmsg(p->device); | ||
248 | break; | ||
249 | default: | ||
250 | DBF_EVENT(3, "T3590: work handler undefined for " | ||
251 | "operation 0x%02x\n", p->op); | ||
252 | } | ||
253 | tape_put_device(p->device); | ||
254 | kfree(p); | ||
255 | } | ||
256 | |||
257 | static int | ||
258 | tape_3590_schedule_work(struct tape_device *device, enum tape_op op) | ||
259 | { | ||
260 | struct { | ||
261 | struct tape_device *device; | ||
262 | enum tape_op op; | ||
263 | struct work_struct work; | ||
264 | } *p; | ||
265 | |||
266 | if ((p = kzalloc(sizeof(*p), GFP_ATOMIC)) == NULL) | ||
267 | return -ENOMEM; | ||
268 | |||
269 | INIT_WORK(&p->work, tape_3590_work_handler, p); | ||
270 | |||
271 | p->device = tape_get_device_reference(device); | ||
272 | p->op = op; | ||
273 | |||
274 | schedule_work(&p->work); | ||
275 | return 0; | ||
276 | } | ||
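[Editorial aside, not part of the patch] tape_3590_schedule_work()/tape_3590_work_handler() above are an instance of the usual "defer processing out of interrupt context" idiom of this kernel generation; a stripped-down sketch of the same pattern with hypothetical names (my_ctx, my_defer), using the pre-2.6.20 two-argument work handler signature this driver targets.

#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/errno.h>

struct my_ctx {
	int event;
	struct work_struct work;
};

/* Runs later in process context; frees its own context. */
static void my_handler(void *data)
{
	struct my_ctx *ctx = data;

	/* ... process ctx->event here ... */
	kfree(ctx);
}

/* May be called from interrupt context, hence GFP_ATOMIC. */
static int my_defer(int event)
{
	struct my_ctx *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_ATOMIC);
	if (!ctx)
		return -ENOMEM;
	ctx->event = event;
	INIT_WORK(&ctx->work, my_handler, ctx);
	schedule_work(&ctx->work);
	return 0;
}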
277 | |||
278 | #ifdef CONFIG_S390_TAPE_BLOCK | ||
279 | /* | ||
280 | * Tape Block READ | ||
281 | */ | ||
282 | static struct tape_request * | ||
283 | tape_3590_bread(struct tape_device *device, struct request *req) | ||
284 | { | ||
285 | struct tape_request *request; | ||
286 | struct ccw1 *ccw; | ||
287 | int count = 0, start_block, i; | ||
288 | unsigned off; | ||
289 | char *dst; | ||
290 | struct bio_vec *bv; | ||
291 | struct bio *bio; | ||
292 | |||
293 | DBF_EVENT(6, "xBREDid:"); | ||
294 | start_block = req->sector >> TAPEBLOCK_HSEC_S2B; | ||
295 | DBF_EVENT(6, "start_block = %i\n", start_block); | ||
296 | |||
297 | rq_for_each_bio(bio, req) { | ||
298 | bio_for_each_segment(bv, bio, i) { | ||
299 | count += bv->bv_len >> (TAPEBLOCK_HSEC_S2B + 9); | ||
300 | } | ||
301 | } | ||
302 | request = tape_alloc_request(2 + count + 1, 4); | ||
303 | if (IS_ERR(request)) | ||
304 | return request; | ||
305 | request->op = TO_BLOCK; | ||
306 | *(__u32 *) request->cpdata = start_block; | ||
307 | ccw = request->cpaddr; | ||
308 | ccw = tape_ccw_cc(ccw, MODE_SET_DB, 1, device->modeset_byte); | ||
309 | |||
310 | /* | ||
311 | * We always setup a nop after the mode set ccw. This slot is | ||
312 | * used in tape_3590_check_locate to insert a locate ccw if the | ||
313 | * current tape position doesn't match the start block to be read. | ||
314 | */ | ||
315 | ccw = tape_ccw_cc(ccw, NOP, 0, NULL); | ||
316 | |||
317 | rq_for_each_bio(bio, req) { | ||
318 | bio_for_each_segment(bv, bio, i) { | ||
319 | dst = kmap(bv->bv_page) + bv->bv_offset; | ||
320 | for (off = 0; off < bv->bv_len; | ||
321 | off += TAPEBLOCK_HSEC_SIZE) { | ||
322 | ccw->flags = CCW_FLAG_CC; | ||
323 | ccw->cmd_code = READ_FORWARD; | ||
324 | ccw->count = TAPEBLOCK_HSEC_SIZE; | ||
325 | set_normalized_cda(ccw, (void *) __pa(dst)); | ||
326 | ccw++; | ||
327 | dst += TAPEBLOCK_HSEC_SIZE; | ||
328 | } | ||
329 | if (off > bv->bv_len) | ||
330 | BUG(); | ||
331 | } | ||
332 | } | ||
333 | ccw = tape_ccw_end(ccw, NOP, 0, NULL); | ||
334 | DBF_EVENT(6, "xBREDccwg\n"); | ||
335 | return request; | ||
336 | } | ||
337 | |||
338 | static void | ||
339 | tape_3590_free_bread(struct tape_request *request) | ||
340 | { | ||
341 | struct ccw1 *ccw; | ||
342 | |||
343 | /* Last ccw is a nop and doesn't need clear_normalized_cda */ | ||
344 | for (ccw = request->cpaddr; ccw->flags & CCW_FLAG_CC; ccw++) | ||
345 | if (ccw->cmd_code == READ_FORWARD) | ||
346 | clear_normalized_cda(ccw); | ||
347 | tape_free_request(request); | ||
348 | } | ||
349 | |||
350 | /* | ||
351 | * check_locate is called just before the tape request is passed to | ||
352 | * the common io layer for execution. It has to check the current | ||
353 | * tape position and insert a locate ccw if it doesn't match the | ||
354 | * start block for the request. | ||
355 | */ | ||
356 | static void | ||
357 | tape_3590_check_locate(struct tape_device *device, struct tape_request *request) | ||
358 | { | ||
359 | __u32 *start_block; | ||
360 | |||
361 | start_block = (__u32 *) request->cpdata; | ||
362 | if (*start_block != device->blk_data.block_position) { | ||
363 | /* Add the start offset of the file to get the real block. */ | ||
364 | *start_block += device->bof; | ||
365 | tape_ccw_cc(request->cpaddr + 1, LOCATE, 4, request->cpdata); | ||
366 | } | ||
367 | } | ||
368 | #endif | ||
369 | |||
370 | /* | ||
371 | * The done handler is called at device/channel end and wakes up the sleeping | ||
372 | * process | ||
373 | */ | ||
374 | static int | ||
375 | tape_3590_done(struct tape_device *device, struct tape_request *request) | ||
376 | { | ||
377 | struct tape_3590_med_sense *sense; | ||
378 | |||
379 | DBF_EVENT(6, "%s done\n", tape_op_verbose[request->op]); | ||
380 | |||
381 | switch (request->op) { | ||
382 | case TO_BSB: | ||
383 | case TO_BSF: | ||
384 | case TO_DSE: | ||
385 | case TO_FSB: | ||
386 | case TO_FSF: | ||
387 | case TO_LBL: | ||
388 | case TO_RFO: | ||
389 | case TO_RBA: | ||
390 | case TO_REW: | ||
391 | case TO_WRI: | ||
392 | case TO_WTM: | ||
393 | case TO_BLOCK: | ||
394 | case TO_LOAD: | ||
395 | tape_med_state_set(device, MS_LOADED); | ||
396 | break; | ||
397 | case TO_RUN: | ||
398 | tape_med_state_set(device, MS_UNLOADED); | ||
399 | break; | ||
400 | case TO_MSEN: | ||
401 | sense = (struct tape_3590_med_sense *) request->cpdata; | ||
402 | if (sense->masst == MSENSE_UNASSOCIATED) | ||
403 | tape_med_state_set(device, MS_UNLOADED); | ||
404 | if (sense->masst == MSENSE_ASSOCIATED_MOUNT) | ||
405 | tape_med_state_set(device, MS_LOADED); | ||
406 | break; | ||
407 | case TO_RBI: /* RBI seems to succeed even without medium loaded. */ | ||
408 | case TO_NOP: /* Same to NOP. */ | ||
409 | case TO_READ_CONFIG: | ||
410 | case TO_READ_ATTMSG: | ||
411 | case TO_DIS: | ||
412 | case TO_ASSIGN: | ||
413 | case TO_UNASSIGN: | ||
414 | break; | ||
415 | case TO_SIZE: | ||
416 | break; | ||
417 | } | ||
418 | return TAPE_IO_SUCCESS; | ||
419 | } | ||
420 | |||
421 | /* | ||
422 | * This function is called when error recovery was successful | ||
423 | */ | ||
424 | static inline int | ||
425 | tape_3590_erp_succeded(struct tape_device *device, struct tape_request *request) | ||
426 | { | ||
427 | DBF_EVENT(3, "Error Recovery successful for %s\n", | ||
428 | tape_op_verbose[request->op]); | ||
429 | return tape_3590_done(device, request); | ||
430 | } | ||
431 | |||
432 | /* | ||
433 | * This function is called when error recovery was not successful | ||
434 | */ | ||
435 | static inline int | ||
436 | tape_3590_erp_failed(struct tape_device *device, struct tape_request *request, | ||
437 | struct irb *irb, int rc) | ||
438 | { | ||
439 | DBF_EVENT(3, "Error Recovery failed for %s\n", | ||
440 | tape_op_verbose[request->op]); | ||
441 | tape_dump_sense_dbf(device, request, irb); | ||
442 | return rc; | ||
443 | } | ||
444 | |||
445 | /* | ||
446 | * Error Recovery do retry | ||
447 | */ | ||
448 | static inline int | ||
449 | tape_3590_erp_retry(struct tape_device *device, struct tape_request *request, | ||
450 | struct irb *irb) | ||
451 | { | ||
452 | DBF_EVENT(2, "Retry: %s\n", tape_op_verbose[request->op]); | ||
453 | tape_dump_sense_dbf(device, request, irb); | ||
454 | return TAPE_IO_RETRY; | ||
455 | } | ||
456 | |||
457 | /* | ||
458 | * Handle unsolicited interrupts | ||
459 | */ | ||
460 | static int | ||
461 | tape_3590_unsolicited_irq(struct tape_device *device, struct irb *irb) | ||
462 | { | ||
463 | if (irb->scsw.dstat == DEV_STAT_CHN_END) | ||
464 | /* Probably result of halt ssch */ | ||
465 | return TAPE_IO_PENDING; | ||
466 | else if (irb->scsw.dstat == 0x85) | ||
467 | /* Device Ready -> check medium state */ | ||
468 | tape_3590_schedule_work(device, TO_MSEN); | ||
469 | else if (irb->scsw.dstat & DEV_STAT_ATTENTION) | ||
470 | tape_3590_schedule_work(device, TO_READ_ATTMSG); | ||
471 | else { | ||
472 | DBF_EVENT(3, "unsol.irq! dev end: %08x\n", device->cdev_id); | ||
473 | PRINT_WARN("Unsolicited IRQ (Device End) caught.\n"); | ||
474 | tape_dump_sense(device, NULL, irb); | ||
475 | } | ||
476 | return TAPE_IO_SUCCESS; | ||
477 | } | ||
478 | |||
479 | /* | ||
480 | * Basic Recovery routine | ||
481 | */ | ||
482 | static int | ||
483 | tape_3590_erp_basic(struct tape_device *device, struct tape_request *request, | ||
484 | struct irb *irb, int rc) | ||
485 | { | ||
486 | struct tape_3590_sense *sense; | ||
487 | |||
488 | sense = (struct tape_3590_sense *) irb->ecw; | ||
489 | |||
490 | switch (sense->bra) { | ||
491 | case SENSE_BRA_PER: | ||
492 | return tape_3590_erp_failed(device, request, irb, rc); | ||
493 | case SENSE_BRA_CONT: | ||
494 | return tape_3590_erp_succeded(device, request); | ||
495 | case SENSE_BRA_RE: | ||
496 | return tape_3590_erp_retry(device, request, irb); | ||
497 | case SENSE_BRA_DRE: | ||
498 | return tape_3590_erp_failed(device, request, irb, rc); | ||
499 | default: | ||
500 | PRINT_ERR("Unknown BRA %x - This should not happen!\n", | ||
501 | sense->bra); | ||
502 | BUG(); | ||
503 | return TAPE_IO_STOP; | ||
504 | } | ||
505 | } | ||
506 | |||
507 | /* | ||
508 | * RDL: Read Device (buffered) log | ||
509 | */ | ||
510 | static int | ||
511 | tape_3590_erp_read_buf_log(struct tape_device *device, | ||
512 | struct tape_request *request, struct irb *irb) | ||
513 | { | ||
514 | /* | ||
515 | * We just do the basic error recovery at the moment (retry). | ||
516 | * Perhaps in the future, we read the log and dump it somewhere... | ||
517 | */ | ||
518 | return tape_3590_erp_basic(device, request, irb, -EIO); | ||
519 | } | ||
520 | |||
521 | /* | ||
522 | * SWAP: Swap Devices | ||
523 | */ | ||
524 | static int | ||
525 | tape_3590_erp_swap(struct tape_device *device, struct tape_request *request, | ||
526 | struct irb *irb) | ||
527 | { | ||
528 | /* | ||
529 | * This error recovery should swap the tapes | ||
530 | * if the original has a problem. The operation | ||
531 | * should proceed with the new tape... this | ||
532 | * should probably be done in user space! | ||
533 | */ | ||
534 | PRINT_WARN("(%s): Swap Tape Device!\n", device->cdev->dev.bus_id); | ||
535 | return tape_3590_erp_basic(device, request, irb, -EIO); | ||
536 | } | ||
537 | |||
538 | /* | ||
539 | * LBY: Long Busy | ||
540 | */ | ||
541 | static int | ||
542 | tape_3590_erp_long_busy(struct tape_device *device, | ||
543 | struct tape_request *request, struct irb *irb) | ||
544 | { | ||
545 | /* FIXME: how about WAITING for a minute ? */ | ||
546 | PRINT_WARN("(%s): Device is busy! Please wait a minute!\n", | ||
547 | device->cdev->dev.bus_id); | ||
548 | return tape_3590_erp_basic(device, request, irb, -EBUSY); | ||
549 | } | ||
550 | |||
551 | /* | ||
552 | * SPI: Special Intercept | ||
553 | */ | ||
554 | static int | ||
555 | tape_3590_erp_special_interrupt(struct tape_device *device, | ||
556 | struct tape_request *request, struct irb *irb) | ||
557 | { | ||
558 | return tape_3590_erp_basic(device, request, irb, -EIO); | ||
559 | } | ||
560 | |||
561 | /* | ||
562 | * RDA: Read Alternate | ||
563 | */ | ||
564 | static int | ||
565 | tape_3590_erp_read_alternate(struct tape_device *device, | ||
566 | struct tape_request *request, struct irb *irb) | ||
567 | { | ||
568 | struct tape_3590_disc_data *data; | ||
569 | |||
570 | /* | ||
571 | * The issued Read Backward or Read Previous command is not | ||
572 | * supported by the device | ||
573 | * The recovery action should be to issue another command: | ||
574 | * Read Previous: if Read Backward is not supported | ||
575 | * Read Backward: if Read Previous is not supported | ||
576 | */ | ||
577 | data = device->discdata; | ||
578 | if (data->read_back_op == READ_PREVIOUS) { | ||
579 | DBF_EVENT(2, "(%08x): No support for READ_PREVIOUS command\n", | ||
580 | device->cdev_id); | ||
581 | data->read_back_op = READ_BACKWARD; | ||
582 | } else { | ||
583 | DBF_EVENT(2, "(%08x): No support for READ_BACKWARD command\n", | ||
584 | device->cdev_id); | ||
585 | data->read_back_op = READ_PREVIOUS; | ||
586 | } | ||
587 | tape_3590_read_opposite(device, request); | ||
588 | return tape_3590_erp_retry(device, request, irb); | ||
589 | } | ||
590 | |||
591 | /* | ||
592 | * Error Recovery read opposite | ||
593 | */ | ||
594 | static int | ||
595 | tape_3590_erp_read_opposite(struct tape_device *device, | ||
596 | struct tape_request *request, struct irb *irb) | ||
597 | { | ||
598 | switch (request->op) { | ||
599 | case TO_RFO: | ||
600 | /* | ||
601 | * We did read forward, but the data could not be read. | ||
602 | * We will read backward and then skip forward again. | ||
603 | */ | ||
604 | tape_3590_read_opposite(device, request); | ||
605 | return tape_3590_erp_retry(device, request, irb); | ||
606 | case TO_RBA: | ||
607 | /* We tried to read forward and backward, but had no success */ | ||
608 | return tape_3590_erp_failed(device, request, irb, -EIO); | ||
609 | break; | ||
610 | default: | ||
611 | PRINT_WARN("read_opposite_recovery_called_with_op: %s\n", | ||
612 | tape_op_verbose[request->op]); | ||
613 | return tape_3590_erp_failed(device, request, irb, -EIO); | ||
614 | } | ||
615 | } | ||
616 | |||
617 | /* | ||
618 | * Print an MIM (Media Information Message) (message code f0) | ||
619 | */ | ||
620 | static void | ||
621 | tape_3590_print_mim_msg_f0(struct tape_device *device, struct irb *irb) | ||
622 | { | ||
623 | struct tape_3590_sense *sense; | ||
624 | |||
625 | sense = (struct tape_3590_sense *) irb->ecw; | ||
626 | /* Exception Message */ | ||
627 | switch (sense->fmt.f70.emc) { | ||
628 | case 0x02: | ||
629 | PRINT_WARN("(%s): Data degraded\n", device->cdev->dev.bus_id); | ||
630 | break; | ||
631 | case 0x03: | ||
632 | PRINT_WARN("(%s): Data degraded in partition %i\n", | ||
633 | device->cdev->dev.bus_id, sense->fmt.f70.mp); | ||
634 | break; | ||
635 | case 0x04: | ||
636 | PRINT_WARN("(%s): Medium degraded\n", device->cdev->dev.bus_id); | ||
637 | break; | ||
638 | case 0x05: | ||
639 | PRINT_WARN("(%s): Medium degraded in partition %i\n", | ||
640 | device->cdev->dev.bus_id, sense->fmt.f70.mp); | ||
641 | break; | ||
642 | case 0x06: | ||
643 | PRINT_WARN("(%s): Block 0 Error\n", device->cdev->dev.bus_id); | ||
644 | break; | ||
645 | case 0x07: | ||
646 | PRINT_WARN("(%s): Medium Exception 0x%02x\n", | ||
647 | device->cdev->dev.bus_id, sense->fmt.f70.md); | ||
648 | break; | ||
649 | default: | ||
650 | PRINT_WARN("(%s): MIM ExMsg: 0x%02x\n", | ||
651 | device->cdev->dev.bus_id, sense->fmt.f70.emc); | ||
652 | break; | ||
653 | } | ||
654 | /* Service Message */ | ||
655 | switch (sense->fmt.f70.smc) { | ||
656 | case 0x02: | ||
657 | PRINT_WARN("(%s): Reference Media maintenance procedure %i\n", | ||
658 | device->cdev->dev.bus_id, sense->fmt.f70.md); | ||
659 | break; | ||
660 | default: | ||
661 | PRINT_WARN("(%s): MIM ServiceMsg: 0x%02x\n", | ||
662 | device->cdev->dev.bus_id, sense->fmt.f70.smc); | ||
663 | break; | ||
664 | } | ||
665 | } | ||
666 | |||
667 | /* | ||
668 | * Print an I/O Subsystem Service Information Message (message code f1) | ||
669 | */ | ||
670 | static void | ||
671 | tape_3590_print_io_sim_msg_f1(struct tape_device *device, struct irb *irb) | ||
672 | { | ||
673 | struct tape_3590_sense *sense; | ||
674 | |||
675 | sense = (struct tape_3590_sense *) irb->ecw; | ||
676 | /* Exception Message */ | ||
677 | switch (sense->fmt.f71.emc) { | ||
678 | case 0x01: | ||
679 | PRINT_WARN("(%s): Effect of failure is unknown\n", | ||
680 | device->cdev->dev.bus_id); | ||
681 | break; | ||
682 | case 0x02: | ||
683 | PRINT_WARN("(%s): CU Exception - no performance impact\n", | ||
684 | device->cdev->dev.bus_id); | ||
685 | break; | ||
686 | case 0x03: | ||
687 | PRINT_WARN("(%s): CU Exception on channel interface 0x%02x\n", | ||
688 | device->cdev->dev.bus_id, sense->fmt.f71.md[0]); | ||
689 | break; | ||
690 | case 0x04: | ||
691 | PRINT_WARN("(%s): CU Exception on device path 0x%02x\n", | ||
692 | device->cdev->dev.bus_id, sense->fmt.f71.md[0]); | ||
693 | break; | ||
694 | case 0x05: | ||
695 | PRINT_WARN("(%s): CU Exception on library path 0x%02x\n", | ||
696 | device->cdev->dev.bus_id, sense->fmt.f71.md[0]); | ||
697 | break; | ||
698 | case 0x06: | ||
699 | PRINT_WARN("(%s): CU Exception on node 0x%02x\n", | ||
700 | device->cdev->dev.bus_id, sense->fmt.f71.md[0]); | ||
701 | break; | ||
702 | case 0x07: | ||
703 | PRINT_WARN("(%s): CU Exception on partition 0x%02x\n", | ||
704 | device->cdev->dev.bus_id, sense->fmt.f71.md[0]); | ||
705 | break; | ||
706 | default: | ||
707 | PRINT_WARN("(%s): SIM ExMsg: 0x%02x\n", | ||
708 | device->cdev->dev.bus_id, sense->fmt.f71.emc); | ||
709 | } | ||
710 | /* Service Message */ | ||
711 | switch (sense->fmt.f71.smc) { | ||
712 | case 0x01: | ||
713 | PRINT_WARN("(%s): Repair impact is unknown\n", | ||
714 | device->cdev->dev.bus_id); | ||
715 | break; | ||
716 | case 0x02: | ||
717 | PRINT_WARN("(%s): Repair will not impact cu performance\n", | ||
718 | device->cdev->dev.bus_id); | ||
719 | break; | ||
720 | case 0x03: | ||
721 | if (sense->fmt.f71.mdf == 0) | ||
722 | PRINT_WARN("(%s): Repair will disable node " | ||
723 | "0x%x on CU\n", | ||
724 | device->cdev->dev.bus_id, | ||
725 | sense->fmt.f71.md[1]); | ||
726 | else | ||
727 | PRINT_WARN("(%s): Repair will disable nodes " | ||
728 | "(0x%x-0x%x) on CU\n", | ||
729 | device->cdev->dev.bus_id, | ||
730 | sense->fmt.f71.md[1], sense->fmt.f71.md[2]); | ||
731 | break; | ||
732 | case 0x04: | ||
733 | if (sense->fmt.f71.mdf == 0) | ||
734 | PRINT_WARN("(%s): Repair will disable channel path " | ||
735 | "0x%x on CU\n", | ||
736 | device->cdev->dev.bus_id, | ||
737 | sense->fmt.f71.md[1]); | ||
738 | else | ||
739 | PRINT_WARN("(%s): Repair will disable channel paths " | ||
740 | "(0x%x-0x%x) on CU\n", | ||
741 | device->cdev->dev.bus_id, | ||
742 | sense->fmt.f71.md[1], sense->fmt.f71.md[2]); | ||
743 | break; | ||
744 | case 0x05: | ||
745 | if (sense->fmt.f71.mdf == 0) | ||
746 | PRINT_WARN("(%s): Repair will disable device path " | ||
747 | "0x%x on CU\n", | ||
748 | device->cdev->dev.bus_id, | ||
749 | sense->fmt.f71.md[1]); | ||
750 | else | ||
751 | PRINT_WARN("(%s): Repair will disable device paths " | ||
752 | "(0x%x-0x%x) on CU\n", | ||
753 | device->cdev->dev.bus_id, | ||
754 | sense->fmt.f71.md[1], sense->fmt.f71.md[2]); | ||
755 | break; | ||
756 | case 0x06: | ||
757 | if (sense->fmt.f71.mdf == 0) | ||
758 | PRINT_WARN("(%s): Repair will disable library path " | ||
759 | "0x%x on CU\n", | ||
760 | device->cdev->dev.bus_id, | ||
761 | sense->fmt.f71.md[1]); | ||
762 | else | ||
763 | PRINT_WARN("(%s): Repair will disable library paths " | ||
764 | "(0x%x-0x%x) on CU\n", | ||
765 | device->cdev->dev.bus_id, | ||
766 | sense->fmt.f71.md[1], sense->fmt.f71.md[2]); | ||
767 | break; | ||
768 | case 0x07: | ||
769 | PRINT_WARN("(%s): Repair will disable access to CU\n", | ||
770 | device->cdev->dev.bus_id); | ||
771 | break; | ||
772 | default: | ||
773 | PRINT_WARN("(%s): SIM ServiceMsg: 0x%02x\n", | ||
774 | device->cdev->dev.bus_id, sense->fmt.f71.smc); | ||
775 | } | ||
776 | } | ||
777 | |||
778 | /* | ||
779 | * Print a Device Subsystem Service Information Message (message code f2) | ||
780 | */ | ||
781 | static void | ||
782 | tape_3590_print_dev_sim_msg_f2(struct tape_device *device, struct irb *irb) | ||
783 | { | ||
784 | struct tape_3590_sense *sense; | ||
785 | |||
786 | sense = (struct tape_3590_sense *) irb->ecw; | ||
787 | /* Exception Message */ | ||
788 | switch (sense->fmt.f71.emc) { | ||
789 | case 0x01: | ||
790 | PRINT_WARN("(%s): Effect of failure is unknown\n", | ||
791 | device->cdev->dev.bus_id); | ||
792 | break; | ||
793 | case 0x02: | ||
794 | PRINT_WARN("(%s): DV Exception - no performance impact\n", | ||
795 | device->cdev->dev.bus_id); | ||
796 | break; | ||
797 | case 0x03: | ||
798 | PRINT_WARN("(%s): DV Exception on channel interface 0x%02x\n", | ||
799 | device->cdev->dev.bus_id, sense->fmt.f71.md[0]); | ||
800 | break; | ||
801 | case 0x04: | ||
802 | PRINT_WARN("(%s): DV Exception on loader 0x%02x\n", | ||
803 | device->cdev->dev.bus_id, sense->fmt.f71.md[0]); | ||
804 | break; | ||
805 | case 0x05: | ||
806 | PRINT_WARN("(%s): DV Exception on message display 0x%02x\n", | ||
807 | device->cdev->dev.bus_id, sense->fmt.f71.md[0]); | ||
808 | break; | ||
809 | case 0x06: | ||
810 | PRINT_WARN("(%s): DV Exception in tape path\n", | ||
811 | device->cdev->dev.bus_id); | ||
812 | break; | ||
813 | case 0x07: | ||
814 | PRINT_WARN("(%s): DV Exception in drive\n", | ||
815 | device->cdev->dev.bus_id); | ||
816 | break; | ||
817 | default: | ||
818 | PRINT_WARN("(%s): DSIM ExMsg: 0x%02x\n", | ||
819 | device->cdev->dev.bus_id, sense->fmt.f71.emc); | ||
820 | } | ||
821 | /* Service Message */ | ||
822 | switch (sense->fmt.f71.smc) { | ||
823 | case 0x01: | ||
824 | PRINT_WARN("(%s): Repair impact is unknown\n", | ||
825 | device->cdev->dev.bus_id); | ||
826 | break; | ||
827 | case 0x02: | ||
828 | PRINT_WARN("(%s): Repair will not impact device performance\n", | ||
829 | device->cdev->dev.bus_id); | ||
830 | break; | ||
831 | case 0x03: | ||
832 | if (sense->fmt.f71.mdf == 0) | ||
833 | PRINT_WARN("(%s): Repair will disable channel path " | ||
834 | "0x%x on DV\n", | ||
835 | device->cdev->dev.bus_id, | ||
836 | sense->fmt.f71.md[1]); | ||
837 | else | ||
838 | PRINT_WARN("(%s): Repair will disable channel path " | ||
839 | "(0x%x-0x%x) on DV\n", | ||
840 | device->cdev->dev.bus_id, | ||
841 | sense->fmt.f71.md[1], sense->fmt.f71.md[2]); | ||
842 | break; | ||
843 | case 0x04: | ||
844 | if (sense->fmt.f71.mdf == 0) | ||
845 | PRINT_WARN("(%s): Repair will disable interface 0x%x " | ||
846 | "on DV\n", | ||
847 | device->cdev->dev.bus_id, | ||
848 | sense->fmt.f71.md[1]); | ||
849 | else | ||
850 | PRINT_WARN("(%s): Repair will disable interfaces " | ||
851 | "(0x%x-0x%x) on DV\n", | ||
852 | device->cdev->dev.bus_id, | ||
853 | sense->fmt.f71.md[1], sense->fmt.f71.md[2]); | ||
854 | break; | ||
855 | case 0x05: | ||
856 | if (sense->fmt.f71.mdf == 0) | ||
857 | PRINT_WARN("(%s): Repair will disable loader 0x%x " | ||
858 | "on DV\n", | ||
859 | device->cdev->dev.bus_id, | ||
860 | sense->fmt.f71.md[1]); | ||
861 | else | ||
862 | PRINT_WARN("(%s): Repair will disable loader " | ||
863 | "(0x%x-0x%x) on DV\n", | ||
864 | device->cdev->dev.bus_id, | ||
865 | sense->fmt.f71.md[1], sense->fmt.f71.md[2]); | ||
866 | break; | ||
867 | case 0x07: | ||
868 | PRINT_WARN("(%s): Repair will disable access to DV\n", | ||
869 | device->cdev->dev.bus_id); | ||
870 | break; | ||
871 | case 0x08: | ||
872 | if (sense->fmt.f71.mdf == 0) | ||
873 | PRINT_WARN("(%s): Repair will disable message " | ||
874 | "display 0x%x on DV\n", | ||
875 | device->cdev->dev.bus_id, | ||
876 | sense->fmt.f71.md[1]); | ||
877 | else | ||
878 | PRINT_WARN("(%s): Repair will disable message " | ||
879 | "displays (0x%x-0x%x) on DV\n", | ||
880 | device->cdev->dev.bus_id, | ||
881 | sense->fmt.f71.md[1], sense->fmt.f71.md[2]); | ||
882 | break; | ||
883 | case 0x09: | ||
884 | PRINT_WARN("(%s): Clean DV\n", device->cdev->dev.bus_id); | ||
885 | break; | ||
886 | default: | ||
887 | PRINT_WARN("(%s): DSIM ServiceMsg: 0x%02x\n", | ||
888 | device->cdev->dev.bus_id, sense->fmt.f71.smc); | ||
889 | } | ||
890 | } | ||
891 | |||
892 | /* | ||
893 | * Print standard ERA Message | ||
894 | */ | ||
895 | static void | ||
896 | tape_3590_print_era_msg(struct tape_device *device, struct irb *irb) | ||
897 | { | ||
898 | struct tape_3590_sense *sense; | ||
899 | |||
900 | sense = (struct tape_3590_sense *) irb->ecw; | ||
901 | if (sense->mc == 0) | ||
902 | return; | ||
903 | if ((sense->mc > 0) && (sense->mc < TAPE_3590_MAX_MSG)) { | ||
904 | if (tape_3590_msg[sense->mc] != NULL) | ||
905 | PRINT_WARN("(%s): %s\n", device->cdev->dev.bus_id, | ||
906 | tape_3590_msg[sense->mc]); | ||
907 | else { | ||
908 | PRINT_WARN("(%s): Message Code 0x%x\n", | ||
909 | device->cdev->dev.bus_id, sense->mc); | ||
910 | } | ||
911 | return; | ||
912 | } | ||
913 | if (sense->mc == 0xf0) { | ||
914 | /* Standard Media Information Message */ | ||
915 | PRINT_WARN("(%s): MIM SEV=%i, MC=%02x, ES=%x/%x, " | ||
916 | "RC=%02x-%04x-%02x\n", device->cdev->dev.bus_id, | ||
917 | sense->fmt.f70.sev, sense->mc, | ||
918 | sense->fmt.f70.emc, sense->fmt.f70.smc, | ||
919 | sense->fmt.f70.refcode, sense->fmt.f70.mid, | ||
920 | sense->fmt.f70.fid); | ||
921 | tape_3590_print_mim_msg_f0(device, irb); | ||
922 | return; | ||
923 | } | ||
924 | if (sense->mc == 0xf1) { | ||
925 | /* Standard I/O Subsystem Service Information Message */ | ||
926 | PRINT_WARN("(%s): IOSIM SEV=%i, DEVTYPE=3590/%02x, " | ||
927 | "MC=%02x, ES=%x/%x, REF=0x%04x-0x%04x-0x%04x\n", | ||
928 | device->cdev->dev.bus_id, sense->fmt.f71.sev, | ||
929 | device->cdev->id.dev_model, | ||
930 | sense->mc, sense->fmt.f71.emc, | ||
931 | sense->fmt.f71.smc, sense->fmt.f71.refcode1, | ||
932 | sense->fmt.f71.refcode2, sense->fmt.f71.refcode3); | ||
933 | tape_3590_print_io_sim_msg_f1(device, irb); | ||
934 | return; | ||
935 | } | ||
936 | if (sense->mc == 0xf2) { | ||
937 | /* Standard Device Service Information Message */ | ||
938 | PRINT_WARN("(%s): DEVSIM SEV=%i, DEVTYPE=3590/%02x, " | ||
939 | "MC=%02x, ES=%x/%x, REF=0x%04x-0x%04x-0x%04x\n", | ||
940 | device->cdev->dev.bus_id, sense->fmt.f71.sev, | ||
941 | device->cdev->id.dev_model, | ||
942 | sense->mc, sense->fmt.f71.emc, | ||
943 | sense->fmt.f71.smc, sense->fmt.f71.refcode1, | ||
944 | sense->fmt.f71.refcode2, sense->fmt.f71.refcode3); | ||
945 | tape_3590_print_dev_sim_msg_f2(device, irb); | ||
946 | return; | ||
947 | } | ||
948 | if (sense->mc == 0xf3) { | ||
949 | /* Standard Library Service Information Message */ | ||
950 | return; | ||
951 | } | ||
952 | PRINT_WARN("(%s): Device Message(%x)\n", | ||
953 | device->cdev->dev.bus_id, sense->mc); | ||
954 | } | ||
955 | |||
956 | /* | ||
957 | * 3590 error recovery routine: | ||
958 | * If possible, it tries to recover from the error. If this is not possible, | ||
959 | * inform the user about the problem. | ||
960 | */ | ||
961 | static int | ||
962 | tape_3590_unit_check(struct tape_device *device, struct tape_request *request, | ||
963 | struct irb *irb) | ||
964 | { | ||
965 | struct tape_3590_sense *sense; | ||
966 | int rc; | ||
967 | |||
968 | #ifdef CONFIG_S390_TAPE_BLOCK | ||
969 | if (request->op == TO_BLOCK) { | ||
970 | /* | ||
971 | * Recovery for block device requests. Set the block_position | ||
972 | * to something invalid and retry. | ||
973 | */ | ||
974 | device->blk_data.block_position = -1; | ||
975 | if (request->retries-- <= 0) | ||
976 | return tape_3590_erp_failed(device, request, irb, -EIO); | ||
977 | else | ||
978 | return tape_3590_erp_retry(device, request, irb); | ||
979 | } | ||
980 | #endif | ||
981 | |||
982 | sense = (struct tape_3590_sense *) irb->ecw; | ||
983 | |||
984 | /* | ||
985 | * First check all RC-QRCs where we want to do something special | ||
986 | * - "break": basic error recovery is done | ||
987 | * - "goto out:": just print error message if available | ||
988 | */ | ||
989 | rc = -EIO; | ||
990 | switch (sense->rc_rqc) { | ||
991 | |||
992 | case 0x1110: | ||
993 | tape_3590_print_era_msg(device, irb); | ||
994 | return tape_3590_erp_read_buf_log(device, request, irb); | ||
995 | |||
996 | case 0x2011: | ||
997 | tape_3590_print_era_msg(device, irb); | ||
998 | return tape_3590_erp_read_alternate(device, request, irb); | ||
999 | |||
1000 | case 0x2230: | ||
1001 | case 0x2231: | ||
1002 | tape_3590_print_era_msg(device, irb); | ||
1003 | return tape_3590_erp_special_interrupt(device, request, irb); | ||
1004 | |||
1005 | case 0x3010: | ||
1006 | DBF_EVENT(2, "(%08x): Backward at Beginning of Partition\n", | ||
1007 | device->cdev_id); | ||
1008 | return tape_3590_erp_basic(device, request, irb, -ENOSPC); | ||
1009 | case 0x3012: | ||
1010 | DBF_EVENT(2, "(%08x): Forward at End of Partition\n", | ||
1011 | device->cdev_id); | ||
1012 | return tape_3590_erp_basic(device, request, irb, -ENOSPC); | ||
1013 | case 0x3020: | ||
1014 | DBF_EVENT(2, "(%08x): End of Data Mark\n", device->cdev_id); | ||
1015 | return tape_3590_erp_basic(device, request, irb, -ENOSPC); | ||
1016 | |||
1017 | case 0x3122: | ||
1018 | DBF_EVENT(2, "(%08x): Rewind Unload initiated\n", | ||
1019 | device->cdev_id); | ||
1020 | return tape_3590_erp_basic(device, request, irb, -EIO); | ||
1021 | case 0x3123: | ||
1022 | DBF_EVENT(2, "(%08x): Rewind Unload complete\n", | ||
1023 | device->cdev_id); | ||
1024 | tape_med_state_set(device, MS_UNLOADED); | ||
1025 | return tape_3590_erp_basic(device, request, irb, 0); | ||
1026 | |||
1027 | case 0x4010: | ||
1028 | /* | ||
1029 | * print additional msg since default msg | ||
1030 | * "device intervention" is not very meaningful | ||
1031 | */ | ||
1032 | PRINT_WARN("(%s): Tape operation when medium not loaded\n", | ||
1033 | device->cdev->dev.bus_id); | ||
1034 | tape_med_state_set(device, MS_UNLOADED); | ||
1035 | return tape_3590_erp_basic(device, request, irb, -ENOMEDIUM); | ||
1036 | case 0x4012: /* Device Long Busy */ | ||
1037 | tape_3590_print_era_msg(device, irb); | ||
1038 | return tape_3590_erp_long_busy(device, request, irb); | ||
1039 | |||
1040 | case 0x5010: | ||
1041 | if (sense->rac == 0xd0) { | ||
1042 | /* Swap */ | ||
1043 | tape_3590_print_era_msg(device, irb); | ||
1044 | return tape_3590_erp_swap(device, request, irb); | ||
1045 | } | ||
1046 | if (sense->rac == 0x26) { | ||
1047 | /* Read Opposite */ | ||
1048 | tape_3590_print_era_msg(device, irb); | ||
1049 | return tape_3590_erp_read_opposite(device, request, | ||
1050 | irb); | ||
1051 | } | ||
1052 | return tape_3590_erp_basic(device, request, irb, -EIO); | ||
1053 | case 0x5020: | ||
1054 | case 0x5021: | ||
1055 | case 0x5022: | ||
1056 | case 0x5040: | ||
1057 | case 0x5041: | ||
1058 | case 0x5042: | ||
1059 | tape_3590_print_era_msg(device, irb); | ||
1060 | return tape_3590_erp_swap(device, request, irb); | ||
1061 | |||
1062 | case 0x5110: | ||
1063 | case 0x5111: | ||
1064 | return tape_3590_erp_basic(device, request, irb, -EMEDIUMTYPE); | ||
1065 | |||
1066 | case 0x5120: | ||
1067 | case 0x1120: | ||
1068 | tape_med_state_set(device, MS_UNLOADED); | ||
1069 | return tape_3590_erp_basic(device, request, irb, -ENOMEDIUM); | ||
1070 | |||
1071 | case 0x6020: | ||
1072 | PRINT_WARN("(%s): Cartridge of wrong type?\n", | ||
1073 | device->cdev->dev.bus_id); | ||
1074 | return tape_3590_erp_basic(device, request, irb, -EMEDIUMTYPE); | ||
1075 | |||
1076 | case 0x8011: | ||
1077 | PRINT_WARN("(%s): Another host has reserved the tape device\n", | ||
1078 | device->cdev->dev.bus_id); | ||
1079 | return tape_3590_erp_basic(device, request, irb, -EPERM); | ||
1080 | case 0x8013: | ||
1081 | PRINT_WARN("(%s): Another host has privileged access to the " | ||
1082 | "tape device\n", device->cdev->dev.bus_id); | ||
1083 | PRINT_WARN("(%s): To solve the problem unload the current " | ||
1084 | "cartridge!\n", device->cdev->dev.bus_id); | ||
1085 | return tape_3590_erp_basic(device, request, irb, -EPERM); | ||
1086 | default: | ||
1087 | return tape_3590_erp_basic(device, request, irb, -EIO); | ||
1088 | } | ||
1089 | } | ||
1090 | |||
1091 | /* | ||
1092 | * 3590 interrupt handler: | ||
1093 | */ | ||
1094 | static int | ||
1095 | tape_3590_irq(struct tape_device *device, struct tape_request *request, | ||
1096 | struct irb *irb) | ||
1097 | { | ||
1098 | if (request == NULL) | ||
1099 | return tape_3590_unsolicited_irq(device, irb); | ||
1100 | |||
1101 | if ((irb->scsw.dstat & DEV_STAT_UNIT_EXCEP) && | ||
1102 | (irb->scsw.dstat & DEV_STAT_DEV_END) && (request->op == TO_WRI)) { | ||
1103 | /* Write at end of volume */ | ||
1104 | DBF_EVENT(2, "End of volume\n"); | ||
1105 | return tape_3590_erp_failed(device, request, irb, -ENOSPC); | ||
1106 | } | ||
1107 | |||
1108 | if (irb->scsw.dstat & DEV_STAT_UNIT_CHECK) | ||
1109 | return tape_3590_unit_check(device, request, irb); | ||
1110 | |||
1111 | if (irb->scsw.dstat & DEV_STAT_DEV_END) { | ||
1112 | if (irb->scsw.dstat == DEV_STAT_UNIT_EXCEP) { | ||
1113 | if (request->op == TO_FSB || request->op == TO_BSB) | ||
1114 | request->rescnt++; | ||
1115 | else | ||
1116 | DBF_EVENT(5, "Unit Exception!\n"); | ||
1117 | } | ||
1118 | |||
1119 | return tape_3590_done(device, request); | ||
1120 | } | ||
1121 | |||
1122 | if (irb->scsw.dstat & DEV_STAT_CHN_END) { | ||
1123 | DBF_EVENT(2, "channel end\n"); | ||
1124 | return TAPE_IO_PENDING; | ||
1125 | } | ||
1126 | |||
1127 | if (irb->scsw.dstat & DEV_STAT_ATTENTION) { | ||
1128 | DBF_EVENT(2, "Unit Attention when busy..\n"); | ||
1129 | return TAPE_IO_PENDING; | ||
1130 | } | ||
1131 | |||
1132 | DBF_EVENT(6, "xunknownirq\n"); | ||
1133 | PRINT_ERR("Unexpected interrupt.\n"); | ||
1134 | PRINT_ERR("Current op is: %s\n", tape_op_verbose[request->op]); | ||
1135 | tape_dump_sense(device, request, irb); | ||
1136 | return TAPE_IO_STOP; | ||
1137 | } | ||
1138 | |||
1139 | /* | ||
1140 | * Setup device function | ||
1141 | */ | ||
1142 | static int | ||
1143 | tape_3590_setup_device(struct tape_device *device) | ||
1144 | { | ||
1145 | int rc; | ||
1146 | struct tape_3590_disc_data *data; | ||
1147 | |||
1148 | DBF_EVENT(6, "3590 device setup\n"); | ||
1149 | data = kmalloc(sizeof(struct tape_3590_disc_data), | ||
1150 | GFP_KERNEL | GFP_DMA); | ||
1151 | if (data == NULL) | ||
1152 | return -ENOMEM; | ||
1153 | data->read_back_op = READ_PREVIOUS; | ||
1154 | device->discdata = data; | ||
1155 | |||
1156 | if ((rc = tape_std_assign(device)) == 0) { | ||
1157 | /* Try to find out if medium is loaded */ | ||
1158 | if ((rc = tape_3590_sense_medium(device)) != 0) | ||
1159 | DBF_LH(3, "3590 medium sense returned %d\n", rc); | ||
1160 | } | ||
1161 | |||
1162 | return rc; | ||
1163 | } | ||
1164 | |||
1165 | /* | ||
1166 | * Cleanup device function | ||
1167 | */ | ||
1168 | static void | ||
1169 | tape_3590_cleanup_device(struct tape_device *device) | ||
1170 | { | ||
1171 | tape_std_unassign(device); | ||
1172 | |||
1173 | kfree(device->discdata); | ||
1174 | device->discdata = NULL; | ||
1175 | } | ||
1176 | |||
1177 | /* | ||
1178 | * List of 3590 magnetic tape commands. | ||
1179 | */ | ||
1180 | static tape_mtop_fn tape_3590_mtop[TAPE_NR_MTOPS] = { | ||
1181 | [MTRESET] = tape_std_mtreset, | ||
1182 | [MTFSF] = tape_std_mtfsf, | ||
1183 | [MTBSF] = tape_std_mtbsf, | ||
1184 | [MTFSR] = tape_std_mtfsr, | ||
1185 | [MTBSR] = tape_std_mtbsr, | ||
1186 | [MTWEOF] = tape_std_mtweof, | ||
1187 | [MTREW] = tape_std_mtrew, | ||
1188 | [MTOFFL] = tape_std_mtoffl, | ||
1189 | [MTNOP] = tape_std_mtnop, | ||
1190 | [MTRETEN] = tape_std_mtreten, | ||
1191 | [MTBSFM] = tape_std_mtbsfm, | ||
1192 | [MTFSFM] = tape_std_mtfsfm, | ||
1193 | [MTEOM] = tape_std_mteom, | ||
1194 | [MTERASE] = tape_std_mterase, | ||
1195 | [MTRAS1] = NULL, | ||
1196 | [MTRAS2] = NULL, | ||
1197 | [MTRAS3] = NULL, | ||
1198 | [MTSETBLK] = tape_std_mtsetblk, | ||
1199 | [MTSETDENSITY] = NULL, | ||
1200 | [MTSEEK] = tape_3590_mtseek, | ||
1201 | [MTTELL] = tape_3590_mttell, | ||
1202 | [MTSETDRVBUFFER] = NULL, | ||
1203 | [MTFSS] = NULL, | ||
1204 | [MTBSS] = NULL, | ||
1205 | [MTWSM] = NULL, | ||
1206 | [MTLOCK] = NULL, | ||
1207 | [MTUNLOCK] = NULL, | ||
1208 | [MTLOAD] = tape_std_mtload, | ||
1209 | [MTUNLOAD] = tape_std_mtunload, | ||
1210 | [MTCOMPRESSION] = tape_std_mtcompression, | ||
1211 | [MTSETPART] = NULL, | ||
1212 | [MTMKPART] = NULL | ||
1213 | }; | ||
1214 | |||
1215 | /* | ||
1216 | * Tape discipline structure for 3590. | ||
1217 | */ | ||
1218 | static struct tape_discipline tape_discipline_3590 = { | ||
1219 | .owner = THIS_MODULE, | ||
1220 | .setup_device = tape_3590_setup_device, | ||
1221 | .cleanup_device = tape_3590_cleanup_device, | ||
1222 | .process_eov = tape_std_process_eov, | ||
1223 | .irq = tape_3590_irq, | ||
1224 | .read_block = tape_std_read_block, | ||
1225 | .write_block = tape_std_write_block, | ||
1226 | #ifdef CONFIG_S390_TAPE_BLOCK | ||
1227 | .bread = tape_3590_bread, | ||
1228 | .free_bread = tape_3590_free_bread, | ||
1229 | .check_locate = tape_3590_check_locate, | ||
1230 | #endif | ||
1231 | .ioctl_fn = tape_3590_ioctl, | ||
1232 | .mtop_array = tape_3590_mtop | ||
1233 | }; | ||
1234 | |||
1235 | static struct ccw_device_id tape_3590_ids[] = { | ||
1236 | {CCW_DEVICE_DEVTYPE(0x3590, 0, 0x3590, 0), .driver_info = tape_3590}, | ||
1237 | { /* end of list */ } | ||
1238 | }; | ||
1239 | |||
1240 | static int | ||
1241 | tape_3590_online(struct ccw_device *cdev) | ||
1242 | { | ||
1243 | return tape_generic_online(cdev->dev.driver_data, | ||
1244 | &tape_discipline_3590); | ||
1245 | } | ||
1246 | |||
1247 | static int | ||
1248 | tape_3590_offline(struct ccw_device *cdev) | ||
1249 | { | ||
1250 | return tape_generic_offline(cdev->dev.driver_data); | ||
1251 | } | ||
1252 | |||
1253 | static struct ccw_driver tape_3590_driver = { | ||
1254 | .name = "tape_3590", | ||
1255 | .owner = THIS_MODULE, | ||
1256 | .ids = tape_3590_ids, | ||
1257 | .probe = tape_generic_probe, | ||
1258 | .remove = tape_generic_remove, | ||
1259 | .set_offline = tape_3590_offline, | ||
1260 | .set_online = tape_3590_online, | ||
1261 | }; | ||
1262 | |||
1263 | /* | ||
1264 | * Setup discipline structure. | ||
1265 | */ | ||
1266 | static int | ||
1267 | tape_3590_init(void) | ||
1268 | { | ||
1269 | int rc; | ||
1270 | |||
1271 | TAPE_DBF_AREA = debug_register("tape_3590", 2, 2, 4 * sizeof(long)); | ||
1272 | debug_register_view(TAPE_DBF_AREA, &debug_sprintf_view); | ||
1273 | #ifdef DBF_LIKE_HELL | ||
1274 | debug_set_level(TAPE_DBF_AREA, 6); | ||
1275 | #endif | ||
1276 | |||
1277 | DBF_EVENT(3, "3590 init\n"); | ||
1278 | /* Register driver for 3590 tapes. */ | ||
1279 | rc = ccw_driver_register(&tape_3590_driver); | ||
1280 | if (rc) | ||
1281 | DBF_EVENT(3, "3590 init failed\n"); | ||
1282 | else | ||
1283 | DBF_EVENT(3, "3590 registered\n"); | ||
1284 | return rc; | ||
1285 | } | ||
1286 | |||
1287 | static void | ||
1288 | tape_3590_exit(void) | ||
1289 | { | ||
1290 | ccw_driver_unregister(&tape_3590_driver); | ||
1291 | |||
1292 | debug_unregister(TAPE_DBF_AREA); | ||
1293 | } | ||
1294 | |||
1295 | MODULE_DEVICE_TABLE(ccw, tape_3590_ids); | ||
1296 | MODULE_AUTHOR("(C) 2001,2006 IBM Corporation"); | ||
1297 | MODULE_DESCRIPTION("Linux on zSeries channel attached 3590 tape device driver"); | ||
1298 | MODULE_LICENSE("GPL"); | ||
1299 | |||
1300 | module_init(tape_3590_init); | ||
1301 | module_exit(tape_3590_exit); | ||
diff --git a/drivers/s390/char/tape_3590.h b/drivers/s390/char/tape_3590.h new file mode 100644 index 000000000000..cf274b9445a6 --- /dev/null +++ b/drivers/s390/char/tape_3590.h | |||
@@ -0,0 +1,124 @@ | |||
1 | /* | ||
2 | * drivers/s390/char/tape_3590.h | ||
3 | * tape device discipline for 3590 tapes. | ||
4 | * | ||
5 | * Copyright (C) IBM Corp. 2001,2006 | ||
6 | * Author(s): Stefan Bader <shbader@de.ibm.com> | ||
7 | * Michael Holzheu <holzheu@de.ibm.com> | ||
8 | * Martin Schwidefsky <schwidefsky@de.ibm.com> | ||
9 | */ | ||
10 | |||
11 | #ifndef _TAPE_3590_H | ||
12 | #define _TAPE_3590_H | ||
13 | |||
14 | #define MEDIUM_SENSE 0xc2 | ||
15 | #define READ_PREVIOUS 0x0a | ||
16 | #define MODE_SENSE 0xcf | ||
17 | #define PERFORM_SS_FUNC 0x77 | ||
18 | #define READ_SS_DATA 0x3e | ||
19 | |||
20 | #define PREP_RD_SS_DATA 0x18 | ||
21 | #define RD_ATTMSG 0x3 | ||
22 | |||
23 | #define SENSE_BRA_PER 0 | ||
24 | #define SENSE_BRA_CONT 1 | ||
25 | #define SENSE_BRA_RE 2 | ||
26 | #define SENSE_BRA_DRE 3 | ||
27 | |||
28 | #define SENSE_FMT_LIBRARY 0x23 | ||
29 | #define SENSE_FMT_UNSOLICITED 0x40 | ||
30 | #define SENSE_FMT_COMMAND_REJ 0x41 | ||
31 | #define SENSE_FMT_COMMAND_EXEC0 0x50 | ||
32 | #define SENSE_FMT_COMMAND_EXEC1 0x51 | ||
33 | #define SENSE_FMT_EVENT0 0x60 | ||
34 | #define SENSE_FMT_EVENT1 0x61 | ||
35 | #define SENSE_FMT_MIM 0x70 | ||
36 | #define SENSE_FMT_SIM 0x71 | ||
37 | |||
38 | #define MSENSE_UNASSOCIATED 0x00 | ||
39 | #define MSENSE_ASSOCIATED_MOUNT 0x01 | ||
40 | #define MSENSE_ASSOCIATED_UMOUNT 0x02 | ||
41 | |||
42 | #define TAPE_3590_MAX_MSG 0xb0 | ||
43 | |||
44 | /* Datatypes */ | ||
45 | |||
46 | struct tape_3590_disc_data { | ||
47 | unsigned char modeset_byte; | ||
48 | int read_back_op; | ||
49 | }; | ||
50 | |||
51 | struct tape_3590_sense { | ||
52 | |||
53 | unsigned int command_rej:1; | ||
54 | unsigned int interv_req:1; | ||
55 | unsigned int bus_out_check:1; | ||
56 | unsigned int eq_check:1; | ||
57 | unsigned int data_check:1; | ||
58 | unsigned int overrun:1; | ||
59 | unsigned int def_unit_check:1; | ||
60 | unsigned int assgnd_elsew:1; | ||
61 | |||
62 | unsigned int locate_fail:1; | ||
63 | unsigned int inst_online:1; | ||
64 | unsigned int reserved:1; | ||
65 | unsigned int blk_seq_err:1; | ||
66 | unsigned int begin_part:1; | ||
67 | unsigned int wr_mode:1; | ||
68 | unsigned int wr_prot:1; | ||
69 | unsigned int not_cap:1; | ||
70 | |||
71 | unsigned int bra:2; | ||
72 | unsigned int lc:3; | ||
73 | unsigned int vlf_active:1; | ||
74 | unsigned int stm:1; | ||
75 | unsigned int med_pos:1; | ||
76 | |||
77 | unsigned int rac:8; | ||
78 | |||
79 | unsigned int rc_rqc:16; | ||
80 | |||
81 | unsigned int mc:8; | ||
82 | |||
83 | unsigned int sense_fmt:8; | ||
84 | |||
85 | union { | ||
86 | struct { | ||
87 | unsigned int emc:4; | ||
88 | unsigned int smc:4; | ||
89 | unsigned int sev:2; | ||
90 | unsigned int reserved:6; | ||
91 | unsigned int md:8; | ||
92 | unsigned int refcode:8; | ||
93 | unsigned int mid:16; | ||
94 | unsigned int mp:16; | ||
95 | unsigned char volid[6]; | ||
96 | unsigned int fid:8; | ||
97 | } f70; | ||
98 | struct { | ||
99 | unsigned int emc:4; | ||
100 | unsigned int smc:4; | ||
101 | unsigned int sev:2; | ||
102 | unsigned int reserved1:5; | ||
103 | unsigned int mdf:1; | ||
104 | unsigned char md[3]; | ||
105 | unsigned int simid:8; | ||
106 | unsigned int uid:16; | ||
107 | unsigned int refcode1:16; | ||
108 | unsigned int refcode2:16; | ||
109 | unsigned int refcode3:16; | ||
110 | unsigned int reserved2:8; | ||
111 | } f71; | ||
112 | unsigned char data[14]; | ||
113 | } fmt; | ||
114 | unsigned char pad[10]; | ||
115 | |||
116 | } __attribute__ ((packed)); | ||
117 | |||
118 | struct tape_3590_med_sense { | ||
119 | unsigned int macst:4; | ||
120 | unsigned int masst:4; | ||
121 | char pad[127]; | ||
122 | } __attribute__ ((packed)); | ||
123 | |||
124 | #endif /* _TAPE_3590_H */ | ||
diff --git a/drivers/s390/char/tape_class.c b/drivers/s390/char/tape_class.c index b3569c82bb16..a5c68e60fcf4 100644 --- a/drivers/s390/char/tape_class.c +++ b/drivers/s390/char/tape_class.c | |||
@@ -44,11 +44,10 @@ struct tape_class_device *register_tape_dev( | |||
44 | int rc; | 44 | int rc; |
45 | char * s; | 45 | char * s; |
46 | 46 | ||
47 | tcd = kmalloc(sizeof(struct tape_class_device), GFP_KERNEL); | 47 | tcd = kzalloc(sizeof(struct tape_class_device), GFP_KERNEL); |
48 | if (!tcd) | 48 | if (!tcd) |
49 | return ERR_PTR(-ENOMEM); | 49 | return ERR_PTR(-ENOMEM); |
50 | 50 | ||
51 | memset(tcd, 0, sizeof(struct tape_class_device)); | ||
52 | strncpy(tcd->device_name, device_name, TAPECLASS_NAME_LEN); | 51 | strncpy(tcd->device_name, device_name, TAPECLASS_NAME_LEN); |
53 | for (s = strchr(tcd->device_name, '/'); s; s = strchr(s, '/')) | 52 | for (s = strchr(tcd->device_name, '/'); s; s = strchr(s, '/')) |
54 | *s = '!'; | 53 | *s = '!'; |
diff --git a/drivers/s390/char/tape_core.c b/drivers/s390/char/tape_core.c index 4ea438c749c9..389ee2c0f443 100644 --- a/drivers/s390/char/tape_core.c +++ b/drivers/s390/char/tape_core.c | |||
@@ -453,16 +453,14 @@ tape_alloc_device(void) | |||
453 | { | 453 | { |
454 | struct tape_device *device; | 454 | struct tape_device *device; |
455 | 455 | ||
456 | device = (struct tape_device *) | 456 | device = kzalloc(sizeof(struct tape_device), GFP_KERNEL); |
457 | kmalloc(sizeof(struct tape_device), GFP_KERNEL); | ||
458 | if (device == NULL) { | 457 | if (device == NULL) { |
459 | DBF_EXCEPTION(2, "ti:no mem\n"); | 458 | DBF_EXCEPTION(2, "ti:no mem\n"); |
460 | PRINT_INFO ("can't allocate memory for " | 459 | PRINT_INFO ("can't allocate memory for " |
461 | "tape info structure\n"); | 460 | "tape info structure\n"); |
462 | return ERR_PTR(-ENOMEM); | 461 | return ERR_PTR(-ENOMEM); |
463 | } | 462 | } |
464 | memset(device, 0, sizeof(struct tape_device)); | 463 | device->modeset_byte = kmalloc(1, GFP_KERNEL | GFP_DMA); |
465 | device->modeset_byte = (char *) kmalloc(1, GFP_KERNEL | GFP_DMA); | ||
466 | if (device->modeset_byte == NULL) { | 464 | if (device->modeset_byte == NULL) { |
467 | DBF_EXCEPTION(2, "ti:no mem\n"); | 465 | DBF_EXCEPTION(2, "ti:no mem\n"); |
468 | PRINT_INFO("can't allocate memory for modeset byte\n"); | 466 | PRINT_INFO("can't allocate memory for modeset byte\n"); |
@@ -659,34 +657,30 @@ tape_alloc_request(int cplength, int datasize) | |||
659 | 657 | ||
660 | DBF_LH(6, "tape_alloc_request(%d, %d)\n", cplength, datasize); | 658 | DBF_LH(6, "tape_alloc_request(%d, %d)\n", cplength, datasize); |
661 | 659 | ||
662 | request = (struct tape_request *) kmalloc(sizeof(struct tape_request), | 660 | request = kzalloc(sizeof(struct tape_request), GFP_KERNEL); |
663 | GFP_KERNEL); | ||
664 | if (request == NULL) { | 661 | if (request == NULL) { |
665 | DBF_EXCEPTION(1, "cqra nomem\n"); | 662 | DBF_EXCEPTION(1, "cqra nomem\n"); |
666 | return ERR_PTR(-ENOMEM); | 663 | return ERR_PTR(-ENOMEM); |
667 | } | 664 | } |
668 | memset(request, 0, sizeof(struct tape_request)); | ||
669 | /* allocate channel program */ | 665 | /* allocate channel program */ |
670 | if (cplength > 0) { | 666 | if (cplength > 0) { |
671 | request->cpaddr = kmalloc(cplength*sizeof(struct ccw1), | 667 | request->cpaddr = kcalloc(cplength, sizeof(struct ccw1), |
672 | GFP_ATOMIC | GFP_DMA); | 668 | GFP_ATOMIC | GFP_DMA); |
673 | if (request->cpaddr == NULL) { | 669 | if (request->cpaddr == NULL) { |
674 | DBF_EXCEPTION(1, "cqra nomem\n"); | 670 | DBF_EXCEPTION(1, "cqra nomem\n"); |
675 | kfree(request); | 671 | kfree(request); |
676 | return ERR_PTR(-ENOMEM); | 672 | return ERR_PTR(-ENOMEM); |
677 | } | 673 | } |
678 | memset(request->cpaddr, 0, cplength*sizeof(struct ccw1)); | ||
679 | } | 674 | } |
680 | /* alloc small kernel buffer */ | 675 | /* alloc small kernel buffer */ |
681 | if (datasize > 0) { | 676 | if (datasize > 0) { |
682 | request->cpdata = kmalloc(datasize, GFP_KERNEL | GFP_DMA); | 677 | request->cpdata = kzalloc(datasize, GFP_KERNEL | GFP_DMA); |
683 | if (request->cpdata == NULL) { | 678 | if (request->cpdata == NULL) { |
684 | DBF_EXCEPTION(1, "cqra nomem\n"); | 679 | DBF_EXCEPTION(1, "cqra nomem\n"); |
685 | kfree(request->cpaddr); | 680 | kfree(request->cpaddr); |
686 | kfree(request); | 681 | kfree(request); |
687 | return ERR_PTR(-ENOMEM); | 682 | return ERR_PTR(-ENOMEM); |
688 | } | 683 | } |
689 | memset(request->cpdata, 0, datasize); | ||
690 | } | 684 | } |
691 | DBF_LH(6, "New request %p(%p/%p)\n", request, request->cpaddr, | 685 | DBF_LH(6, "New request %p(%p/%p)\n", request, request->cpaddr, |
692 | request->cpdata); | 686 | request->cpdata); |
@@ -761,6 +755,13 @@ __tape_start_next_request(struct tape_device *device) | |||
761 | */ | 755 | */ |
762 | if (request->status == TAPE_REQUEST_IN_IO) | 756 | if (request->status == TAPE_REQUEST_IN_IO) |
763 | return; | 757 | return; |
758 | /* | ||
759 | * Request has already been stopped. We have to wait until | ||
760 | * the request is removed from the queue in the interrupt | ||
761 | * handling. | ||
762 | */ | ||
763 | if (request->status == TAPE_REQUEST_DONE) | ||
764 | return; | ||
764 | 765 | ||
765 | /* | 766 | /* |
766 | * We wanted to cancel the request but the common I/O layer | 767 | * We wanted to cancel the request but the common I/O layer |
@@ -1015,7 +1016,7 @@ tape_do_io_interruptible(struct tape_device *device, | |||
1015 | wq, | 1016 | wq, |
1016 | (request->callback == NULL) | 1017 | (request->callback == NULL) |
1017 | ); | 1018 | ); |
1018 | } while (rc != -ERESTARTSYS); | 1019 | } while (rc == -ERESTARTSYS); |
1019 | 1020 | ||
1020 | DBF_EVENT(3, "IO stopped on %08x\n", device->cdev_id); | 1021 | DBF_EVENT(3, "IO stopped on %08x\n", device->cdev_id); |
1021 | rc = -ERESTARTSYS; | 1022 | rc = -ERESTARTSYS; |
@@ -1024,6 +1025,20 @@ tape_do_io_interruptible(struct tape_device *device, | |||
1024 | } | 1025 | } |
1025 | 1026 | ||
1026 | /* | 1027 | /* |
1028 | * Stop running ccw. | ||
1029 | */ | ||
1030 | int | ||
1031 | tape_cancel_io(struct tape_device *device, struct tape_request *request) | ||
1032 | { | ||
1033 | int rc; | ||
1034 | |||
1035 | spin_lock_irq(get_ccwdev_lock(device->cdev)); | ||
1036 | rc = __tape_cancel_io(device, request); | ||
1037 | spin_unlock_irq(get_ccwdev_lock(device->cdev)); | ||
1038 | return rc; | ||
1039 | } | ||
1040 | |||
1041 | /* | ||
1027 | * Tape interrupt routine, called from the ccw_device layer | 1042 | * Tape interrupt routine, called from the ccw_device layer |
1028 | */ | 1043 | */ |
1029 | static void | 1044 | static void |
@@ -1064,15 +1079,16 @@ __tape_do_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb) | |||
1064 | /* | 1079 | /* |
1065 | * If the condition code is not zero and the start function bit is | 1080 | * If the condition code is not zero and the start function bit is |
1066 | * still set, this is an deferred error and the last start I/O did | 1081 | * still set, this is an deferred error and the last start I/O did |
1067 | * not succeed. Restart the request now. | 1082 | * not succeed. At this point the condition that caused the deferred |
1083 | * error might still apply. So we just schedule the request to be | ||
1084 | * started later. | ||
1068 | */ | 1085 | */ |
1069 | if (irb->scsw.cc != 0 && (irb->scsw.fctl & SCSW_FCTL_START_FUNC)) { | 1086 | if (irb->scsw.cc != 0 && (irb->scsw.fctl & SCSW_FCTL_START_FUNC) && |
1070 | PRINT_WARN("(%s): deferred cc=%i. restaring\n", | 1087 | (request->status == TAPE_REQUEST_IN_IO)) { |
1071 | cdev->dev.bus_id, | 1088 | DBF_EVENT(3,"(%08x): deferred cc=%i, fctl=%i. restarting\n", |
1072 | irb->scsw.cc); | 1089 | device->cdev_id, irb->scsw.cc, irb->scsw.fctl); |
1073 | rc = __tape_start_io(device, request); | 1090 | request->status = TAPE_REQUEST_QUEUED; |
1074 | if (rc) | 1091 | schedule_delayed_work(&device->tape_dnr, HZ); |
1075 | __tape_end_request(device, request, rc); | ||
1076 | return; | 1092 | return; |
1077 | } | 1093 | } |
1078 | 1094 | ||
@@ -1286,4 +1302,5 @@ EXPORT_SYMBOL(tape_dump_sense_dbf); | |||
1286 | EXPORT_SYMBOL(tape_do_io); | 1302 | EXPORT_SYMBOL(tape_do_io); |
1287 | EXPORT_SYMBOL(tape_do_io_async); | 1303 | EXPORT_SYMBOL(tape_do_io_async); |
1288 | EXPORT_SYMBOL(tape_do_io_interruptible); | 1304 | EXPORT_SYMBOL(tape_do_io_interruptible); |
1305 | EXPORT_SYMBOL(tape_cancel_io); | ||
1289 | EXPORT_SYMBOL(tape_mtop); | 1306 | EXPORT_SYMBOL(tape_mtop); |
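The tape_core.c changes above replace the immediate restart after a deferred condition code with a delayed retry: the request is put back into TAPE_REQUEST_QUEUED state and device->tape_dnr is scheduled one second later. A rough sketch of such a delayed work handler follows; the handler name is an assumption, only __tape_start_next_request() and the ccw device lock usage are taken from this driver.

/*
 * Sketch only (assumed handler name): the delayed work item retries
 * the request queue under the ccw device lock once the deferred
 * condition has had time to clear.
 */
static void
example_tape_delayed_next_request(void *data)
{
	struct tape_device *device = (struct tape_device *) data;

	spin_lock_irq(get_ccwdev_lock(device->cdev));
	__tape_start_next_request(device);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
}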
diff --git a/drivers/s390/char/tape_std.c b/drivers/s390/char/tape_std.c index 2f9fe30989a7..99cf881f41db 100644 --- a/drivers/s390/char/tape_std.c +++ b/drivers/s390/char/tape_std.c | |||
@@ -37,20 +37,19 @@ tape_std_assign_timeout(unsigned long data) | |||
37 | { | 37 | { |
38 | struct tape_request * request; | 38 | struct tape_request * request; |
39 | struct tape_device * device; | 39 | struct tape_device * device; |
40 | int rc; | ||
40 | 41 | ||
41 | request = (struct tape_request *) data; | 42 | request = (struct tape_request *) data; |
42 | if ((device = request->device) == NULL) | 43 | if ((device = request->device) == NULL) |
43 | BUG(); | 44 | BUG(); |
44 | 45 | ||
45 | spin_lock_irq(get_ccwdev_lock(device->cdev)); | 46 | DBF_EVENT(3, "%08x: Assignment timeout. Device busy.\n", |
46 | if (request->callback != NULL) { | ||
47 | DBF_EVENT(3, "%08x: Assignment timeout. Device busy.\n", | ||
48 | device->cdev_id); | 47 | device->cdev_id); |
49 | PRINT_ERR("%s: Assignment timeout. Device busy.\n", | 48 | rc = tape_cancel_io(device, request); |
50 | device->cdev->dev.bus_id); | 49 | if(rc) |
51 | ccw_device_clear(device->cdev, (long) request); | 50 | PRINT_ERR("(%s): Assign timeout: Cancel failed with rc = %i\n", |
52 | } | 51 | device->cdev->dev.bus_id, rc); |
53 | spin_unlock_irq(get_ccwdev_lock(device->cdev)); | 52 | |
54 | } | 53 | } |
55 | 54 | ||
56 | int | 55 | int |
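With tape_cancel_io() exported from tape_core.c above, the assignment timeout handler no longer takes the ccw device lock or checks request->callback itself; it simply asks the core to cancel the running channel program. A minimal sketch of a timer callback built this way (the function name is illustrative, the calls are the ones used in the hunk above):

/* Minimal sketch of a timeout callback using tape_cancel_io(). */
static void
example_assign_timeout(unsigned long data)
{
	struct tape_request *request = (struct tape_request *) data;
	struct tape_device *device = request->device;
	int rc;

	rc = tape_cancel_io(device, request);	/* locks the cdev internally */
	if (rc)
		PRINT_ERR("(%s): cancel failed with rc = %i\n",
			  device->cdev->dev.bus_id, rc);
}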
diff --git a/drivers/s390/char/tape_std.h b/drivers/s390/char/tape_std.h index 3ab6aafb7343..2d311798edf4 100644 --- a/drivers/s390/char/tape_std.h +++ b/drivers/s390/char/tape_std.h | |||
@@ -1,9 +1,8 @@ | |||
1 | /* | 1 | /* |
2 | * drivers/s390/char/tape_34xx.h | 2 | * drivers/s390/char/tape_std.h |
3 | * standard tape device functions for ibm tapes. | 3 | * standard tape device functions for ibm tapes. |
4 | * | 4 | * |
5 | * S390 and zSeries version | 5 | * Copyright (C) IBM Corp. 2001,2006 |
6 | * Copyright (C) 2001,2002 IBM Deutschland Entwicklung GmbH, IBM Corporation | ||
7 | * Author(s): Carsten Otte <cotte@de.ibm.com> | 6 | * Author(s): Carsten Otte <cotte@de.ibm.com> |
8 | * Tuan Ngo-Anh <ngoanh@de.ibm.com> | 7 | * Tuan Ngo-Anh <ngoanh@de.ibm.com> |
9 | * Martin Schwidefsky <schwidefsky@de.ibm.com> | 8 | * Martin Schwidefsky <schwidefsky@de.ibm.com> |
@@ -149,4 +148,11 @@ void tape_std_error_recovery_do_retry(struct tape_device *); | |||
149 | void tape_std_error_recovery_read_opposite(struct tape_device *); | 148 | void tape_std_error_recovery_read_opposite(struct tape_device *); |
150 | void tape_std_error_recovery_HWBUG(struct tape_device *, int condno); | 149 | void tape_std_error_recovery_HWBUG(struct tape_device *, int condno); |
151 | 150 | ||
151 | /* S390 tape types */ | ||
152 | enum s390_tape_type { | ||
153 | tape_3480, | ||
154 | tape_3490, | ||
155 | tape_3590, | ||
156 | }; | ||
157 | |||
152 | #endif // _TAPE_STD_H | 158 | #endif // _TAPE_STD_H |
diff --git a/drivers/s390/char/tty3270.c b/drivers/s390/char/tty3270.c index 4b9069370388..9a141776873f 100644 --- a/drivers/s390/char/tty3270.c +++ b/drivers/s390/char/tty3270.c | |||
@@ -691,10 +691,9 @@ tty3270_alloc_view(void) | |||
691 | struct tty3270 *tp; | 691 | struct tty3270 *tp; |
692 | int pages; | 692 | int pages; |
693 | 693 | ||
694 | tp = kmalloc(sizeof(struct tty3270),GFP_KERNEL); | 694 | tp = kzalloc(sizeof(struct tty3270), GFP_KERNEL); |
695 | if (!tp) | 695 | if (!tp) |
696 | goto out_err; | 696 | goto out_err; |
697 | memset(tp, 0, sizeof(struct tty3270)); | ||
698 | tp->freemem_pages = | 697 | tp->freemem_pages = |
699 | kmalloc(sizeof(void *) * TTY3270_STRING_PAGES, GFP_KERNEL); | 698 | kmalloc(sizeof(void *) * TTY3270_STRING_PAGES, GFP_KERNEL); |
700 | if (!tp->freemem_pages) | 699 | if (!tp->freemem_pages) |
@@ -767,16 +766,14 @@ tty3270_alloc_screen(struct tty3270 *tp) | |||
767 | int lines; | 766 | int lines; |
768 | 767 | ||
769 | size = sizeof(struct tty3270_line) * (tp->view.rows - 2); | 768 | size = sizeof(struct tty3270_line) * (tp->view.rows - 2); |
770 | tp->screen = kmalloc(size, GFP_KERNEL); | 769 | tp->screen = kzalloc(size, GFP_KERNEL); |
771 | if (!tp->screen) | 770 | if (!tp->screen) |
772 | goto out_err; | 771 | goto out_err; |
773 | memset(tp->screen, 0, size); | ||
774 | for (lines = 0; lines < tp->view.rows - 2; lines++) { | 772 | for (lines = 0; lines < tp->view.rows - 2; lines++) { |
775 | size = sizeof(struct tty3270_cell) * tp->view.cols; | 773 | size = sizeof(struct tty3270_cell) * tp->view.cols; |
776 | tp->screen[lines].cells = kmalloc(size, GFP_KERNEL); | 774 | tp->screen[lines].cells = kzalloc(size, GFP_KERNEL); |
777 | if (!tp->screen[lines].cells) | 775 | if (!tp->screen[lines].cells) |
778 | goto out_screen; | 776 | goto out_screen; |
779 | memset(tp->screen[lines].cells, 0, size); | ||
780 | } | 777 | } |
781 | return 0; | 778 | return 0; |
782 | out_screen: | 779 | out_screen: |
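The tty3270.c hunks above are the first of many cleanups in this patch that fold a kmalloc()/memset() pair into a single kzalloc() call (or kcalloc() for arrays); the same conversion appears below in vmlogrdr, ccwgroup, css, device, device_ops, qdio, the crypto code and the network drivers. A self-contained sketch of the idiom, with illustrative names:

#include <linux/slab.h>

struct example {			/* illustrative structure */
	int count;
	void *data;
};

static struct example *example_alloc(void)
{
	struct example *p;

	/* old style: p = kmalloc(sizeof(*p), GFP_KERNEL); memset(p, 0, sizeof(*p)); */
	p = kzalloc(sizeof(*p), GFP_KERNEL);	/* zeroed on success, NULL on failure */
	return p;
}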
diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c index b2d75de144c6..c625b69ebd19 100644 --- a/drivers/s390/char/vmlogrdr.c +++ b/drivers/s390/char/vmlogrdr.c | |||
@@ -759,9 +759,8 @@ vmlogrdr_register_device(struct vmlogrdr_priv_t *priv) { | |||
759 | struct device *dev; | 759 | struct device *dev; |
760 | int ret; | 760 | int ret; |
761 | 761 | ||
762 | dev = kmalloc(sizeof(struct device), GFP_KERNEL); | 762 | dev = kzalloc(sizeof(struct device), GFP_KERNEL); |
763 | if (dev) { | 763 | if (dev) { |
764 | memset(dev, 0, sizeof(struct device)); | ||
765 | snprintf(dev->bus_id, BUS_ID_SIZE, "%s", | 764 | snprintf(dev->bus_id, BUS_ID_SIZE, "%s", |
766 | priv->internal_name); | 765 | priv->internal_name); |
767 | dev->bus = &iucv_bus; | 766 | dev->bus = &iucv_bus; |
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c index 8013c8eb76fe..bdfee7fbaa2e 100644 --- a/drivers/s390/cio/ccwgroup.c +++ b/drivers/s390/cio/ccwgroup.c | |||
@@ -157,11 +157,10 @@ ccwgroup_create(struct device *root, | |||
157 | if (argc > 256) /* disallow dumb users */ | 157 | if (argc > 256) /* disallow dumb users */ |
158 | return -EINVAL; | 158 | return -EINVAL; |
159 | 159 | ||
160 | gdev = kmalloc(sizeof(*gdev) + argc*sizeof(gdev->cdev[0]), GFP_KERNEL); | 160 | gdev = kzalloc(sizeof(*gdev) + argc*sizeof(gdev->cdev[0]), GFP_KERNEL); |
161 | if (!gdev) | 161 | if (!gdev) |
162 | return -ENOMEM; | 162 | return -ENOMEM; |
163 | 163 | ||
164 | memset(gdev, 0, sizeof(*gdev) + argc*sizeof(gdev->cdev[0])); | ||
165 | atomic_set(&gdev->onoff, 0); | 164 | atomic_set(&gdev->onoff, 0); |
166 | 165 | ||
167 | del_drvdata = 0; | 166 | del_drvdata = 0; |
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c index f4183d660258..6412b2c3edd3 100644 --- a/drivers/s390/cio/chsc.c +++ b/drivers/s390/cio/chsc.c | |||
@@ -98,10 +98,8 @@ chsc_get_sch_desc_irq(struct subchannel *sch, void *page) | |||
98 | 98 | ||
99 | ssd_area = page; | 99 | ssd_area = page; |
100 | 100 | ||
101 | ssd_area->request = (struct chsc_header) { | 101 | ssd_area->request.length = 0x0010; |
102 | .length = 0x0010, | 102 | ssd_area->request.code = 0x0004; |
103 | .code = 0x0004, | ||
104 | }; | ||
105 | 103 | ||
106 | ssd_area->ssid = sch->schid.ssid; | 104 | ssd_area->ssid = sch->schid.ssid; |
107 | ssd_area->f_sch = sch->schid.sch_no; | 105 | ssd_area->f_sch = sch->schid.sch_no; |
@@ -517,10 +515,8 @@ chsc_process_crw(void) | |||
517 | struct device *dev; | 515 | struct device *dev; |
518 | memset(sei_area, 0, sizeof(*sei_area)); | 516 | memset(sei_area, 0, sizeof(*sei_area)); |
519 | memset(&res_data, 0, sizeof(struct res_acc_data)); | 517 | memset(&res_data, 0, sizeof(struct res_acc_data)); |
520 | sei_area->request = (struct chsc_header) { | 518 | sei_area->request.length = 0x0010; |
521 | .length = 0x0010, | 519 | sei_area->request.code = 0x000e; |
522 | .code = 0x000e, | ||
523 | }; | ||
524 | 520 | ||
525 | ccode = chsc(sei_area); | 521 | ccode = chsc(sei_area); |
526 | if (ccode > 0) | 522 | if (ccode > 0) |
@@ -875,6 +871,264 @@ s390_vary_chpid( __u8 chpid, int on) | |||
875 | } | 871 | } |
876 | 872 | ||
877 | /* | 873 | /* |
874 | * Channel measurement related functions | ||
875 | */ | ||
876 | static ssize_t | ||
877 | chp_measurement_chars_read(struct kobject *kobj, char *buf, loff_t off, | ||
878 | size_t count) | ||
879 | { | ||
880 | struct channel_path *chp; | ||
881 | unsigned int size; | ||
882 | |||
883 | chp = to_channelpath(container_of(kobj, struct device, kobj)); | ||
884 | if (!chp->cmg_chars) | ||
885 | return 0; | ||
886 | |||
887 | size = sizeof(struct cmg_chars); | ||
888 | |||
889 | if (off > size) | ||
890 | return 0; | ||
891 | if (off + count > size) | ||
892 | count = size - off; | ||
893 | memcpy(buf, chp->cmg_chars + off, count); | ||
894 | return count; | ||
895 | } | ||
896 | |||
897 | static struct bin_attribute chp_measurement_chars_attr = { | ||
898 | .attr = { | ||
899 | .name = "measurement_chars", | ||
900 | .mode = S_IRUSR, | ||
901 | .owner = THIS_MODULE, | ||
902 | }, | ||
903 | .size = sizeof(struct cmg_chars), | ||
904 | .read = chp_measurement_chars_read, | ||
905 | }; | ||
906 | |||
907 | static void | ||
908 | chp_measurement_copy_block(struct cmg_entry *buf, | ||
909 | struct channel_subsystem *css, int chpid) | ||
910 | { | ||
911 | void *area; | ||
912 | struct cmg_entry *entry, reference_buf; | ||
913 | int idx; | ||
914 | |||
915 | if (chpid < 128) { | ||
916 | area = css->cub_addr1; | ||
917 | idx = chpid; | ||
918 | } else { | ||
919 | area = css->cub_addr2; | ||
920 | idx = chpid - 128; | ||
921 | } | ||
922 | entry = area + (idx * sizeof(struct cmg_entry)); | ||
923 | do { | ||
924 | memcpy(buf, entry, sizeof(*entry)); | ||
925 | memcpy(&reference_buf, entry, sizeof(*entry)); | ||
926 | } while (reference_buf.values[0] != buf->values[0]); | ||
927 | } | ||
928 | |||
929 | static ssize_t | ||
930 | chp_measurement_read(struct kobject *kobj, char *buf, loff_t off, size_t count) | ||
931 | { | ||
932 | struct channel_path *chp; | ||
933 | struct channel_subsystem *css; | ||
934 | unsigned int size; | ||
935 | |||
936 | chp = to_channelpath(container_of(kobj, struct device, kobj)); | ||
937 | css = to_css(chp->dev.parent); | ||
938 | |||
939 | size = sizeof(struct cmg_chars); | ||
940 | |||
941 | /* Only allow single reads. */ | ||
942 | if (off || count < size) | ||
943 | return 0; | ||
944 | chp_measurement_copy_block((struct cmg_entry *)buf, css, chp->id); | ||
945 | return count; | ||
946 | } | ||
947 | |||
948 | static struct bin_attribute chp_measurement_attr = { | ||
949 | .attr = { | ||
950 | .name = "measurement", | ||
951 | .mode = S_IRUSR, | ||
952 | .owner = THIS_MODULE, | ||
953 | }, | ||
954 | .size = sizeof(struct cmg_entry), | ||
955 | .read = chp_measurement_read, | ||
956 | }; | ||
957 | |||
958 | static void | ||
959 | chsc_remove_chp_cmg_attr(struct channel_path *chp) | ||
960 | { | ||
961 | sysfs_remove_bin_file(&chp->dev.kobj, &chp_measurement_chars_attr); | ||
962 | sysfs_remove_bin_file(&chp->dev.kobj, &chp_measurement_attr); | ||
963 | } | ||
964 | |||
965 | static int | ||
966 | chsc_add_chp_cmg_attr(struct channel_path *chp) | ||
967 | { | ||
968 | int ret; | ||
969 | |||
970 | ret = sysfs_create_bin_file(&chp->dev.kobj, | ||
971 | &chp_measurement_chars_attr); | ||
972 | if (ret) | ||
973 | return ret; | ||
974 | ret = sysfs_create_bin_file(&chp->dev.kobj, &chp_measurement_attr); | ||
975 | if (ret) | ||
976 | sysfs_remove_bin_file(&chp->dev.kobj, | ||
977 | &chp_measurement_chars_attr); | ||
978 | return ret; | ||
979 | } | ||
980 | |||
981 | static void | ||
982 | chsc_remove_cmg_attr(struct channel_subsystem *css) | ||
983 | { | ||
984 | int i; | ||
985 | |||
986 | for (i = 0; i <= __MAX_CHPID; i++) { | ||
987 | if (!css->chps[i]) | ||
988 | continue; | ||
989 | chsc_remove_chp_cmg_attr(css->chps[i]); | ||
990 | } | ||
991 | } | ||
992 | |||
993 | static int | ||
994 | chsc_add_cmg_attr(struct channel_subsystem *css) | ||
995 | { | ||
996 | int i, ret; | ||
997 | |||
998 | ret = 0; | ||
999 | for (i = 0; i <= __MAX_CHPID; i++) { | ||
1000 | if (!css->chps[i]) | ||
1001 | continue; | ||
1002 | ret = chsc_add_chp_cmg_attr(css->chps[i]); | ||
1003 | if (ret) | ||
1004 | goto cleanup; | ||
1005 | } | ||
1006 | return ret; | ||
1007 | cleanup: | ||
1008 | for (--i; i >= 0; i--) { | ||
1009 | if (!css->chps[i]) | ||
1010 | continue; | ||
1011 | chsc_remove_chp_cmg_attr(css->chps[i]); | ||
1012 | } | ||
1013 | return ret; | ||
1014 | } | ||
1015 | |||
1016 | |||
1017 | static int | ||
1018 | __chsc_do_secm(struct channel_subsystem *css, int enable, void *page) | ||
1019 | { | ||
1020 | struct { | ||
1021 | struct chsc_header request; | ||
1022 | u32 operation_code : 2; | ||
1023 | u32 : 30; | ||
1024 | u32 key : 4; | ||
1025 | u32 : 28; | ||
1026 | u32 zeroes1; | ||
1027 | u32 cub_addr1; | ||
1028 | u32 zeroes2; | ||
1029 | u32 cub_addr2; | ||
1030 | u32 reserved[13]; | ||
1031 | struct chsc_header response; | ||
1032 | u32 status : 8; | ||
1033 | u32 : 4; | ||
1034 | u32 fmt : 4; | ||
1035 | u32 : 16; | ||
1036 | } *secm_area; | ||
1037 | int ret, ccode; | ||
1038 | |||
1039 | secm_area = page; | ||
1040 | secm_area->request.length = 0x0050; | ||
1041 | secm_area->request.code = 0x0016; | ||
1042 | |||
1043 | secm_area->key = PAGE_DEFAULT_KEY; | ||
1044 | secm_area->cub_addr1 = (u64)(unsigned long)css->cub_addr1; | ||
1045 | secm_area->cub_addr2 = (u64)(unsigned long)css->cub_addr2; | ||
1046 | |||
1047 | secm_area->operation_code = enable ? 0 : 1; | ||
1048 | |||
1049 | ccode = chsc(secm_area); | ||
1050 | if (ccode > 0) | ||
1051 | return (ccode == 3) ? -ENODEV : -EBUSY; | ||
1052 | |||
1053 | switch (secm_area->response.code) { | ||
1054 | case 0x0001: /* Success. */ | ||
1055 | ret = 0; | ||
1056 | break; | ||
1057 | case 0x0003: /* Invalid block. */ | ||
1058 | case 0x0007: /* Invalid format. */ | ||
1059 | case 0x0008: /* Other invalid block. */ | ||
1060 | CIO_CRW_EVENT(2, "Error in chsc request block!\n"); | ||
1061 | ret = -EINVAL; | ||
1062 | break; | ||
1063 | case 0x0004: /* Command not provided in model. */ | ||
1064 | CIO_CRW_EVENT(2, "Model does not provide secm\n"); | ||
1065 | ret = -EOPNOTSUPP; | ||
1066 | break; | ||
1067 | case 0x0102: /* cub addresses incorrect */ | ||
1068 | CIO_CRW_EVENT(2, "Invalid addresses in chsc request block\n"); | ||
1069 | ret = -EINVAL; | ||
1070 | break; | ||
1071 | case 0x0103: /* key error */ | ||
1072 | CIO_CRW_EVENT(2, "Access key error in secm\n"); | ||
1073 | ret = -EINVAL; | ||
1074 | break; | ||
1075 | case 0x0105: /* error while starting */ | ||
1076 | CIO_CRW_EVENT(2, "Error while starting channel measurement\n"); | ||
1077 | ret = -EIO; | ||
1078 | break; | ||
1079 | default: | ||
1080 | CIO_CRW_EVENT(2, "Unknown CHSC response %d\n", | ||
1081 | secm_area->response.code); | ||
1082 | ret = -EIO; | ||
1083 | } | ||
1084 | return ret; | ||
1085 | } | ||
1086 | |||
1087 | int | ||
1088 | chsc_secm(struct channel_subsystem *css, int enable) | ||
1089 | { | ||
1090 | void *secm_area; | ||
1091 | int ret; | ||
1092 | |||
1093 | secm_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); | ||
1094 | if (!secm_area) | ||
1095 | return -ENOMEM; | ||
1096 | |||
1097 | mutex_lock(&css->mutex); | ||
1098 | if (enable && !css->cm_enabled) { | ||
1099 | css->cub_addr1 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); | ||
1100 | css->cub_addr2 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); | ||
1101 | if (!css->cub_addr1 || !css->cub_addr2) { | ||
1102 | free_page((unsigned long)css->cub_addr1); | ||
1103 | free_page((unsigned long)css->cub_addr2); | ||
1104 | free_page((unsigned long)secm_area); | ||
1105 | mutex_unlock(&css->mutex); | ||
1106 | return -ENOMEM; | ||
1107 | } | ||
1108 | } | ||
1109 | ret = __chsc_do_secm(css, enable, secm_area); | ||
1110 | if (!ret) { | ||
1111 | css->cm_enabled = enable; | ||
1112 | if (css->cm_enabled) { | ||
1113 | ret = chsc_add_cmg_attr(css); | ||
1114 | if (ret) { | ||
1115 | memset(secm_area, 0, PAGE_SIZE); | ||
1116 | __chsc_do_secm(css, 0, secm_area); | ||
1117 | css->cm_enabled = 0; | ||
1118 | } | ||
1119 | } else | ||
1120 | chsc_remove_cmg_attr(css); | ||
1121 | } | ||
1122 | if (enable && !css->cm_enabled) { | ||
1123 | free_page((unsigned long)css->cub_addr1); | ||
1124 | free_page((unsigned long)css->cub_addr2); | ||
1125 | } | ||
1126 | mutex_unlock(&css->mutex); | ||
1127 | free_page((unsigned long)secm_area); | ||
1128 | return ret; | ||
1129 | } | ||
1130 | |||
1131 | /* | ||
878 | * Files for the channel path entries. | 1132 | * Files for the channel path entries. |
879 | */ | 1133 | */ |
880 | static ssize_t | 1134 | static ssize_t |
@@ -925,9 +1179,39 @@ chp_type_show(struct device *dev, struct device_attribute *attr, char *buf) | |||
925 | 1179 | ||
926 | static DEVICE_ATTR(type, 0444, chp_type_show, NULL); | 1180 | static DEVICE_ATTR(type, 0444, chp_type_show, NULL); |
927 | 1181 | ||
1182 | static ssize_t | ||
1183 | chp_cmg_show(struct device *dev, struct device_attribute *attr, char *buf) | ||
1184 | { | ||
1185 | struct channel_path *chp = to_channelpath(dev); | ||
1186 | |||
1187 | if (!chp) | ||
1188 | return 0; | ||
1189 | if (chp->cmg == -1) /* channel measurements not available */ | ||
1190 | return sprintf(buf, "unknown\n"); | ||
1191 | return sprintf(buf, "%x\n", chp->cmg); | ||
1192 | } | ||
1193 | |||
1194 | static DEVICE_ATTR(cmg, 0444, chp_cmg_show, NULL); | ||
1195 | |||
1196 | static ssize_t | ||
1197 | chp_shared_show(struct device *dev, struct device_attribute *attr, char *buf) | ||
1198 | { | ||
1199 | struct channel_path *chp = to_channelpath(dev); | ||
1200 | |||
1201 | if (!chp) | ||
1202 | return 0; | ||
1203 | if (chp->shared == -1) /* channel measurements not available */ | ||
1204 | return sprintf(buf, "unknown\n"); | ||
1205 | return sprintf(buf, "%x\n", chp->shared); | ||
1206 | } | ||
1207 | |||
1208 | static DEVICE_ATTR(shared, 0444, chp_shared_show, NULL); | ||
1209 | |||
928 | static struct attribute * chp_attrs[] = { | 1210 | static struct attribute * chp_attrs[] = { |
929 | &dev_attr_status.attr, | 1211 | &dev_attr_status.attr, |
930 | &dev_attr_type.attr, | 1212 | &dev_attr_type.attr, |
1213 | &dev_attr_cmg.attr, | ||
1214 | &dev_attr_shared.attr, | ||
931 | NULL, | 1215 | NULL, |
932 | }; | 1216 | }; |
933 | 1217 | ||
@@ -966,10 +1250,8 @@ chsc_determine_channel_path_description(int chpid, | |||
966 | if (!scpd_area) | 1250 | if (!scpd_area) |
967 | return -ENOMEM; | 1251 | return -ENOMEM; |
968 | 1252 | ||
969 | scpd_area->request = (struct chsc_header) { | 1253 | scpd_area->request.length = 0x0010; |
970 | .length = 0x0010, | 1254 | scpd_area->request.code = 0x0002; |
971 | .code = 0x0002, | ||
972 | }; | ||
973 | 1255 | ||
974 | scpd_area->first_chpid = chpid; | 1256 | scpd_area->first_chpid = chpid; |
975 | scpd_area->last_chpid = chpid; | 1257 | scpd_area->last_chpid = chpid; |
@@ -1006,6 +1288,111 @@ out: | |||
1006 | return ret; | 1288 | return ret; |
1007 | } | 1289 | } |
1008 | 1290 | ||
1291 | static void | ||
1292 | chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv, | ||
1293 | struct cmg_chars *chars) | ||
1294 | { | ||
1295 | switch (chp->cmg) { | ||
1296 | case 2: | ||
1297 | case 3: | ||
1298 | chp->cmg_chars = kmalloc(sizeof(struct cmg_chars), | ||
1299 | GFP_KERNEL); | ||
1300 | if (chp->cmg_chars) { | ||
1301 | int i, mask; | ||
1302 | struct cmg_chars *cmg_chars; | ||
1303 | |||
1304 | cmg_chars = chp->cmg_chars; | ||
1305 | for (i = 0; i < NR_MEASUREMENT_CHARS; i++) { | ||
1306 | mask = 0x80 >> (i + 3); | ||
1307 | if (cmcv & mask) | ||
1308 | cmg_chars->values[i] = chars->values[i]; | ||
1309 | else | ||
1310 | cmg_chars->values[i] = 0; | ||
1311 | } | ||
1312 | } | ||
1313 | break; | ||
1314 | default: | ||
1315 | /* No cmg-dependent data. */ | ||
1316 | break; | ||
1317 | } | ||
1318 | } | ||
1319 | |||
1320 | static int | ||
1321 | chsc_get_channel_measurement_chars(struct channel_path *chp) | ||
1322 | { | ||
1323 | int ccode, ret; | ||
1324 | |||
1325 | struct { | ||
1326 | struct chsc_header request; | ||
1327 | u32 : 24; | ||
1328 | u32 first_chpid : 8; | ||
1329 | u32 : 24; | ||
1330 | u32 last_chpid : 8; | ||
1331 | u32 zeroes1; | ||
1332 | struct chsc_header response; | ||
1333 | u32 zeroes2; | ||
1334 | u32 not_valid : 1; | ||
1335 | u32 shared : 1; | ||
1336 | u32 : 22; | ||
1337 | u32 chpid : 8; | ||
1338 | u32 cmcv : 5; | ||
1339 | u32 : 11; | ||
1340 | u32 cmgq : 8; | ||
1341 | u32 cmg : 8; | ||
1342 | u32 zeroes3; | ||
1343 | u32 data[NR_MEASUREMENT_CHARS]; | ||
1344 | } *scmc_area; | ||
1345 | |||
1346 | scmc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); | ||
1347 | if (!scmc_area) | ||
1348 | return -ENOMEM; | ||
1349 | |||
1350 | scmc_area->request.length = 0x0010; | ||
1351 | scmc_area->request.code = 0x0022; | ||
1352 | |||
1353 | scmc_area->first_chpid = chp->id; | ||
1354 | scmc_area->last_chpid = chp->id; | ||
1355 | |||
1356 | ccode = chsc(scmc_area); | ||
1357 | if (ccode > 0) { | ||
1358 | ret = (ccode == 3) ? -ENODEV : -EBUSY; | ||
1359 | goto out; | ||
1360 | } | ||
1361 | |||
1362 | switch (scmc_area->response.code) { | ||
1363 | case 0x0001: /* Success. */ | ||
1364 | if (!scmc_area->not_valid) { | ||
1365 | chp->cmg = scmc_area->cmg; | ||
1366 | chp->shared = scmc_area->shared; | ||
1367 | chsc_initialize_cmg_chars(chp, scmc_area->cmcv, | ||
1368 | (struct cmg_chars *) | ||
1369 | &scmc_area->data); | ||
1370 | } else { | ||
1371 | chp->cmg = -1; | ||
1372 | chp->shared = -1; | ||
1373 | } | ||
1374 | ret = 0; | ||
1375 | break; | ||
1376 | case 0x0003: /* Invalid block. */ | ||
1377 | case 0x0007: /* Invalid format. */ | ||
1378 | case 0x0008: /* Invalid bit combination. */ | ||
1379 | CIO_CRW_EVENT(2, "Error in chsc request block!\n"); | ||
1380 | ret = -EINVAL; | ||
1381 | break; | ||
1382 | case 0x0004: /* Command not provided. */ | ||
1383 | CIO_CRW_EVENT(2, "Model does not provide scmc\n"); | ||
1384 | ret = -EOPNOTSUPP; | ||
1385 | break; | ||
1386 | default: | ||
1387 | CIO_CRW_EVENT(2, "Unknown CHSC response %d\n", | ||
1388 | scmc_area->response.code); | ||
1389 | ret = -EIO; | ||
1390 | } | ||
1391 | out: | ||
1392 | free_page((unsigned long)scmc_area); | ||
1393 | return ret; | ||
1394 | } | ||
1395 | |||
1009 | /* | 1396 | /* |
1010 | * Entries for chpids on the system bus. | 1397 | * Entries for chpids on the system bus. |
1011 | * This replaces /proc/chpids. | 1398 | * This replaces /proc/chpids. |
@@ -1016,10 +1403,9 @@ new_channel_path(int chpid) | |||
1016 | struct channel_path *chp; | 1403 | struct channel_path *chp; |
1017 | int ret; | 1404 | int ret; |
1018 | 1405 | ||
1019 | chp = kmalloc(sizeof(struct channel_path), GFP_KERNEL); | 1406 | chp = kzalloc(sizeof(struct channel_path), GFP_KERNEL); |
1020 | if (!chp) | 1407 | if (!chp) |
1021 | return -ENOMEM; | 1408 | return -ENOMEM; |
1022 | memset(chp, 0, sizeof(struct channel_path)); | ||
1023 | 1409 | ||
1024 | /* fill in status, etc. */ | 1410 | /* fill in status, etc. */ |
1025 | chp->id = chpid; | 1411 | chp->id = chpid; |
@@ -1034,6 +1420,22 @@ new_channel_path(int chpid) | |||
1034 | ret = chsc_determine_channel_path_description(chpid, &chp->desc); | 1420 | ret = chsc_determine_channel_path_description(chpid, &chp->desc); |
1035 | if (ret) | 1421 | if (ret) |
1036 | goto out_free; | 1422 | goto out_free; |
1423 | /* Get channel-measurement characteristics. */ | ||
1424 | if (css_characteristics_avail && css_chsc_characteristics.scmc | ||
1425 | && css_chsc_characteristics.secm) { | ||
1426 | ret = chsc_get_channel_measurement_chars(chp); | ||
1427 | if (ret) | ||
1428 | goto out_free; | ||
1429 | } else { | ||
1430 | static int msg_done; | ||
1431 | |||
1432 | if (!msg_done) { | ||
1433 | printk(KERN_WARNING "cio: Channel measurements not " | ||
1434 | "available, continuing.\n"); | ||
1435 | msg_done = 1; | ||
1436 | } | ||
1437 | chp->cmg = -1; | ||
1438 | } | ||
1037 | 1439 | ||
1038 | /* make it known to the system */ | 1440 | /* make it known to the system */ |
1039 | ret = device_register(&chp->dev); | 1441 | ret = device_register(&chp->dev); |
@@ -1046,8 +1448,19 @@ new_channel_path(int chpid) | |||
1046 | if (ret) { | 1448 | if (ret) { |
1047 | device_unregister(&chp->dev); | 1449 | device_unregister(&chp->dev); |
1048 | goto out_free; | 1450 | goto out_free; |
1049 | } else | 1451 | } |
1050 | css[0]->chps[chpid] = chp; | 1452 | mutex_lock(&css[0]->mutex); |
1453 | if (css[0]->cm_enabled) { | ||
1454 | ret = chsc_add_chp_cmg_attr(chp); | ||
1455 | if (ret) { | ||
1456 | sysfs_remove_group(&chp->dev.kobj, &chp_attr_group); | ||
1457 | device_unregister(&chp->dev); | ||
1458 | mutex_unlock(&css[0]->mutex); | ||
1459 | goto out_free; | ||
1460 | } | ||
1461 | } | ||
1462 | css[0]->chps[chpid] = chp; | ||
1463 | mutex_unlock(&css[0]->mutex); | ||
1051 | return ret; | 1464 | return ret; |
1052 | out_free: | 1465 | out_free: |
1053 | kfree(chp); | 1466 | kfree(chp); |
@@ -1103,10 +1516,8 @@ chsc_enable_facility(int operation_code) | |||
1103 | sda_area = (void *)get_zeroed_page(GFP_KERNEL|GFP_DMA); | 1516 | sda_area = (void *)get_zeroed_page(GFP_KERNEL|GFP_DMA); |
1104 | if (!sda_area) | 1517 | if (!sda_area) |
1105 | return -ENOMEM; | 1518 | return -ENOMEM; |
1106 | sda_area->request = (struct chsc_header) { | 1519 | sda_area->request.length = 0x0400; |
1107 | .length = 0x0400, | 1520 | sda_area->request.code = 0x0031; |
1108 | .code = 0x0031, | ||
1109 | }; | ||
1110 | sda_area->operation_code = operation_code; | 1521 | sda_area->operation_code = operation_code; |
1111 | 1522 | ||
1112 | ret = chsc(sda_area); | 1523 | ret = chsc(sda_area); |
@@ -1161,10 +1572,8 @@ chsc_determine_css_characteristics(void) | |||
1161 | return -ENOMEM; | 1572 | return -ENOMEM; |
1162 | } | 1573 | } |
1163 | 1574 | ||
1164 | scsc_area->request = (struct chsc_header) { | 1575 | scsc_area->request.length = 0x0010; |
1165 | .length = 0x0010, | 1576 | scsc_area->request.code = 0x0010; |
1166 | .code = 0x0010, | ||
1167 | }; | ||
1168 | 1577 | ||
1169 | result = chsc(scsc_area); | 1578 | result = chsc(scsc_area); |
1170 | if (result) { | 1579 | if (result) { |
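The chsc.c additions above export channel-path utilization data through two per-chpid binary sysfs attributes, measurement_chars and measurement; the latter rejects reads at a non-zero offset or with a short count, so user space should fetch the whole entry in a single read. A hypothetical user-space sketch follows; the sysfs path is an assumption derived from the css and channel-path bus_ids and may differ on a given system.

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define NR_MEASUREMENT_ENTRIES 8	/* matches the kernel definition below */

int main(void)
{
	uint32_t values[NR_MEASUREMENT_ENTRIES];
	int fd, i;
	ssize_t n;

	/* assumed path for channel path 0x40 on channel subsystem 0 */
	fd = open("/sys/devices/css0/chp0.40/measurement", O_RDONLY);
	if (fd < 0)
		return 1;
	n = read(fd, values, sizeof(values));	/* one read covering the full block */
	close(fd);
	if (n != sizeof(values))
		return 1;
	for (i = 0; i < NR_MEASUREMENT_ENTRIES; i++)
		printf("value[%d] = %u\n", i, values[i]);
	return 0;
}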
diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h index 3e75095f35d0..a259245780ae 100644 --- a/drivers/s390/cio/chsc.h +++ b/drivers/s390/cio/chsc.h | |||
@@ -12,6 +12,16 @@ struct chsc_header { | |||
12 | u16 code; | 12 | u16 code; |
13 | }; | 13 | }; |
14 | 14 | ||
15 | #define NR_MEASUREMENT_CHARS 5 | ||
16 | struct cmg_chars { | ||
17 | u32 values[NR_MEASUREMENT_CHARS]; | ||
18 | }; | ||
19 | |||
20 | #define NR_MEASUREMENT_ENTRIES 8 | ||
21 | struct cmg_entry { | ||
22 | u32 values[NR_MEASUREMENT_ENTRIES]; | ||
23 | }; | ||
24 | |||
15 | struct channel_path_desc { | 25 | struct channel_path_desc { |
16 | u8 flags; | 26 | u8 flags; |
17 | u8 lsn; | 27 | u8 lsn; |
@@ -27,6 +37,10 @@ struct channel_path { | |||
27 | int id; | 37 | int id; |
28 | int state; | 38 | int state; |
29 | struct channel_path_desc desc; | 39 | struct channel_path_desc desc; |
40 | /* Channel-measurement related stuff: */ | ||
41 | int cmg; | ||
42 | int shared; | ||
43 | void *cmg_chars; | ||
30 | struct device dev; | 44 | struct device dev; |
31 | }; | 45 | }; |
32 | 46 | ||
@@ -52,7 +66,11 @@ struct css_general_char { | |||
52 | 66 | ||
53 | struct css_chsc_char { | 67 | struct css_chsc_char { |
54 | u64 res; | 68 | u64 res; |
55 | u64 : 43; | 69 | u64 : 20; |
70 | u32 secm : 1; /* bit 84 */ | ||
71 | u32 : 1; | ||
72 | u32 scmc : 1; /* bit 86 */ | ||
73 | u32 : 20; | ||
56 | u32 scssc : 1; /* bit 107 */ | 74 | u32 scssc : 1; /* bit 107 */ |
57 | u32 scsscf : 1; /* bit 108 */ | 75 | u32 scsscf : 1; /* bit 108 */ |
58 | u32 : 19; | 76 | u32 : 19; |
@@ -67,6 +85,8 @@ extern int css_characteristics_avail; | |||
67 | extern void *chsc_get_chp_desc(struct subchannel*, int); | 85 | extern void *chsc_get_chp_desc(struct subchannel*, int); |
68 | 86 | ||
69 | extern int chsc_enable_facility(int); | 87 | extern int chsc_enable_facility(int); |
88 | struct channel_subsystem; | ||
89 | extern int chsc_secm(struct channel_subsystem *, int); | ||
70 | 90 | ||
71 | #define to_channelpath(device) container_of(device, struct channel_path, dev) | 91 | #define to_channelpath(device) container_of(device, struct channel_path, dev) |
72 | 92 | ||
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c index 1bbf231f8aaf..74ea8aac4b7d 100644 --- a/drivers/s390/cio/css.c +++ b/drivers/s390/cio/css.c | |||
@@ -409,6 +409,9 @@ __init_channel_subsystem(struct subchannel_id schid, void *data) | |||
409 | /* -ENXIO: no more subchannels. */ | 409 | /* -ENXIO: no more subchannels. */ |
410 | case -ENXIO: | 410 | case -ENXIO: |
411 | return ret; | 411 | return ret; |
412 | /* -EIO: this subchannel set not supported. */ | ||
413 | case -EIO: | ||
414 | return ret; | ||
412 | default: | 415 | default: |
413 | return 0; | 416 | return 0; |
414 | } | 417 | } |
@@ -449,15 +452,50 @@ channel_subsystem_release(struct device *dev) | |||
449 | struct channel_subsystem *css; | 452 | struct channel_subsystem *css; |
450 | 453 | ||
451 | css = to_css(dev); | 454 | css = to_css(dev); |
455 | mutex_destroy(&css->mutex); | ||
452 | kfree(css); | 456 | kfree(css); |
453 | } | 457 | } |
454 | 458 | ||
459 | static ssize_t | ||
460 | css_cm_enable_show(struct device *dev, struct device_attribute *attr, | ||
461 | char *buf) | ||
462 | { | ||
463 | struct channel_subsystem *css = to_css(dev); | ||
464 | |||
465 | if (!css) | ||
466 | return 0; | ||
467 | return sprintf(buf, "%x\n", css->cm_enabled); | ||
468 | } | ||
469 | |||
470 | static ssize_t | ||
471 | css_cm_enable_store(struct device *dev, struct device_attribute *attr, | ||
472 | const char *buf, size_t count) | ||
473 | { | ||
474 | struct channel_subsystem *css = to_css(dev); | ||
475 | int ret; | ||
476 | |||
477 | switch (buf[0]) { | ||
478 | case '0': | ||
479 | ret = css->cm_enabled ? chsc_secm(css, 0) : 0; | ||
480 | break; | ||
481 | case '1': | ||
482 | ret = css->cm_enabled ? 0 : chsc_secm(css, 1); | ||
483 | break; | ||
484 | default: | ||
485 | ret = -EINVAL; | ||
486 | } | ||
487 | return ret < 0 ? ret : count; | ||
488 | } | ||
489 | |||
490 | static DEVICE_ATTR(cm_enable, 0644, css_cm_enable_show, css_cm_enable_store); | ||
491 | |||
455 | static inline void __init | 492 | static inline void __init |
456 | setup_css(int nr) | 493 | setup_css(int nr) |
457 | { | 494 | { |
458 | u32 tod_high; | 495 | u32 tod_high; |
459 | 496 | ||
460 | memset(css[nr], 0, sizeof(struct channel_subsystem)); | 497 | memset(css[nr], 0, sizeof(struct channel_subsystem)); |
498 | mutex_init(&css[nr]->mutex); | ||
461 | css[nr]->valid = 1; | 499 | css[nr]->valid = 1; |
462 | css[nr]->cssid = nr; | 500 | css[nr]->cssid = nr; |
463 | sprintf(css[nr]->device.bus_id, "css%x", nr); | 501 | sprintf(css[nr]->device.bus_id, "css%x", nr); |
@@ -504,6 +542,9 @@ init_channel_subsystem (void) | |||
504 | ret = device_register(&css[i]->device); | 542 | ret = device_register(&css[i]->device); |
505 | if (ret) | 543 | if (ret) |
506 | goto out_free; | 544 | goto out_free; |
545 | if (css_characteristics_avail && css_chsc_characteristics.secm) | ||
546 | device_create_file(&css[i]->device, | ||
547 | &dev_attr_cm_enable); | ||
507 | } | 548 | } |
508 | css_init_done = 1; | 549 | css_init_done = 1; |
509 | 550 | ||
@@ -516,6 +557,9 @@ out_free: | |||
516 | out_unregister: | 557 | out_unregister: |
517 | while (i > 0) { | 558 | while (i > 0) { |
518 | i--; | 559 | i--; |
560 | if (css_characteristics_avail && css_chsc_characteristics.secm) | ||
561 | device_remove_file(&css[i]->device, | ||
562 | &dev_attr_cm_enable); | ||
519 | device_unregister(&css[i]->device); | 563 | device_unregister(&css[i]->device); |
520 | } | 564 | } |
521 | out_bus: | 565 | out_bus: |
@@ -586,10 +630,9 @@ css_enqueue_subchannel_slow(struct subchannel_id schid) | |||
586 | struct slow_subchannel *new_slow_sch; | 630 | struct slow_subchannel *new_slow_sch; |
587 | unsigned long flags; | 631 | unsigned long flags; |
588 | 632 | ||
589 | new_slow_sch = kmalloc(sizeof(struct slow_subchannel), GFP_ATOMIC); | 633 | new_slow_sch = kzalloc(sizeof(struct slow_subchannel), GFP_ATOMIC); |
590 | if (!new_slow_sch) | 634 | if (!new_slow_sch) |
591 | return -ENOMEM; | 635 | return -ENOMEM; |
592 | memset(new_slow_sch, 0, sizeof(struct slow_subchannel)); | ||
593 | new_slow_sch->schid = schid; | 636 | new_slow_sch->schid = schid; |
594 | spin_lock_irqsave(&slow_subchannel_lock, flags); | 637 | spin_lock_irqsave(&slow_subchannel_lock, flags); |
595 | list_add_tail(&new_slow_sch->slow_list, &slow_subchannels_head); | 638 | list_add_tail(&new_slow_sch->slow_list, &slow_subchannels_head); |
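The css.c hunk above adds a writable cm_enable attribute to each channel subsystem device; writing '1' calls chsc_secm(css, 1) to start channel measurements, writing '0' stops them again. A hypothetical user-space sketch (the /sys/devices/css0 path is an assumption based on the css%x bus_id):

#include <fcntl.h>
#include <unistd.h>

/* Returns 0 on success, -1 on failure; the path is an assumed example. */
static int set_cm_enable(int enable)
{
	int fd, rc;

	fd = open("/sys/devices/css0/cm_enable", O_WRONLY);
	if (fd < 0)
		return -1;
	rc = (write(fd, enable ? "1" : "0", 1) == 1) ? 0 : -1;
	close(fd);
	return rc;
}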
diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h index b6375861cb37..74a257b23383 100644 --- a/drivers/s390/cio/css.h +++ b/drivers/s390/cio/css.h | |||
@@ -1,6 +1,7 @@ | |||
1 | #ifndef _CSS_H | 1 | #ifndef _CSS_H |
2 | #define _CSS_H | 2 | #define _CSS_H |
3 | 3 | ||
4 | #include <linux/mutex.h> | ||
4 | #include <linux/wait.h> | 5 | #include <linux/wait.h> |
5 | #include <linux/workqueue.h> | 6 | #include <linux/workqueue.h> |
6 | 7 | ||
@@ -150,6 +151,11 @@ struct channel_subsystem { | |||
150 | struct channel_path *chps[__MAX_CHPID + 1]; | 151 | struct channel_path *chps[__MAX_CHPID + 1]; |
151 | struct device device; | 152 | struct device device; |
152 | struct pgid global_pgid; | 153 | struct pgid global_pgid; |
154 | struct mutex mutex; | ||
155 | /* channel measurement related */ | ||
156 | int cm_enabled; | ||
157 | void *cub_addr1; | ||
158 | void *cub_addr2; | ||
153 | }; | 159 | }; |
154 | #define to_css(dev) container_of(dev, struct channel_subsystem, device) | 160 | #define to_css(dev) container_of(dev, struct channel_subsystem, device) |
155 | 161 | ||
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c index afc4e88551ad..8e3053c2a451 100644 --- a/drivers/s390/cio/device.c +++ b/drivers/s390/cio/device.c | |||
@@ -826,17 +826,15 @@ io_subchannel_probe (struct subchannel *sch) | |||
826 | get_device(&cdev->dev); | 826 | get_device(&cdev->dev); |
827 | return 0; | 827 | return 0; |
828 | } | 828 | } |
829 | cdev = kmalloc (sizeof(*cdev), GFP_KERNEL); | 829 | cdev = kzalloc (sizeof(*cdev), GFP_KERNEL); |
830 | if (!cdev) | 830 | if (!cdev) |
831 | return -ENOMEM; | 831 | return -ENOMEM; |
832 | memset(cdev, 0, sizeof(struct ccw_device)); | 832 | cdev->private = kzalloc(sizeof(struct ccw_device_private), |
833 | cdev->private = kmalloc(sizeof(struct ccw_device_private), | ||
834 | GFP_KERNEL | GFP_DMA); | 833 | GFP_KERNEL | GFP_DMA); |
835 | if (!cdev->private) { | 834 | if (!cdev->private) { |
836 | kfree(cdev); | 835 | kfree(cdev); |
837 | return -ENOMEM; | 836 | return -ENOMEM; |
838 | } | 837 | } |
839 | memset(cdev->private, 0, sizeof(struct ccw_device_private)); | ||
840 | atomic_set(&cdev->private->onoff, 0); | 838 | atomic_set(&cdev->private->onoff, 0); |
841 | cdev->dev = (struct device) { | 839 | cdev->dev = (struct device) { |
842 | .parent = &sch->dev, | 840 | .parent = &sch->dev, |
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c index b302779e7cff..180b3bf8b90d 100644 --- a/drivers/s390/cio/device_fsm.c +++ b/drivers/s390/cio/device_fsm.c | |||
@@ -827,6 +827,17 @@ ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event) | |||
827 | } | 827 | } |
828 | return; | 828 | return; |
829 | } | 829 | } |
830 | /* | ||
831 | * Check if a halt or clear has been issued in the meanwhile. If yes, | ||
832 | * only deliver the halt/clear interrupt to the device driver as if it | ||
833 | * had killed the original request. | ||
834 | */ | ||
835 | if (irb->scsw.fctl & (SCSW_FCTL_CLEAR_FUNC | SCSW_FCTL_HALT_FUNC)) { | ||
836 | cdev->private->flags.dosense = 0; | ||
837 | memset(&cdev->private->irb, 0, sizeof(struct irb)); | ||
838 | ccw_device_accumulate_irb(cdev, irb); | ||
839 | goto call_handler; | ||
840 | } | ||
830 | /* Add basic sense info to irb. */ | 841 | /* Add basic sense info to irb. */ |
831 | ccw_device_accumulate_basic_sense(cdev, irb); | 842 | ccw_device_accumulate_basic_sense(cdev, irb); |
832 | if (cdev->private->flags.dosense) { | 843 | if (cdev->private->flags.dosense) { |
@@ -834,6 +845,7 @@ ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event) | |||
834 | ccw_device_do_sense(cdev, irb); | 845 | ccw_device_do_sense(cdev, irb); |
835 | return; | 846 | return; |
836 | } | 847 | } |
848 | call_handler: | ||
837 | cdev->private->state = DEV_STATE_ONLINE; | 849 | cdev->private->state = DEV_STATE_ONLINE; |
838 | /* Call the handler. */ | 850 | /* Call the handler. */ |
839 | if (ccw_device_call_handler(cdev) && cdev->private->flags.doverify) | 851 | if (ccw_device_call_handler(cdev) && cdev->private->flags.doverify) |
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c index 3a50b1903287..795abb5a65ba 100644 --- a/drivers/s390/cio/device_ops.c +++ b/drivers/s390/cio/device_ops.c | |||
@@ -359,10 +359,9 @@ read_dev_chars (struct ccw_device *cdev, void **buffer, int length) | |||
359 | CIO_TRACE_EVENT (4, "rddevch"); | 359 | CIO_TRACE_EVENT (4, "rddevch"); |
360 | CIO_TRACE_EVENT (4, sch->dev.bus_id); | 360 | CIO_TRACE_EVENT (4, sch->dev.bus_id); |
361 | 361 | ||
362 | rdc_ccw = kmalloc(sizeof(struct ccw1), GFP_KERNEL | GFP_DMA); | 362 | rdc_ccw = kzalloc(sizeof(struct ccw1), GFP_KERNEL | GFP_DMA); |
363 | if (!rdc_ccw) | 363 | if (!rdc_ccw) |
364 | return -ENOMEM; | 364 | return -ENOMEM; |
365 | memset(rdc_ccw, 0, sizeof(struct ccw1)); | ||
366 | rdc_ccw->cmd_code = CCW_CMD_RDC; | 365 | rdc_ccw->cmd_code = CCW_CMD_RDC; |
367 | rdc_ccw->count = length; | 366 | rdc_ccw->count = length; |
368 | rdc_ccw->flags = CCW_FLAG_SLI; | 367 | rdc_ccw->flags = CCW_FLAG_SLI; |
@@ -426,16 +425,14 @@ read_conf_data_lpm (struct ccw_device *cdev, void **buffer, int *length, __u8 lp | |||
426 | if (!ciw || ciw->cmd == 0) | 425 | if (!ciw || ciw->cmd == 0) |
427 | return -EOPNOTSUPP; | 426 | return -EOPNOTSUPP; |
428 | 427 | ||
429 | rcd_ccw = kmalloc(sizeof(struct ccw1), GFP_KERNEL | GFP_DMA); | 428 | rcd_ccw = kzalloc(sizeof(struct ccw1), GFP_KERNEL | GFP_DMA); |
430 | if (!rcd_ccw) | 429 | if (!rcd_ccw) |
431 | return -ENOMEM; | 430 | return -ENOMEM; |
432 | memset(rcd_ccw, 0, sizeof(struct ccw1)); | 431 | rcd_buf = kzalloc(ciw->count, GFP_KERNEL | GFP_DMA); |
433 | rcd_buf = kmalloc(ciw->count, GFP_KERNEL | GFP_DMA); | ||
434 | if (!rcd_buf) { | 432 | if (!rcd_buf) { |
435 | kfree(rcd_ccw); | 433 | kfree(rcd_ccw); |
436 | return -ENOMEM; | 434 | return -ENOMEM; |
437 | } | 435 | } |
438 | memset (rcd_buf, 0, ciw->count); | ||
439 | rcd_ccw->cmd_code = ciw->cmd; | 436 | rcd_ccw->cmd_code = ciw->cmd; |
440 | rcd_ccw->cda = (__u32) __pa (rcd_buf); | 437 | rcd_ccw->cda = (__u32) __pa (rcd_buf); |
441 | rcd_ccw->count = ciw->count; | 438 | rcd_ccw->count = ciw->count; |
diff --git a/drivers/s390/cio/qdio.c b/drivers/s390/cio/qdio.c index 9ed37dc9a1b0..814f9258ce00 100644 --- a/drivers/s390/cio/qdio.c +++ b/drivers/s390/cio/qdio.c | |||
@@ -1686,16 +1686,14 @@ qdio_alloc_qs(struct qdio_irq *irq_ptr, | |||
1686 | int result=-ENOMEM; | 1686 | int result=-ENOMEM; |
1687 | 1687 | ||
1688 | for (i=0;i<no_input_qs;i++) { | 1688 | for (i=0;i<no_input_qs;i++) { |
1689 | q=kmalloc(sizeof(struct qdio_q),GFP_KERNEL); | 1689 | q = kzalloc(sizeof(struct qdio_q), GFP_KERNEL); |
1690 | 1690 | ||
1691 | if (!q) { | 1691 | if (!q) { |
1692 | QDIO_PRINT_ERR("kmalloc of q failed!\n"); | 1692 | QDIO_PRINT_ERR("kmalloc of q failed!\n"); |
1693 | goto out; | 1693 | goto out; |
1694 | } | 1694 | } |
1695 | 1695 | ||
1696 | memset(q,0,sizeof(struct qdio_q)); | 1696 | q->slib = kmalloc(PAGE_SIZE, GFP_KERNEL); |
1697 | |||
1698 | q->slib=kmalloc(PAGE_SIZE,GFP_KERNEL); | ||
1699 | if (!q->slib) { | 1697 | if (!q->slib) { |
1700 | QDIO_PRINT_ERR("kmalloc of slib failed!\n"); | 1698 | QDIO_PRINT_ERR("kmalloc of slib failed!\n"); |
1701 | goto out; | 1699 | goto out; |
@@ -1705,14 +1703,12 @@ qdio_alloc_qs(struct qdio_irq *irq_ptr, | |||
1705 | } | 1703 | } |
1706 | 1704 | ||
1707 | for (i=0;i<no_output_qs;i++) { | 1705 | for (i=0;i<no_output_qs;i++) { |
1708 | q=kmalloc(sizeof(struct qdio_q),GFP_KERNEL); | 1706 | q = kzalloc(sizeof(struct qdio_q), GFP_KERNEL); |
1709 | 1707 | ||
1710 | if (!q) { | 1708 | if (!q) { |
1711 | goto out; | 1709 | goto out; |
1712 | } | 1710 | } |
1713 | 1711 | ||
1714 | memset(q,0,sizeof(struct qdio_q)); | ||
1715 | |||
1716 | q->slib=kmalloc(PAGE_SIZE,GFP_KERNEL); | 1712 | q->slib=kmalloc(PAGE_SIZE,GFP_KERNEL); |
1717 | if (!q->slib) { | 1713 | if (!q->slib) { |
1718 | QDIO_PRINT_ERR("kmalloc of slib failed!\n"); | 1714 | QDIO_PRINT_ERR("kmalloc of slib failed!\n"); |
@@ -2984,7 +2980,7 @@ qdio_allocate(struct qdio_initialize *init_data) | |||
2984 | qdio_allocate_do_dbf(init_data); | 2980 | qdio_allocate_do_dbf(init_data); |
2985 | 2981 | ||
2986 | /* create irq */ | 2982 | /* create irq */ |
2987 | irq_ptr=kmalloc(sizeof(struct qdio_irq), GFP_KERNEL | GFP_DMA); | 2983 | irq_ptr = kzalloc(sizeof(struct qdio_irq), GFP_KERNEL | GFP_DMA); |
2988 | 2984 | ||
2989 | QDIO_DBF_TEXT0(0,setup,"irq_ptr:"); | 2985 | QDIO_DBF_TEXT0(0,setup,"irq_ptr:"); |
2990 | QDIO_DBF_HEX0(0,setup,&irq_ptr,sizeof(void*)); | 2986 | QDIO_DBF_HEX0(0,setup,&irq_ptr,sizeof(void*)); |
@@ -2994,8 +2990,6 @@ qdio_allocate(struct qdio_initialize *init_data) | |||
2994 | return -ENOMEM; | 2990 | return -ENOMEM; |
2995 | } | 2991 | } |
2996 | 2992 | ||
2997 | memset(irq_ptr,0,sizeof(struct qdio_irq)); | ||
2998 | |||
2999 | init_MUTEX(&irq_ptr->setting_up_sema); | 2993 | init_MUTEX(&irq_ptr->setting_up_sema); |
3000 | 2994 | ||
3001 | /* QDR must be in DMA area since CCW data address is only 32 bit */ | 2995 | /* QDR must be in DMA area since CCW data address is only 32 bit */ |
@@ -3686,10 +3680,10 @@ qdio_get_qdio_memory(void) | |||
3686 | 3680 | ||
3687 | for (i=1;i<INDICATORS_PER_CACHELINE;i++) | 3681 | for (i=1;i<INDICATORS_PER_CACHELINE;i++) |
3688 | indicator_used[i]=0; | 3682 | indicator_used[i]=0; |
3689 | indicators=(__u32*)kmalloc(sizeof(__u32)*(INDICATORS_PER_CACHELINE), | 3683 | indicators = kzalloc(sizeof(__u32)*(INDICATORS_PER_CACHELINE), |
3690 | GFP_KERNEL); | 3684 | GFP_KERNEL); |
3691 | if (!indicators) return -ENOMEM; | 3685 | if (!indicators) |
3692 | memset(indicators,0,sizeof(__u32)*(INDICATORS_PER_CACHELINE)); | 3686 | return -ENOMEM; |
3693 | return 0; | 3687 | return 0; |
3694 | } | 3688 | } |
3695 | 3689 | ||
diff --git a/drivers/s390/crypto/z90hardware.c b/drivers/s390/crypto/z90hardware.c index 4141919da805..be60795f4a74 100644 --- a/drivers/s390/crypto/z90hardware.c +++ b/drivers/s390/crypto/z90hardware.c | |||
@@ -2214,7 +2214,7 @@ ICACRT_msg_to_type50CRT_msg(struct ica_rsa_modexpo_crt *icaMsg_p, | |||
2214 | long_len = 128; | 2214 | long_len = 128; |
2215 | } | 2215 | } |
2216 | 2216 | ||
2217 | tmp_size = ((mod_len <= 128) ? TYPE50_CRB1_LEN : TYPE50_CRB2_LEN) + | 2217 | tmp_size = ((long_len <= 64) ? TYPE50_CRB1_LEN : TYPE50_CRB2_LEN) + |
2218 | CALLER_HEADER; | 2218 | CALLER_HEADER; |
2219 | 2219 | ||
2220 | memset(z90cMsg_p, 0, tmp_size); | 2220 | memset(z90cMsg_p, 0, tmp_size); |
@@ -2479,8 +2479,16 @@ convert_response(unsigned char *response, unsigned char *buffer, | |||
2479 | 2479 | ||
2480 | if (reply_code) | 2480 | if (reply_code) |
2481 | switch (reply_code) { | 2481 | switch (reply_code) { |
2482 | case REP82_ERROR_MACHINE_FAILURE: | ||
2483 | if (errh_p->type == TYPE82_RSP_CODE) | ||
2484 | PRINTKW("Machine check failure\n"); | ||
2485 | else | ||
2486 | PRINTKW("Module failure\n"); | ||
2487 | return REC_HARDWAR_ERR; | ||
2482 | case REP82_ERROR_OPERAND_INVALID: | 2488 | case REP82_ERROR_OPERAND_INVALID: |
2489 | return REC_OPERAND_INV; | ||
2483 | case REP88_ERROR_MESSAGE_MALFORMD: | 2490 | case REP88_ERROR_MESSAGE_MALFORMD: |
2491 | PRINTKW("Message malformed\n"); | ||
2484 | return REC_OPERAND_INV; | 2492 | return REC_OPERAND_INV; |
2485 | case REP82_ERROR_OPERAND_SIZE: | 2493 | case REP82_ERROR_OPERAND_SIZE: |
2486 | return REC_OPERAND_SIZE; | 2494 | return REC_OPERAND_SIZE; |
diff --git a/drivers/s390/crypto/z90main.c b/drivers/s390/crypto/z90main.c index 7d6f19030ef9..982acc7303ea 100644 --- a/drivers/s390/crypto/z90main.c +++ b/drivers/s390/crypto/z90main.c | |||
@@ -1,9 +1,9 @@ | |||
1 | /* | 1 | /* |
2 | * linux/drivers/s390/crypto/z90main.c | 2 | * linux/drivers/s390/crypto/z90main.c |
3 | * | 3 | * |
4 | * z90crypt 1.3.2 | 4 | * z90crypt 1.3.3 |
5 | * | 5 | * |
6 | * Copyright (C) 2001, 2004 IBM Corporation | 6 | * Copyright (C) 2001, 2005 IBM Corporation |
7 | * Author(s): Robert Burroughs (burrough@us.ibm.com) | 7 | * Author(s): Robert Burroughs (burrough@us.ibm.com) |
8 | * Eric Rossman (edrossma@us.ibm.com) | 8 | * Eric Rossman (edrossma@us.ibm.com) |
9 | * | 9 | * |
@@ -707,13 +707,12 @@ z90crypt_open(struct inode *inode, struct file *filp) | |||
707 | if (quiesce_z90crypt) | 707 | if (quiesce_z90crypt) |
708 | return -EQUIESCE; | 708 | return -EQUIESCE; |
709 | 709 | ||
710 | private_data_p = kmalloc(sizeof(struct priv_data), GFP_KERNEL); | 710 | private_data_p = kzalloc(sizeof(struct priv_data), GFP_KERNEL); |
711 | if (!private_data_p) { | 711 | if (!private_data_p) { |
712 | PRINTK("Memory allocate failed\n"); | 712 | PRINTK("Memory allocate failed\n"); |
713 | return -ENOMEM; | 713 | return -ENOMEM; |
714 | } | 714 | } |
715 | 715 | ||
716 | memset((void *)private_data_p, 0, sizeof(struct priv_data)); | ||
717 | private_data_p->status = STAT_OPEN; | 716 | private_data_p->status = STAT_OPEN; |
718 | private_data_p->opener_pid = PID(); | 717 | private_data_p->opener_pid = PID(); |
719 | filp->private_data = private_data_p; | 718 | filp->private_data = private_data_p; |
@@ -991,6 +990,7 @@ remove_device(struct device *device_p) | |||
991 | * PCIXCC_MCL2 512-2048 ----- (applying any GA LIC will make an MCL3 card) | 990 | * PCIXCC_MCL2 512-2048 ----- (applying any GA LIC will make an MCL3 card) |
992 | * PCIXCC_MCL3 ----- 128-2048 | 991 | * PCIXCC_MCL3 ----- 128-2048 |
993 | * CEX2C 512-2048 128-2048 | 992 | * CEX2C 512-2048 128-2048 |
993 | * CEX2A ??-2048 same (the lower limit is less than 128 bit...) | ||
994 | * | 994 | * |
995 | * ext_bitlens (extended bitlengths) is a global, since you should not apply an | 995 | * ext_bitlens (extended bitlengths) is a global, since you should not apply an |
996 | * MCL to just one card in a machine. We assume, at first, that all cards have | 996 | * MCL to just one card in a machine. We assume, at first, that all cards have |
@@ -2736,13 +2736,11 @@ create_z90crypt(int *cdx_p) | |||
2736 | z90crypt.max_count = Z90CRYPT_NUM_DEVS; | 2736 | z90crypt.max_count = Z90CRYPT_NUM_DEVS; |
2737 | z90crypt.cdx = *cdx_p; | 2737 | z90crypt.cdx = *cdx_p; |
2738 | 2738 | ||
2739 | hdware_blk_p = (struct hdware_block *) | 2739 | hdware_blk_p = kzalloc(sizeof(struct hdware_block), GFP_ATOMIC); |
2740 | kmalloc(sizeof(struct hdware_block), GFP_ATOMIC); | ||
2741 | if (!hdware_blk_p) { | 2740 | if (!hdware_blk_p) { |
2742 | PDEBUG("kmalloc for hardware block failed\n"); | 2741 | PDEBUG("kmalloc for hardware block failed\n"); |
2743 | return ENOMEM; | 2742 | return ENOMEM; |
2744 | } | 2743 | } |
2745 | memset(hdware_blk_p, 0x00, sizeof(struct hdware_block)); | ||
2746 | z90crypt.hdware_info = hdware_blk_p; | 2744 | z90crypt.hdware_info = hdware_blk_p; |
2747 | 2745 | ||
2748 | return 0; | 2746 | return 0; |
@@ -2977,12 +2975,11 @@ create_crypto_device(int index) | |||
2977 | total_size = sizeof(struct device) + | 2975 | total_size = sizeof(struct device) + |
2978 | z90crypt.q_depth_array[index] * sizeof(int); | 2976 | z90crypt.q_depth_array[index] * sizeof(int); |
2979 | 2977 | ||
2980 | dev_ptr = (struct device *) kmalloc(total_size, GFP_ATOMIC); | 2978 | dev_ptr = kzalloc(total_size, GFP_ATOMIC); |
2981 | if (!dev_ptr) { | 2979 | if (!dev_ptr) { |
2982 | PRINTK("kmalloc device %d failed\n", index); | 2980 | PRINTK("kmalloc device %d failed\n", index); |
2983 | return ENOMEM; | 2981 | return ENOMEM; |
2984 | } | 2982 | } |
2985 | memset(dev_ptr, 0, total_size); | ||
2986 | dev_ptr->dev_resp_p = kmalloc(MAX_RESPONSE_SIZE, GFP_ATOMIC); | 2983 | dev_ptr->dev_resp_p = kmalloc(MAX_RESPONSE_SIZE, GFP_ATOMIC); |
2987 | if (!dev_ptr->dev_resp_p) { | 2984 | if (!dev_ptr->dev_resp_p) { |
2988 | kfree(dev_ptr); | 2985 | kfree(dev_ptr); |
diff --git a/drivers/s390/net/claw.c b/drivers/s390/net/claw.c index a86436a7a606..23d53bf9daf1 100644 --- a/drivers/s390/net/claw.c +++ b/drivers/s390/net/claw.c | |||
@@ -310,7 +310,7 @@ claw_probe(struct ccwgroup_device *cgdev) | |||
310 | printk(KERN_INFO "claw: variable cgdev =\n"); | 310 | printk(KERN_INFO "claw: variable cgdev =\n"); |
311 | dumpit((char *)cgdev, sizeof(struct ccwgroup_device)); | 311 | dumpit((char *)cgdev, sizeof(struct ccwgroup_device)); |
312 | #endif | 312 | #endif |
313 | privptr = kmalloc(sizeof(struct claw_privbk), GFP_KERNEL); | 313 | privptr = kzalloc(sizeof(struct claw_privbk), GFP_KERNEL); |
314 | if (privptr == NULL) { | 314 | if (privptr == NULL) { |
315 | probe_error(cgdev); | 315 | probe_error(cgdev); |
316 | put_device(&cgdev->dev); | 316 | put_device(&cgdev->dev); |
@@ -319,7 +319,6 @@ claw_probe(struct ccwgroup_device *cgdev) | |||
319 | CLAW_DBF_TEXT_(2,setup,"probex%d",-ENOMEM); | 319 | CLAW_DBF_TEXT_(2,setup,"probex%d",-ENOMEM); |
320 | return -ENOMEM; | 320 | return -ENOMEM; |
321 | } | 321 | } |
322 | memset(privptr,0x00,sizeof(struct claw_privbk)); | ||
323 | privptr->p_mtc_envelope= kmalloc( MAX_ENVELOPE_SIZE, GFP_KERNEL); | 322 | privptr->p_mtc_envelope= kmalloc( MAX_ENVELOPE_SIZE, GFP_KERNEL); |
324 | privptr->p_env = kmalloc(sizeof(struct claw_env), GFP_KERNEL); | 323 | privptr->p_env = kmalloc(sizeof(struct claw_env), GFP_KERNEL); |
325 | if ((privptr->p_mtc_envelope==NULL) || (privptr->p_env==NULL)) { | 324 | if ((privptr->p_mtc_envelope==NULL) || (privptr->p_env==NULL)) { |
@@ -1404,7 +1403,7 @@ add_claw_reads(struct net_device *dev, struct ccwbk* p_first, | |||
1404 | 1403 | ||
1405 | if ( privptr-> p_read_active_first ==NULL ) { | 1404 | if ( privptr-> p_read_active_first ==NULL ) { |
1406 | #ifdef DEBUGMSG | 1405 | #ifdef DEBUGMSG |
1407 | printk(KERN_INFO "%s:%s p_read_active_frist == NULL \n", | 1406 | printk(KERN_INFO "%s:%s p_read_active_first == NULL \n", |
1408 | dev->name,__FUNCTION__); | 1407 | dev->name,__FUNCTION__); |
1409 | printk(KERN_INFO "%s:%s Read active first/last changed \n", | 1408 | printk(KERN_INFO "%s:%s Read active first/last changed \n", |
1410 | dev->name,__FUNCTION__); | 1409 | dev->name,__FUNCTION__); |
diff --git a/drivers/s390/net/fsm.c b/drivers/s390/net/fsm.c index 6caf5fa6a3b5..7145e2134cf0 100644 --- a/drivers/s390/net/fsm.c +++ b/drivers/s390/net/fsm.c | |||
@@ -21,38 +21,34 @@ init_fsm(char *name, const char **state_names, const char **event_names, int nr_ | |||
21 | fsm_function_t *m; | 21 | fsm_function_t *m; |
22 | fsm *f; | 22 | fsm *f; |
23 | 23 | ||
24 | this = (fsm_instance *)kmalloc(sizeof(fsm_instance), order); | 24 | this = kzalloc(sizeof(fsm_instance), order); |
25 | if (this == NULL) { | 25 | if (this == NULL) { |
26 | printk(KERN_WARNING | 26 | printk(KERN_WARNING |
27 | "fsm(%s): init_fsm: Couldn't alloc instance\n", name); | 27 | "fsm(%s): init_fsm: Couldn't alloc instance\n", name); |
28 | return NULL; | 28 | return NULL; |
29 | } | 29 | } |
30 | memset(this, 0, sizeof(fsm_instance)); | ||
31 | strlcpy(this->name, name, sizeof(this->name)); | 30 | strlcpy(this->name, name, sizeof(this->name)); |
32 | 31 | ||
33 | f = (fsm *)kmalloc(sizeof(fsm), order); | 32 | f = kzalloc(sizeof(fsm), order); |
34 | if (f == NULL) { | 33 | if (f == NULL) { |
35 | printk(KERN_WARNING | 34 | printk(KERN_WARNING |
36 | "fsm(%s): init_fsm: Couldn't alloc fsm\n", name); | 35 | "fsm(%s): init_fsm: Couldn't alloc fsm\n", name); |
37 | kfree_fsm(this); | 36 | kfree_fsm(this); |
38 | return NULL; | 37 | return NULL; |
39 | } | 38 | } |
40 | memset(f, 0, sizeof(fsm)); | ||
41 | f->nr_events = nr_events; | 39 | f->nr_events = nr_events; |
42 | f->nr_states = nr_states; | 40 | f->nr_states = nr_states; |
43 | f->event_names = event_names; | 41 | f->event_names = event_names; |
44 | f->state_names = state_names; | 42 | f->state_names = state_names; |
45 | this->f = f; | 43 | this->f = f; |
46 | 44 | ||
47 | m = (fsm_function_t *)kmalloc( | 45 | m = kcalloc(nr_states*nr_events, sizeof(fsm_function_t), order); |
48 | sizeof(fsm_function_t) * nr_states * nr_events, order); | ||
49 | if (m == NULL) { | 46 | if (m == NULL) { |
50 | printk(KERN_WARNING | 47 | printk(KERN_WARNING |
51 | "fsm(%s): init_fsm: Couldn't alloc jumptable\n", name); | 48 | "fsm(%s): init_fsm: Couldn't alloc jumptable\n", name); |
52 | kfree_fsm(this); | 49 | kfree_fsm(this); |
53 | return NULL; | 50 | return NULL; |
54 | } | 51 | } |
55 | memset(m, 0, sizeof(fsm_function_t) * f->nr_states * f->nr_events); | ||
56 | f->jumpmatrix = m; | 52 | f->jumpmatrix = m; |
57 | 53 | ||
58 | for (i = 0; i < tmpl_len; i++) { | 54 | for (i = 0; i < tmpl_len; i++) { |
diff --git a/drivers/s390/net/iucv.c b/drivers/s390/net/iucv.c index 760e77ec5a11..6190be9dca99 100644 --- a/drivers/s390/net/iucv.c +++ b/drivers/s390/net/iucv.c | |||
@@ -386,7 +386,7 @@ iucv_init(void) | |||
386 | } | 386 | } |
387 | 387 | ||
388 | /* Note: GFP_DMA is used to get memory below 2G */ | 388 | /* Note: GFP_DMA is used to get memory below 2G */ |
389 | iucv_external_int_buffer = kmalloc(sizeof(iucv_GeneralInterrupt), | 389 | iucv_external_int_buffer = kzalloc(sizeof(iucv_GeneralInterrupt), |
390 | GFP_KERNEL|GFP_DMA); | 390 | GFP_KERNEL|GFP_DMA); |
391 | if (!iucv_external_int_buffer) { | 391 | if (!iucv_external_int_buffer) { |
392 | printk(KERN_WARNING | 392 | printk(KERN_WARNING |
@@ -396,10 +396,9 @@ iucv_init(void) | |||
396 | bus_unregister(&iucv_bus); | 396 | bus_unregister(&iucv_bus); |
397 | return -ENOMEM; | 397 | return -ENOMEM; |
398 | } | 398 | } |
399 | memset(iucv_external_int_buffer, 0, sizeof(iucv_GeneralInterrupt)); | ||
400 | 399 | ||
401 | /* Initialize parameter pool */ | 400 | /* Initialize parameter pool */ |
402 | iucv_param_pool = kmalloc(sizeof(iucv_param) * PARAM_POOL_SIZE, | 401 | iucv_param_pool = kzalloc(sizeof(iucv_param) * PARAM_POOL_SIZE, |
403 | GFP_KERNEL|GFP_DMA); | 402 | GFP_KERNEL|GFP_DMA); |
404 | if (!iucv_param_pool) { | 403 | if (!iucv_param_pool) { |
405 | printk(KERN_WARNING "%s: Could not allocate param pool\n", | 404 | printk(KERN_WARNING "%s: Could not allocate param pool\n", |
@@ -410,7 +409,6 @@ iucv_init(void) | |||
410 | bus_unregister(&iucv_bus); | 409 | bus_unregister(&iucv_bus); |
411 | return -ENOMEM; | 410 | return -ENOMEM; |
412 | } | 411 | } |
413 | memset(iucv_param_pool, 0, sizeof(iucv_param) * PARAM_POOL_SIZE); | ||
414 | 412 | ||
415 | /* Initialize irq queue */ | 413 | /* Initialize irq queue */ |
416 | INIT_LIST_HEAD(&iucv_irq_queue); | 414 | INIT_LIST_HEAD(&iucv_irq_queue); |
@@ -793,15 +791,14 @@ iucv_register_program (__u8 pgmname[16], | |||
793 | } | 791 | } |
794 | 792 | ||
795 | max_connections = iucv_query_maxconn(); | 793 | max_connections = iucv_query_maxconn(); |
796 | iucv_pathid_table = kmalloc(max_connections * sizeof(handler *), | 794 | iucv_pathid_table = kcalloc(max_connections, sizeof(handler *), |
797 | GFP_ATOMIC); | 795 | GFP_ATOMIC); |
798 | if (iucv_pathid_table == NULL) { | 796 | if (iucv_pathid_table == NULL) { |
799 | printk(KERN_WARNING "%s: iucv_pathid_table storage " | 797 | printk(KERN_WARNING "%s: iucv_pathid_table storage " |
800 | "allocation failed\n", __FUNCTION__); | 798 | "allocation failed\n", __FUNCTION__); |
801 | kfree(new_handler); | 799 | kfree(new_handler); |
802 | return NULL; | 800 | return NULL; |
803 | } | 801 | } |
804 | memset (iucv_pathid_table, 0, max_connections * sizeof(handler *)); | ||
805 | } | 802 | } |
806 | memset(new_handler, 0, sizeof (handler)); | 803 | memset(new_handler, 0, sizeof (handler)); |
807 | memcpy(new_handler->id.user_data, pgmname, | 804 | memcpy(new_handler->id.user_data, pgmname, |
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c index 9cf88d7201d3..edcf05d5d568 100644 --- a/drivers/s390/net/lcs.c +++ b/drivers/s390/net/lcs.c | |||
@@ -115,11 +115,10 @@ lcs_alloc_channel(struct lcs_channel *channel) | |||
115 | LCS_DBF_TEXT(2, setup, "ichalloc"); | 115 | LCS_DBF_TEXT(2, setup, "ichalloc"); |
116 | for (cnt = 0; cnt < LCS_NUM_BUFFS; cnt++) { | 116 | for (cnt = 0; cnt < LCS_NUM_BUFFS; cnt++) { |
117 | /* alloc memory for iobuffer */ | 117 | /* alloc memory for iobuffer */ |
118 | channel->iob[cnt].data = (void *) | 118 | channel->iob[cnt].data = |
119 | kmalloc(LCS_IOBUFFERSIZE, GFP_DMA | GFP_KERNEL); | 119 | kzalloc(LCS_IOBUFFERSIZE, GFP_DMA | GFP_KERNEL); |
120 | if (channel->iob[cnt].data == NULL) | 120 | if (channel->iob[cnt].data == NULL) |
121 | break; | 121 | break; |
122 | memset(channel->iob[cnt].data, 0, LCS_IOBUFFERSIZE); | ||
123 | channel->iob[cnt].state = BUF_STATE_EMPTY; | 122 | channel->iob[cnt].state = BUF_STATE_EMPTY; |
124 | } | 123 | } |
125 | if (cnt < LCS_NUM_BUFFS) { | 124 | if (cnt < LCS_NUM_BUFFS) { |
@@ -182,10 +181,9 @@ lcs_alloc_card(void) | |||
182 | 181 | ||
183 | LCS_DBF_TEXT(2, setup, "alloclcs"); | 182 | LCS_DBF_TEXT(2, setup, "alloclcs"); |
184 | 183 | ||
185 | card = kmalloc(sizeof(struct lcs_card), GFP_KERNEL | GFP_DMA); | 184 | card = kzalloc(sizeof(struct lcs_card), GFP_KERNEL | GFP_DMA); |
186 | if (card == NULL) | 185 | if (card == NULL) |
187 | return NULL; | 186 | return NULL; |
188 | memset(card, 0, sizeof(struct lcs_card)); | ||
189 | card->lan_type = LCS_FRAME_TYPE_AUTO; | 187 | card->lan_type = LCS_FRAME_TYPE_AUTO; |
190 | card->pkt_seq = 0; | 188 | card->pkt_seq = 0; |
191 | card->lancmd_timeout = LCS_LANCMD_TIMEOUT_DEFAULT; | 189 | card->lancmd_timeout = LCS_LANCMD_TIMEOUT_DEFAULT; |
@@ -793,10 +791,9 @@ lcs_alloc_reply(struct lcs_cmd *cmd) | |||
793 | 791 | ||
794 | LCS_DBF_TEXT(4, trace, "getreply"); | 792 | LCS_DBF_TEXT(4, trace, "getreply"); |
795 | 793 | ||
796 | reply = kmalloc(sizeof(struct lcs_reply), GFP_ATOMIC); | 794 | reply = kzalloc(sizeof(struct lcs_reply), GFP_ATOMIC); |
797 | if (!reply) | 795 | if (!reply) |
798 | return NULL; | 796 | return NULL; |
799 | memset(reply,0,sizeof(struct lcs_reply)); | ||
800 | atomic_set(&reply->refcnt,1); | 797 | atomic_set(&reply->refcnt,1); |
801 | reply->sequence_no = cmd->sequence_no; | 798 | reply->sequence_no = cmd->sequence_no; |
802 | reply->received = 0; | 799 | reply->received = 0; |
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c index 71d3853e8682..260a93c8c442 100644 --- a/drivers/s390/net/netiucv.c +++ b/drivers/s390/net/netiucv.c | |||
@@ -1728,14 +1728,13 @@ static int | |||
1728 | netiucv_register_device(struct net_device *ndev) | 1728 | netiucv_register_device(struct net_device *ndev) |
1729 | { | 1729 | { |
1730 | struct netiucv_priv *priv = ndev->priv; | 1730 | struct netiucv_priv *priv = ndev->priv; |
1731 | struct device *dev = kmalloc(sizeof(struct device), GFP_KERNEL); | 1731 | struct device *dev = kzalloc(sizeof(struct device), GFP_KERNEL); |
1732 | int ret; | 1732 | int ret; |
1733 | 1733 | ||
1734 | 1734 | ||
1735 | IUCV_DBF_TEXT(trace, 3, __FUNCTION__); | 1735 | IUCV_DBF_TEXT(trace, 3, __FUNCTION__); |
1736 | 1736 | ||
1737 | if (dev) { | 1737 | if (dev) { |
1738 | memset(dev, 0, sizeof(struct device)); | ||
1739 | snprintf(dev->bus_id, BUS_ID_SIZE, "net%s", ndev->name); | 1738 | snprintf(dev->bus_id, BUS_ID_SIZE, "net%s", ndev->name); |
1740 | dev->bus = &iucv_bus; | 1739 | dev->bus = &iucv_bus; |
1741 | dev->parent = iucv_root; | 1740 | dev->parent = iucv_root; |
@@ -1784,11 +1783,9 @@ netiucv_new_connection(struct net_device *dev, char *username) | |||
1784 | { | 1783 | { |
1785 | struct iucv_connection **clist = &iucv_connections; | 1784 | struct iucv_connection **clist = &iucv_connections; |
1786 | struct iucv_connection *conn = | 1785 | struct iucv_connection *conn = |
1787 | (struct iucv_connection *) | 1786 | kzalloc(sizeof(struct iucv_connection), GFP_KERNEL); |
1788 | kmalloc(sizeof(struct iucv_connection), GFP_KERNEL); | ||
1789 | 1787 | ||
1790 | if (conn) { | 1788 | if (conn) { |
1791 | memset(conn, 0, sizeof(struct iucv_connection)); | ||
1792 | skb_queue_head_init(&conn->collect_queue); | 1789 | skb_queue_head_init(&conn->collect_queue); |
1793 | skb_queue_head_init(&conn->commit_queue); | 1790 | skb_queue_head_init(&conn->commit_queue); |
1794 | conn->max_buffsize = NETIUCV_BUFSIZE_DEFAULT; | 1791 | conn->max_buffsize = NETIUCV_BUFSIZE_DEFAULT; |
diff --git a/drivers/s390/net/qeth_eddp.c b/drivers/s390/net/qeth_eddp.c index 82cb4af2f0e7..44e226f211e7 100644 --- a/drivers/s390/net/qeth_eddp.c +++ b/drivers/s390/net/qeth_eddp.c | |||
@@ -389,9 +389,8 @@ qeth_eddp_create_eddp_data(struct qeth_hdr *qh, u8 *nh, u8 nhl, u8 *th, u8 thl) | |||
389 | struct qeth_eddp_data *eddp; | 389 | struct qeth_eddp_data *eddp; |
390 | 390 | ||
391 | QETH_DBF_TEXT(trace, 5, "eddpcrda"); | 391 | QETH_DBF_TEXT(trace, 5, "eddpcrda"); |
392 | eddp = kmalloc(sizeof(struct qeth_eddp_data), GFP_ATOMIC); | 392 | eddp = kzalloc(sizeof(struct qeth_eddp_data), GFP_ATOMIC); |
393 | if (eddp){ | 393 | if (eddp){ |
394 | memset(eddp, 0, sizeof(struct qeth_eddp_data)); | ||
395 | eddp->nhl = nhl; | 394 | eddp->nhl = nhl; |
396 | eddp->thl = thl; | 395 | eddp->thl = thl; |
397 | memcpy(&eddp->qh, qh, sizeof(struct qeth_hdr)); | 396 | memcpy(&eddp->qh, qh, sizeof(struct qeth_hdr)); |
@@ -542,12 +541,11 @@ qeth_eddp_create_context_generic(struct qeth_card *card, struct sk_buff *skb, | |||
542 | 541 | ||
543 | QETH_DBF_TEXT(trace, 5, "creddpcg"); | 542 | QETH_DBF_TEXT(trace, 5, "creddpcg"); |
544 | /* create the context and allocate pages */ | 543 | /* create the context and allocate pages */ |
545 | ctx = kmalloc(sizeof(struct qeth_eddp_context), GFP_ATOMIC); | 544 | ctx = kzalloc(sizeof(struct qeth_eddp_context), GFP_ATOMIC); |
546 | if (ctx == NULL){ | 545 | if (ctx == NULL){ |
547 | QETH_DBF_TEXT(trace, 2, "ceddpcn1"); | 546 | QETH_DBF_TEXT(trace, 2, "ceddpcn1"); |
548 | return NULL; | 547 | return NULL; |
549 | } | 548 | } |
550 | memset(ctx, 0, sizeof(struct qeth_eddp_context)); | ||
551 | ctx->type = QETH_LARGE_SEND_EDDP; | 549 | ctx->type = QETH_LARGE_SEND_EDDP; |
552 | qeth_eddp_calc_num_pages(ctx, skb, hdr_len); | 550 | qeth_eddp_calc_num_pages(ctx, skb, hdr_len); |
553 | if (ctx->elements_per_skb > QETH_MAX_BUFFER_ELEMENTS(card)){ | 551 | if (ctx->elements_per_skb > QETH_MAX_BUFFER_ELEMENTS(card)){ |
@@ -555,13 +553,12 @@ qeth_eddp_create_context_generic(struct qeth_card *card, struct sk_buff *skb, | |||
555 | kfree(ctx); | 553 | kfree(ctx); |
556 | return NULL; | 554 | return NULL; |
557 | } | 555 | } |
558 | ctx->pages = kmalloc(ctx->num_pages * sizeof(u8 *), GFP_ATOMIC); | 556 | ctx->pages = kcalloc(ctx->num_pages, sizeof(u8 *), GFP_ATOMIC); |
559 | if (ctx->pages == NULL){ | 557 | if (ctx->pages == NULL){ |
560 | QETH_DBF_TEXT(trace, 2, "ceddpcn2"); | 558 | QETH_DBF_TEXT(trace, 2, "ceddpcn2"); |
561 | kfree(ctx); | 559 | kfree(ctx); |
562 | return NULL; | 560 | return NULL; |
563 | } | 561 | } |
564 | memset(ctx->pages, 0, ctx->num_pages * sizeof(u8 *)); | ||
565 | for (i = 0; i < ctx->num_pages; ++i){ | 562 | for (i = 0; i < ctx->num_pages; ++i){ |
566 | addr = (u8 *)__get_free_page(GFP_ATOMIC); | 563 | addr = (u8 *)__get_free_page(GFP_ATOMIC); |
567 | if (addr == NULL){ | 564 | if (addr == NULL){ |
@@ -573,15 +570,13 @@ qeth_eddp_create_context_generic(struct qeth_card *card, struct sk_buff *skb, | |||
573 | memset(addr, 0, PAGE_SIZE); | 570 | memset(addr, 0, PAGE_SIZE); |
574 | ctx->pages[i] = addr; | 571 | ctx->pages[i] = addr; |
575 | } | 572 | } |
576 | ctx->elements = kmalloc(ctx->num_elements * | 573 | ctx->elements = kcalloc(ctx->num_elements, |
577 | sizeof(struct qeth_eddp_element), GFP_ATOMIC); | 574 | sizeof(struct qeth_eddp_element), GFP_ATOMIC); |
578 | if (ctx->elements == NULL){ | 575 | if (ctx->elements == NULL){ |
579 | QETH_DBF_TEXT(trace, 2, "ceddpcn4"); | 576 | QETH_DBF_TEXT(trace, 2, "ceddpcn4"); |
580 | qeth_eddp_free_context(ctx); | 577 | qeth_eddp_free_context(ctx); |
581 | return NULL; | 578 | return NULL; |
582 | } | 579 | } |
583 | memset(ctx->elements, 0, | ||
584 | ctx->num_elements * sizeof(struct qeth_eddp_element)); | ||
585 | /* reset num_elements; will be incremented again in fill_buffer to | 580 | /* reset num_elements; will be incremented again in fill_buffer to |
586 | * reflect number of actually used elements */ | 581 | * reflect number of actually used elements */ |
587 | ctx->num_elements = 0; | 582 | ctx->num_elements = 0; |
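The qeth_eddp hunks use kcalloc() for both run-time sized arrays. Besides the overflow check on the multiplication, the zero-filled pointer table is what keeps the error unwinding safe: slots that were never filled stay NULL and can be skipped when the pages are released. A simplified sketch of that allocate-then-unwind shape (all names are hypothetical):

	#include <linux/types.h>
	#include <linux/slab.h>
	#include <linux/gfp.h>

	/* Stand-in for the EDDP context: a page table plus an element
	 * array, both sized at run time. */
	struct ctx_example {
		int num_pages;
		u8 **pages;
		void *elements;
	};

	static struct ctx_example *ctx_example_create(int num_pages,
						      int num_elements)
	{
		struct ctx_example *ctx;
		int i;

		ctx = kzalloc(sizeof(*ctx), GFP_ATOMIC);
		if (!ctx)
			return NULL;
		ctx->num_pages = num_pages;

		/* kcalloc() returns a zeroed pointer table, so unused
		 * slots are NULL and the unwind below stays safe. */
		ctx->pages = kcalloc(num_pages, sizeof(u8 *), GFP_ATOMIC);
		if (!ctx->pages)
			goto free_ctx;

		for (i = 0; i < num_pages; i++) {
			ctx->pages[i] = (u8 *) __get_free_page(GFP_ATOMIC);
			if (!ctx->pages[i])
				goto free_pages;
		}

		ctx->elements = kcalloc(num_elements, sizeof(long), GFP_ATOMIC);
		if (!ctx->elements)
			goto free_pages;
		return ctx;

	free_pages:
		for (i = 0; i < num_pages; i++)
			if (ctx->pages[i])
				free_page((unsigned long) ctx->pages[i]);
		kfree(ctx->pages);
	free_ctx:
		kfree(ctx);
		return NULL;
	}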
diff --git a/drivers/s390/net/qeth_main.c b/drivers/s390/net/qeth_main.c index dba7f7f02e79..b3c6e7907790 100644 --- a/drivers/s390/net/qeth_main.c +++ b/drivers/s390/net/qeth_main.c | |||
@@ -297,12 +297,10 @@ qeth_alloc_card(void) | |||
297 | struct qeth_card *card; | 297 | struct qeth_card *card; |
298 | 298 | ||
299 | QETH_DBF_TEXT(setup, 2, "alloccrd"); | 299 | QETH_DBF_TEXT(setup, 2, "alloccrd"); |
300 | card = (struct qeth_card *) kmalloc(sizeof(struct qeth_card), | 300 | card = kzalloc(sizeof(struct qeth_card), GFP_DMA|GFP_KERNEL); |
301 | GFP_DMA|GFP_KERNEL); | ||
302 | if (!card) | 301 | if (!card) |
303 | return NULL; | 302 | return NULL; |
304 | QETH_DBF_HEX(setup, 2, &card, sizeof(void *)); | 303 | QETH_DBF_HEX(setup, 2, &card, sizeof(void *)); |
305 | memset(card, 0, sizeof(struct qeth_card)); | ||
306 | if (qeth_setup_channel(&card->read)) { | 304 | if (qeth_setup_channel(&card->read)) { |
307 | kfree(card); | 305 | kfree(card); |
308 | return NULL; | 306 | return NULL; |
@@ -1364,7 +1362,7 @@ qeth_wait_for_buffer(struct qeth_channel *channel) | |||
1364 | static void | 1362 | static void |
1365 | qeth_clear_cmd_buffers(struct qeth_channel *channel) | 1363 | qeth_clear_cmd_buffers(struct qeth_channel *channel) |
1366 | { | 1364 | { |
1367 | int cnt = 0; | 1365 | int cnt; |
1368 | 1366 | ||
1369 | for (cnt=0; cnt < QETH_CMD_BUFFER_NO; cnt++) | 1367 | for (cnt=0; cnt < QETH_CMD_BUFFER_NO; cnt++) |
1370 | qeth_release_buffer(channel,&channel->iob[cnt]); | 1368 | qeth_release_buffer(channel,&channel->iob[cnt]); |
@@ -1632,9 +1630,8 @@ qeth_alloc_reply(struct qeth_card *card) | |||
1632 | { | 1630 | { |
1633 | struct qeth_reply *reply; | 1631 | struct qeth_reply *reply; |
1634 | 1632 | ||
1635 | reply = kmalloc(sizeof(struct qeth_reply), GFP_ATOMIC); | 1633 | reply = kzalloc(sizeof(struct qeth_reply), GFP_ATOMIC); |
1636 | if (reply){ | 1634 | if (reply){ |
1637 | memset(reply, 0, sizeof(struct qeth_reply)); | ||
1638 | atomic_set(&reply->refcnt, 1); | 1635 | atomic_set(&reply->refcnt, 1); |
1639 | reply->card = card; | 1636 | reply->card = card; |
1640 | }; | 1637 | }; |
@@ -2814,11 +2811,11 @@ qeth_handle_send_error(struct qeth_card *card, | |||
2814 | QETH_DBF_TEXT_(trace,1,"%s",CARD_BUS_ID(card)); | 2811 | QETH_DBF_TEXT_(trace,1,"%s",CARD_BUS_ID(card)); |
2815 | return QETH_SEND_ERROR_LINK_FAILURE; | 2812 | return QETH_SEND_ERROR_LINK_FAILURE; |
2816 | case 3: | 2813 | case 3: |
2814 | default: | ||
2817 | QETH_DBF_TEXT(trace, 1, "SIGAcc3"); | 2815 | QETH_DBF_TEXT(trace, 1, "SIGAcc3"); |
2818 | QETH_DBF_TEXT_(trace,1,"%s",CARD_BUS_ID(card)); | 2816 | QETH_DBF_TEXT_(trace,1,"%s",CARD_BUS_ID(card)); |
2819 | return QETH_SEND_ERROR_KICK_IT; | 2817 | return QETH_SEND_ERROR_KICK_IT; |
2820 | } | 2818 | } |
2821 | return QETH_SEND_ERROR_LINK_FAILURE; | ||
2822 | } | 2819 | } |
2823 | 2820 | ||
2824 | void | 2821 | void |
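In qeth_handle_send_error() the unknown SIGA condition codes are folded into the same branch as cc=3 by adding a default: label, which in turn makes the return statement after the switch unreachable, so it is removed. The shape of the change, with placeholder names:

	/* Illustrative reduction of the condition-code dispatch; the enum
	 * and values are stand-ins, not the driver's. */
	enum send_error { SEND_OK, SEND_ERR_LINK_FAILURE, SEND_ERR_KICK_IT };

	static enum send_error handle_siga_cc(int cc)
	{
		switch (cc) {
		case 0:
			return SEND_OK;
		case 1:
		case 2:
			return SEND_ERR_LINK_FAILURE;
		case 3:
		default:
			/* unknown condition codes are treated like cc == 3 */
			return SEND_ERR_KICK_IT;
		}
		/* no trailing return: every path above already returns */
	}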
@@ -3348,13 +3345,11 @@ qeth_qdio_establish(struct qeth_card *card) | |||
3348 | 3345 | ||
3349 | QETH_DBF_TEXT(setup, 2, "qdioest"); | 3346 | QETH_DBF_TEXT(setup, 2, "qdioest"); |
3350 | 3347 | ||
3351 | qib_param_field = kmalloc(QDIO_MAX_BUFFERS_PER_Q * sizeof(char), | 3348 | qib_param_field = kzalloc(QDIO_MAX_BUFFERS_PER_Q * sizeof(char), |
3352 | GFP_KERNEL); | 3349 | GFP_KERNEL); |
3353 | if (!qib_param_field) | 3350 | if (!qib_param_field) |
3354 | return -ENOMEM; | 3351 | return -ENOMEM; |
3355 | 3352 | ||
3356 | memset(qib_param_field, 0, QDIO_MAX_BUFFERS_PER_Q * sizeof(char)); | ||
3357 | |||
3358 | qeth_create_qib_param_field(card, qib_param_field); | 3353 | qeth_create_qib_param_field(card, qib_param_field); |
3359 | qeth_create_qib_param_field_blkt(card, qib_param_field); | 3354 | qeth_create_qib_param_field_blkt(card, qib_param_field); |
3360 | 3355 | ||
@@ -3865,6 +3860,7 @@ qeth_get_cast_type(struct qeth_card *card, struct sk_buff *skb) | |||
3865 | if ((hdr_mac == QETH_TR_MAC_NC) || | 3860 | if ((hdr_mac == QETH_TR_MAC_NC) || |
3866 | (hdr_mac == QETH_TR_MAC_C)) | 3861 | (hdr_mac == QETH_TR_MAC_C)) |
3867 | return RTN_MULTICAST; | 3862 | return RTN_MULTICAST; |
3863 | break; | ||
3868 | /* eth or so multicast? */ | 3864 | /* eth or so multicast? */ |
3869 | default: | 3865 | default: |
3870 | if ((hdr_mac == QETH_ETH_MAC_V4) || | 3866 | if ((hdr_mac == QETH_ETH_MAC_V4) || |
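The added break in qeth_get_cast_type() closes a switch fallthrough: a token-ring frame that is not multicast used to fall into the default branch and be compared against the Ethernet multicast MACs as well. A hypothetical condensation of the control flow (placeholder constants and values):

	#define CAST_UNICAST		0	/* placeholder values */
	#define CAST_MULTICAST		1
	#define LINK_TYPE_TOKEN_RING	1
	#define TR_MAC_NC		0xc000000000ULL
	#define TR_MAC_C		0xc000000001ULL
	#define ETH_MAC_V4		0x01005e0000ULL
	#define ETH_MAC_V6		0x3333000000ULL

	static int cast_type_example(int link_type, unsigned long long hdr_mac)
	{
		switch (link_type) {
		case LINK_TYPE_TOKEN_RING:
			if (hdr_mac == TR_MAC_NC || hdr_mac == TR_MAC_C)
				return CAST_MULTICAST;
			break;	/* the added break: stop here for token ring */
		default:	/* Ethernet and everything else */
			if (hdr_mac == ETH_MAC_V4 || hdr_mac == ETH_MAC_V6)
				return CAST_MULTICAST;
			break;
		}
		return CAST_UNICAST;
	}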
@@ -4419,6 +4415,7 @@ qeth_send_packet(struct qeth_card *card, struct sk_buff *skb) | |||
4419 | int elements_needed = 0; | 4415 | int elements_needed = 0; |
4420 | enum qeth_large_send_types large_send = QETH_LARGE_SEND_NO; | 4416 | enum qeth_large_send_types large_send = QETH_LARGE_SEND_NO; |
4421 | struct qeth_eddp_context *ctx = NULL; | 4417 | struct qeth_eddp_context *ctx = NULL; |
4418 | int tx_bytes = skb->len; | ||
4422 | int rc; | 4419 | int rc; |
4423 | 4420 | ||
4424 | QETH_DBF_TEXT(trace, 6, "sendpkt"); | 4421 | QETH_DBF_TEXT(trace, 6, "sendpkt"); |
@@ -4499,7 +4496,7 @@ qeth_send_packet(struct qeth_card *card, struct sk_buff *skb) | |||
4499 | elements_needed, ctx); | 4496 | elements_needed, ctx); |
4500 | if (!rc){ | 4497 | if (!rc){ |
4501 | card->stats.tx_packets++; | 4498 | card->stats.tx_packets++; |
4502 | card->stats.tx_bytes += skb->len; | 4499 | card->stats.tx_bytes += tx_bytes; |
4503 | #ifdef CONFIG_QETH_PERF_STATS | 4500 | #ifdef CONFIG_QETH_PERF_STATS |
4504 | if (skb_shinfo(skb)->tso_size && | 4501 | if (skb_shinfo(skb)->tso_size && |
4505 | !(large_send == QETH_LARGE_SEND_NO)) { | 4502 | !(large_send == QETH_LARGE_SEND_NO)) { |
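qeth_send_packet() now snapshots skb->len into tx_bytes before the buffer is handed to the send path: on success the skb may already have been freed when the statistics are updated, so reading skb->len at that point would be a use after free. The shape of the fix, with a hypothetical do_send() standing in for the real transmit routine:

	#include <linux/netdevice.h>
	#include <linux/skbuff.h>

	struct xmit_priv {			/* hypothetical private data */
		struct net_device_stats stats;
	};

	/* do_send() stands in for the hardware send path, which may consume
	 * (free or hand off) the skb on success. */
	extern int do_send(struct sk_buff *skb);

	static int xmit_example(struct sk_buff *skb, struct net_device *dev)
	{
		struct xmit_priv *priv = netdev_priv(dev);
		int tx_bytes = skb->len;	/* read before the skb can go away */
		int rc;

		rc = do_send(skb);
		if (!rc) {
			priv->stats.tx_packets++;
			priv->stats.tx_bytes += tx_bytes;	/* not skb->len */
		}
		return rc;
	}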
@@ -4585,38 +4582,11 @@ qeth_mdio_read(struct net_device *dev, int phy_id, int regnum) | |||
4585 | case MII_NCONFIG: /* network interface config */ | 4582 | case MII_NCONFIG: /* network interface config */ |
4586 | break; | 4583 | break; |
4587 | default: | 4584 | default: |
4588 | rc = 0; | ||
4589 | break; | 4585 | break; |
4590 | } | 4586 | } |
4591 | return rc; | 4587 | return rc; |
4592 | } | 4588 | } |
4593 | 4589 | ||
4594 | static void | ||
4595 | qeth_mdio_write(struct net_device *dev, int phy_id, int regnum, int value) | ||
4596 | { | ||
4597 | switch(regnum){ | ||
4598 | case MII_BMCR: /* Basic mode control register */ | ||
4599 | case MII_BMSR: /* Basic mode status register */ | ||
4600 | case MII_PHYSID1: /* PHYS ID 1 */ | ||
4601 | case MII_PHYSID2: /* PHYS ID 2 */ | ||
4602 | case MII_ADVERTISE: /* Advertisement control reg */ | ||
4603 | case MII_LPA: /* Link partner ability reg */ | ||
4604 | case MII_EXPANSION: /* Expansion register */ | ||
4605 | case MII_DCOUNTER: /* disconnect counter */ | ||
4606 | case MII_FCSCOUNTER: /* false carrier counter */ | ||
4607 | case MII_NWAYTEST: /* N-way auto-neg test register */ | ||
4608 | case MII_RERRCOUNTER: /* rx error counter */ | ||
4609 | case MII_SREVISION: /* silicon revision */ | ||
4610 | case MII_RESV1: /* reserved 1 */ | ||
4611 | case MII_LBRERROR: /* loopback, rx, bypass error */ | ||
4612 | case MII_PHYADDR: /* physical address */ | ||
4613 | case MII_RESV2: /* reserved 2 */ | ||
4614 | case MII_TPISTATUS: /* TPI status for 10mbps */ | ||
4615 | case MII_NCONFIG: /* network interface config */ | ||
4616 | default: | ||
4617 | break; | ||
4618 | } | ||
4619 | } | ||
4620 | 4590 | ||
4621 | static inline const char * | 4591 | static inline const char * |
4622 | qeth_arp_get_error_cause(int *rc) | 4592 | qeth_arp_get_error_cause(int *rc) |
@@ -4844,9 +4814,8 @@ qeth_arp_query(struct qeth_card *card, char *udata) | |||
4844 | /* get size of userspace buffer and mask_bits -> 6 bytes */ | 4814 | /* get size of userspace buffer and mask_bits -> 6 bytes */ |
4845 | if (copy_from_user(&qinfo, udata, 6)) | 4815 | if (copy_from_user(&qinfo, udata, 6)) |
4846 | return -EFAULT; | 4816 | return -EFAULT; |
4847 | if (!(qinfo.udata = kmalloc(qinfo.udata_len, GFP_KERNEL))) | 4817 | if (!(qinfo.udata = kzalloc(qinfo.udata_len, GFP_KERNEL))) |
4848 | return -ENOMEM; | 4818 | return -ENOMEM; |
4849 | memset(qinfo.udata, 0, qinfo.udata_len); | ||
4850 | qinfo.udata_offset = QETH_QARP_ENTRIES_OFFSET; | 4819 | qinfo.udata_offset = QETH_QARP_ENTRIES_OFFSET; |
4851 | iob = qeth_get_setassparms_cmd(card, IPA_ARP_PROCESSING, | 4820 | iob = qeth_get_setassparms_cmd(card, IPA_ARP_PROCESSING, |
4852 | IPA_CMD_ASS_ARP_QUERY_INFO, | 4821 | IPA_CMD_ASS_ARP_QUERY_INFO, |
@@ -4994,11 +4963,10 @@ qeth_snmp_command(struct qeth_card *card, char *udata) | |||
4994 | return -EFAULT; | 4963 | return -EFAULT; |
4995 | } | 4964 | } |
4996 | qinfo.udata_len = ureq->hdr.data_len; | 4965 | qinfo.udata_len = ureq->hdr.data_len; |
4997 | if (!(qinfo.udata = kmalloc(qinfo.udata_len, GFP_KERNEL))){ | 4966 | if (!(qinfo.udata = kzalloc(qinfo.udata_len, GFP_KERNEL))){ |
4998 | kfree(ureq); | 4967 | kfree(ureq); |
4999 | return -ENOMEM; | 4968 | return -ENOMEM; |
5000 | } | 4969 | } |
5001 | memset(qinfo.udata, 0, qinfo.udata_len); | ||
5002 | qinfo.udata_offset = sizeof(struct qeth_snmp_ureq_hdr); | 4970 | qinfo.udata_offset = sizeof(struct qeth_snmp_ureq_hdr); |
5003 | 4971 | ||
5004 | iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_SNMP_CONTROL, | 4972 | iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_SNMP_CONTROL, |
@@ -5236,21 +5204,6 @@ qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) | |||
5236 | mii_data->val_out = qeth_mdio_read(dev,mii_data->phy_id, | 5204 | mii_data->val_out = qeth_mdio_read(dev,mii_data->phy_id, |
5237 | mii_data->reg_num); | 5205 | mii_data->reg_num); |
5238 | break; | 5206 | break; |
5239 | case SIOCSMIIREG: | ||
5240 | rc = -EOPNOTSUPP; | ||
5241 | break; | ||
5242 | /* TODO: remove return if qeth_mdio_write does something */ | ||
5243 | if (!capable(CAP_NET_ADMIN)){ | ||
5244 | rc = -EPERM; | ||
5245 | break; | ||
5246 | } | ||
5247 | mii_data = if_mii(rq); | ||
5248 | if (mii_data->phy_id != 0) | ||
5249 | rc = -EINVAL; | ||
5250 | else | ||
5251 | qeth_mdio_write(dev, mii_data->phy_id, mii_data->reg_num, | ||
5252 | mii_data->val_in); | ||
5253 | break; | ||
5254 | default: | 5207 | default: |
5255 | rc = -EOPNOTSUPP; | 5208 | rc = -EOPNOTSUPP; |
5256 | } | 5209 | } |
@@ -5604,12 +5557,11 @@ qeth_get_addr_buffer(enum qeth_prot_versions prot) | |||
5604 | { | 5557 | { |
5605 | struct qeth_ipaddr *addr; | 5558 | struct qeth_ipaddr *addr; |
5606 | 5559 | ||
5607 | addr = kmalloc(sizeof(struct qeth_ipaddr), GFP_ATOMIC); | 5560 | addr = kzalloc(sizeof(struct qeth_ipaddr), GFP_ATOMIC); |
5608 | if (addr == NULL) { | 5561 | if (addr == NULL) { |
5609 | PRINT_WARN("Not enough memory to add address\n"); | 5562 | PRINT_WARN("Not enough memory to add address\n"); |
5610 | return NULL; | 5563 | return NULL; |
5611 | } | 5564 | } |
5612 | memset(addr,0,sizeof(struct qeth_ipaddr)); | ||
5613 | addr->type = QETH_IP_TYPE_NORMAL; | 5565 | addr->type = QETH_IP_TYPE_NORMAL; |
5614 | addr->proto = prot; | 5566 | addr->proto = prot; |
5615 | return addr; | 5567 | return addr; |
@@ -6900,7 +6852,7 @@ qeth_send_setassparms(struct qeth_card *card, struct qeth_cmd_buffer *iob, | |||
6900 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); | 6852 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); |
6901 | if (len <= sizeof(__u32)) | 6853 | if (len <= sizeof(__u32)) |
6902 | cmd->data.setassparms.data.flags_32bit = (__u32) data; | 6854 | cmd->data.setassparms.data.flags_32bit = (__u32) data; |
6903 | else if (len > sizeof(__u32)) | 6855 | else /* (len > sizeof(__u32)) */ |
6904 | memcpy(&cmd->data.setassparms.data, (void *) data, len); | 6856 | memcpy(&cmd->data.setassparms.data, (void *) data, len); |
6905 | 6857 | ||
6906 | rc = qeth_send_ipa_cmd(card, iob, reply_cb, reply_param); | 6858 | rc = qeth_send_ipa_cmd(card, iob, reply_cb, reply_param); |
@@ -7379,11 +7331,6 @@ qeth_setrouting_v6(struct qeth_card *card) | |||
7379 | qeth_correct_routing_type(card, &card->options.route6.type, | 7331 | qeth_correct_routing_type(card, &card->options.route6.type, |
7380 | QETH_PROT_IPV6); | 7332 | QETH_PROT_IPV6); |
7381 | 7333 | ||
7382 | if ((card->options.route6.type == NO_ROUTER) || | ||
7383 | ((card->info.type == QETH_CARD_TYPE_OSAE) && | ||
7384 | (card->options.route6.type == MULTICAST_ROUTER) && | ||
7385 | !qeth_is_supported6(card,IPA_OSA_MC_ROUTER))) | ||
7386 | return 0; | ||
7387 | rc = qeth_send_setrouting(card, card->options.route6.type, | 7334 | rc = qeth_send_setrouting(card, card->options.route6.type, |
7388 | QETH_PROT_IPV6); | 7335 | QETH_PROT_IPV6); |
7389 | if (rc) { | 7336 | if (rc) { |
diff --git a/drivers/s390/net/qeth_proc.c b/drivers/s390/net/qeth_proc.c index 3c6339df879d..360d782c7ada 100644 --- a/drivers/s390/net/qeth_proc.c +++ b/drivers/s390/net/qeth_proc.c | |||
@@ -74,7 +74,7 @@ qeth_procfile_seq_next(struct seq_file *s, void *it, loff_t *offset) | |||
74 | static inline const char * | 74 | static inline const char * |
75 | qeth_get_router_str(struct qeth_card *card, int ipv) | 75 | qeth_get_router_str(struct qeth_card *card, int ipv) |
76 | { | 76 | { |
77 | int routing_type = 0; | 77 | enum qeth_routing_types routing_type = NO_ROUTER; |
78 | 78 | ||
79 | if (ipv == 4) { | 79 | if (ipv == 4) { |
80 | routing_type = card->options.route4.type; | 80 | routing_type = card->options.route4.type; |
@@ -86,26 +86,26 @@ qeth_get_router_str(struct qeth_card *card, int ipv) | |||
86 | #endif /* CONFIG_QETH_IPV6 */ | 86 | #endif /* CONFIG_QETH_IPV6 */ |
87 | } | 87 | } |
88 | 88 | ||
89 | if (routing_type == PRIMARY_ROUTER) | 89 | switch (routing_type){ |
90 | case PRIMARY_ROUTER: | ||
90 | return "pri"; | 91 | return "pri"; |
91 | else if (routing_type == SECONDARY_ROUTER) | 92 | case SECONDARY_ROUTER: |
92 | return "sec"; | 93 | return "sec"; |
93 | else if (routing_type == MULTICAST_ROUTER) { | 94 | case MULTICAST_ROUTER: |
94 | if (card->info.broadcast_capable == QETH_BROADCAST_WITHOUT_ECHO) | 95 | if (card->info.broadcast_capable == QETH_BROADCAST_WITHOUT_ECHO) |
95 | return "mc+"; | 96 | return "mc+"; |
96 | return "mc"; | 97 | return "mc"; |
97 | } else if (routing_type == PRIMARY_CONNECTOR) { | 98 | case PRIMARY_CONNECTOR: |
98 | if (card->info.broadcast_capable == QETH_BROADCAST_WITHOUT_ECHO) | 99 | if (card->info.broadcast_capable == QETH_BROADCAST_WITHOUT_ECHO) |
99 | return "p+c"; | 100 | return "p+c"; |
100 | return "p.c"; | 101 | return "p.c"; |
101 | } else if (routing_type == SECONDARY_CONNECTOR) { | 102 | case SECONDARY_CONNECTOR: |
102 | if (card->info.broadcast_capable == QETH_BROADCAST_WITHOUT_ECHO) | 103 | if (card->info.broadcast_capable == QETH_BROADCAST_WITHOUT_ECHO) |
103 | return "s+c"; | 104 | return "s+c"; |
104 | return "s.c"; | 105 | return "s.c"; |
105 | } else if (routing_type == NO_ROUTER) | 106 | default: /* NO_ROUTER */ |
106 | return "no"; | 107 | return "no"; |
107 | else | 108 | } |
108 | return "unk"; | ||
109 | } | 109 | } |
110 | 110 | ||
111 | static int | 111 | static int |
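qeth_get_router_str() is tightened in two ways: routing_type now carries the enum type instead of int, and the if/else ladder becomes a switch with NO_ROUTER as the default, so the old catch-all "unk" string is no longer reachable. The pattern in isolation, with an illustrative enum:

	/* Minimal sketch of mapping a routing enum to its display string. */
	enum route_type { NO_ROUTE, PRI_ROUTE, SEC_ROUTE, MC_ROUTE };

	static const char *route_str(enum route_type t)
	{
		switch (t) {
		case PRI_ROUTE:
			return "pri";
		case SEC_ROUTE:
			return "sec";
		case MC_ROUTE:
			return "mc";
		default:	/* NO_ROUTE */
			return "no";
		}
	}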
@@ -192,27 +192,27 @@ qeth_perf_procfile_seq_show(struct seq_file *s, void *it) | |||
192 | CARD_DDEV_ID(card), | 192 | CARD_DDEV_ID(card), |
193 | QETH_CARD_IFNAME(card) | 193 | QETH_CARD_IFNAME(card) |
194 | ); | 194 | ); |
195 | seq_printf(s, " Skb's/buffers received : %li/%i\n" | 195 | seq_printf(s, " Skb's/buffers received : %lu/%u\n" |
196 | " Skb's/buffers sent : %li/%i\n\n", | 196 | " Skb's/buffers sent : %lu/%u\n\n", |
197 | card->stats.rx_packets, card->perf_stats.bufs_rec, | 197 | card->stats.rx_packets, card->perf_stats.bufs_rec, |
198 | card->stats.tx_packets, card->perf_stats.bufs_sent | 198 | card->stats.tx_packets, card->perf_stats.bufs_sent |
199 | ); | 199 | ); |
200 | seq_printf(s, " Skb's/buffers sent without packing : %li/%i\n" | 200 | seq_printf(s, " Skb's/buffers sent without packing : %lu/%u\n" |
201 | " Skb's/buffers sent with packing : %i/%i\n\n", | 201 | " Skb's/buffers sent with packing : %u/%u\n\n", |
202 | card->stats.tx_packets - card->perf_stats.skbs_sent_pack, | 202 | card->stats.tx_packets - card->perf_stats.skbs_sent_pack, |
203 | card->perf_stats.bufs_sent - card->perf_stats.bufs_sent_pack, | 203 | card->perf_stats.bufs_sent - card->perf_stats.bufs_sent_pack, |
204 | card->perf_stats.skbs_sent_pack, | 204 | card->perf_stats.skbs_sent_pack, |
205 | card->perf_stats.bufs_sent_pack | 205 | card->perf_stats.bufs_sent_pack |
206 | ); | 206 | ); |
207 | seq_printf(s, " Skbs sent in SG mode : %i\n" | 207 | seq_printf(s, " Skbs sent in SG mode : %u\n" |
208 | " Skb fragments sent in SG mode : %i\n\n", | 208 | " Skb fragments sent in SG mode : %u\n\n", |
209 | card->perf_stats.sg_skbs_sent, | 209 | card->perf_stats.sg_skbs_sent, |
210 | card->perf_stats.sg_frags_sent); | 210 | card->perf_stats.sg_frags_sent); |
211 | seq_printf(s, " large_send tx (in Kbytes) : %i\n" | 211 | seq_printf(s, " large_send tx (in Kbytes) : %u\n" |
212 | " large_send count : %i\n\n", | 212 | " large_send count : %u\n\n", |
213 | card->perf_stats.large_send_bytes >> 10, | 213 | card->perf_stats.large_send_bytes >> 10, |
214 | card->perf_stats.large_send_cnt); | 214 | card->perf_stats.large_send_cnt); |
215 | seq_printf(s, " Packing state changes no pkg.->packing : %i/%i\n" | 215 | seq_printf(s, " Packing state changes no pkg.->packing : %u/%u\n" |
216 | " Watermarks L/H : %i/%i\n" | 216 | " Watermarks L/H : %i/%i\n" |
217 | " Current buffer usage (outbound q's) : " | 217 | " Current buffer usage (outbound q's) : " |
218 | "%i/%i/%i/%i\n\n", | 218 | "%i/%i/%i/%i\n\n", |
@@ -229,16 +229,16 @@ qeth_perf_procfile_seq_show(struct seq_file *s, void *it) | |||
229 | atomic_read(&card->qdio.out_qs[3]->used_buffers) | 229 | atomic_read(&card->qdio.out_qs[3]->used_buffers) |
230 | : 0 | 230 | : 0 |
231 | ); | 231 | ); |
232 | seq_printf(s, " Inbound handler time (in us) : %i\n" | 232 | seq_printf(s, " Inbound handler time (in us) : %u\n" |
233 | " Inbound handler count : %i\n" | 233 | " Inbound handler count : %u\n" |
234 | " Inbound do_QDIO time (in us) : %i\n" | 234 | " Inbound do_QDIO time (in us) : %u\n" |
235 | " Inbound do_QDIO count : %i\n\n" | 235 | " Inbound do_QDIO count : %u\n\n" |
236 | " Outbound handler time (in us) : %i\n" | 236 | " Outbound handler time (in us) : %u\n" |
237 | " Outbound handler count : %i\n\n" | 237 | " Outbound handler count : %u\n\n" |
238 | " Outbound time (in us, incl QDIO) : %i\n" | 238 | " Outbound time (in us, incl QDIO) : %u\n" |
239 | " Outbound count : %i\n" | 239 | " Outbound count : %u\n" |
240 | " Outbound do_QDIO time (in us) : %i\n" | 240 | " Outbound do_QDIO time (in us) : %u\n" |
241 | " Outbound do_QDIO count : %i\n\n", | 241 | " Outbound do_QDIO count : %u\n\n", |
242 | card->perf_stats.inbound_time, | 242 | card->perf_stats.inbound_time, |
243 | card->perf_stats.inbound_cnt, | 243 | card->perf_stats.inbound_cnt, |
244 | card->perf_stats.inbound_do_qdio_time, | 244 | card->perf_stats.inbound_do_qdio_time, |
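The /proc output switches from %i/%li to %u/%lu because the perf-stat fields are unsigned int and the net_device_stats counters are unsigned long; printing them as signed would show negative values once a counter passes the sign bit. For example (placeholder counter names):

	#include <linux/seq_file.h>

	static void show_counters(struct seq_file *s,
				  unsigned long rx_packets,	/* net_device_stats style */
				  unsigned int bufs_rec)	/* driver perf-stat style */
	{
		seq_printf(s, "  Skb's received   : %lu\n"	/* unsigned long -> %lu */
			      "  Buffers received : %u\n",	/* unsigned int  -> %u  */
			   rx_packets, bufs_rec);
	}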
diff --git a/drivers/s390/net/qeth_sys.c b/drivers/s390/net/qeth_sys.c index c1831f572585..882d419e4160 100644 --- a/drivers/s390/net/qeth_sys.c +++ b/drivers/s390/net/qeth_sys.c | |||
@@ -115,7 +115,7 @@ qeth_dev_portno_store(struct device *dev, struct device_attribute *attr, const c | |||
115 | return -EPERM; | 115 | return -EPERM; |
116 | 116 | ||
117 | portno = simple_strtoul(buf, &tmp, 16); | 117 | portno = simple_strtoul(buf, &tmp, 16); |
118 | if ((portno < 0) || (portno > MAX_PORTNO)){ | 118 | if (portno > MAX_PORTNO){ |
119 | PRINT_WARN("portno 0x%X is out of range\n", portno); | 119 | PRINT_WARN("portno 0x%X is out of range\n", portno); |
120 | return -EINVAL; | 120 | return -EINVAL; |
121 | } | 121 | } |
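simple_strtoul() returns an unsigned value, so the old "portno < 0" half of the range check could never be true; the hunk keeps only the upper-bound test. A sketch of the corrected parsing (MAX_PORTNO is redefined here as a placeholder; the driver has its own limit):

	#include <linux/kernel.h>
	#include <linux/errno.h>

	#define MAX_PORTNO 3	/* placeholder limit for this sketch */

	static int parse_portno(const char *buf)
	{
		char *tmp;
		unsigned int portno;

		portno = simple_strtoul(buf, &tmp, 16);
		/* unsigned: only the upper bound is a meaningful check */
		if (portno > MAX_PORTNO)
			return -EINVAL;
		return portno;
	}

Later kernels add kstrtouint(), which also rejects trailing garbage and overflow.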
@@ -1145,11 +1145,10 @@ qeth_dev_ipato_add_store(const char *buf, size_t count, | |||
1145 | if ((rc = qeth_parse_ipatoe(buf, proto, addr, &mask_bits))) | 1145 | if ((rc = qeth_parse_ipatoe(buf, proto, addr, &mask_bits))) |
1146 | return rc; | 1146 | return rc; |
1147 | 1147 | ||
1148 | if (!(ipatoe = kmalloc(sizeof(struct qeth_ipato_entry), GFP_KERNEL))){ | 1148 | if (!(ipatoe = kzalloc(sizeof(struct qeth_ipato_entry), GFP_KERNEL))){ |
1149 | PRINT_WARN("No memory to allocate ipato entry\n"); | 1149 | PRINT_WARN("No memory to allocate ipato entry\n"); |
1150 | return -ENOMEM; | 1150 | return -ENOMEM; |
1151 | } | 1151 | } |
1152 | memset(ipatoe, 0, sizeof(struct qeth_ipato_entry)); | ||
1153 | ipatoe->proto = proto; | 1152 | ipatoe->proto = proto; |
1154 | memcpy(ipatoe->addr, addr, (proto == QETH_PROT_IPV4)? 4:16); | 1153 | memcpy(ipatoe->addr, addr, (proto == QETH_PROT_IPV4)? 4:16); |
1155 | ipatoe->mask_bits = mask_bits; | 1154 | ipatoe->mask_bits = mask_bits; |
diff --git a/drivers/s390/s390_rdev.c b/drivers/s390/s390_rdev.c index e3f647169827..3c7145d9f9a1 100644 --- a/drivers/s390/s390_rdev.c +++ b/drivers/s390/s390_rdev.c | |||
@@ -27,10 +27,9 @@ s390_root_dev_register(const char *name) | |||
27 | 27 | ||
28 | if (!strlen(name)) | 28 | if (!strlen(name)) |
29 | return ERR_PTR(-EINVAL); | 29 | return ERR_PTR(-EINVAL); |
30 | dev = kmalloc(sizeof(struct device), GFP_KERNEL); | 30 | dev = kzalloc(sizeof(struct device), GFP_KERNEL); |
31 | if (!dev) | 31 | if (!dev) |
32 | return ERR_PTR(-ENOMEM); | 32 | return ERR_PTR(-ENOMEM); |
33 | memset(dev, 0, sizeof(struct device)); | ||
34 | strncpy(dev->bus_id, name, min(strlen(name), (size_t)BUS_ID_SIZE)); | 33 | strncpy(dev->bus_id, name, min(strlen(name), (size_t)BUS_ID_SIZE)); |
35 | dev->release = s390_root_dev_release; | 34 | dev->release = s390_root_dev_release; |
36 | ret = device_register(dev); | 35 | ret = device_register(dev); |
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c index 95b92f317b6f..395cfc6a344f 100644 --- a/drivers/s390/scsi/zfcp_aux.c +++ b/drivers/s390/scsi/zfcp_aux.c | |||
@@ -829,18 +829,6 @@ zfcp_unit_dequeue(struct zfcp_unit *unit) | |||
829 | device_unregister(&unit->sysfs_device); | 829 | device_unregister(&unit->sysfs_device); |
830 | } | 830 | } |
831 | 831 | ||
832 | static void * | ||
833 | zfcp_mempool_alloc(gfp_t gfp_mask, void *size) | ||
834 | { | ||
835 | return kmalloc((size_t) size, gfp_mask); | ||
836 | } | ||
837 | |||
838 | static void | ||
839 | zfcp_mempool_free(void *element, void *size) | ||
840 | { | ||
841 | kfree(element); | ||
842 | } | ||
843 | |||
844 | /* | 832 | /* |
845 | * Allocates a combined QTCB/fsf_req buffer for erp actions and fcp/SCSI | 833 | * Allocates a combined QTCB/fsf_req buffer for erp actions and fcp/SCSI |
846 | * commands. | 834 | * commands. |
@@ -853,51 +841,39 @@ static int | |||
853 | zfcp_allocate_low_mem_buffers(struct zfcp_adapter *adapter) | 841 | zfcp_allocate_low_mem_buffers(struct zfcp_adapter *adapter) |
854 | { | 842 | { |
855 | adapter->pool.fsf_req_erp = | 843 | adapter->pool.fsf_req_erp = |
856 | mempool_create(ZFCP_POOL_FSF_REQ_ERP_NR, | 844 | mempool_create_kmalloc_pool(ZFCP_POOL_FSF_REQ_ERP_NR, |
857 | zfcp_mempool_alloc, zfcp_mempool_free, (void *) | 845 | sizeof(struct zfcp_fsf_req_pool_element)); |
858 | sizeof(struct zfcp_fsf_req_pool_element)); | 846 | if (!adapter->pool.fsf_req_erp) |
859 | |||
860 | if (NULL == adapter->pool.fsf_req_erp) | ||
861 | return -ENOMEM; | 847 | return -ENOMEM; |
862 | 848 | ||
863 | adapter->pool.fsf_req_scsi = | 849 | adapter->pool.fsf_req_scsi = |
864 | mempool_create(ZFCP_POOL_FSF_REQ_SCSI_NR, | 850 | mempool_create_kmalloc_pool(ZFCP_POOL_FSF_REQ_SCSI_NR, |
865 | zfcp_mempool_alloc, zfcp_mempool_free, (void *) | 851 | sizeof(struct zfcp_fsf_req_pool_element)); |
866 | sizeof(struct zfcp_fsf_req_pool_element)); | 852 | if (!adapter->pool.fsf_req_scsi) |
867 | |||
868 | if (NULL == adapter->pool.fsf_req_scsi) | ||
869 | return -ENOMEM; | 853 | return -ENOMEM; |
870 | 854 | ||
871 | adapter->pool.fsf_req_abort = | 855 | adapter->pool.fsf_req_abort = |
872 | mempool_create(ZFCP_POOL_FSF_REQ_ABORT_NR, | 856 | mempool_create_kmalloc_pool(ZFCP_POOL_FSF_REQ_ABORT_NR, |
873 | zfcp_mempool_alloc, zfcp_mempool_free, (void *) | 857 | sizeof(struct zfcp_fsf_req_pool_element)); |
874 | sizeof(struct zfcp_fsf_req_pool_element)); | 858 | if (!adapter->pool.fsf_req_abort) |
875 | |||
876 | if (NULL == adapter->pool.fsf_req_abort) | ||
877 | return -ENOMEM; | 859 | return -ENOMEM; |
878 | 860 | ||
879 | adapter->pool.fsf_req_status_read = | 861 | adapter->pool.fsf_req_status_read = |
880 | mempool_create(ZFCP_POOL_STATUS_READ_NR, | 862 | mempool_create_kmalloc_pool(ZFCP_POOL_STATUS_READ_NR, |
881 | zfcp_mempool_alloc, zfcp_mempool_free, | 863 | sizeof(struct zfcp_fsf_req)); |
882 | (void *) sizeof(struct zfcp_fsf_req)); | 864 | if (!adapter->pool.fsf_req_status_read) |
883 | |||
884 | if (NULL == adapter->pool.fsf_req_status_read) | ||
885 | return -ENOMEM; | 865 | return -ENOMEM; |
886 | 866 | ||
887 | adapter->pool.data_status_read = | 867 | adapter->pool.data_status_read = |
888 | mempool_create(ZFCP_POOL_STATUS_READ_NR, | 868 | mempool_create_kmalloc_pool(ZFCP_POOL_STATUS_READ_NR, |
889 | zfcp_mempool_alloc, zfcp_mempool_free, | 869 | sizeof(struct fsf_status_read_buffer)); |
890 | (void *) sizeof(struct fsf_status_read_buffer)); | 870 | if (!adapter->pool.data_status_read) |
891 | |||
892 | if (NULL == adapter->pool.data_status_read) | ||
893 | return -ENOMEM; | 871 | return -ENOMEM; |
894 | 872 | ||
895 | adapter->pool.data_gid_pn = | 873 | adapter->pool.data_gid_pn = |
896 | mempool_create(ZFCP_POOL_DATA_GID_PN_NR, | 874 | mempool_create_kmalloc_pool(ZFCP_POOL_DATA_GID_PN_NR, |
897 | zfcp_mempool_alloc, zfcp_mempool_free, (void *) | 875 | sizeof(struct zfcp_gid_pn_data)); |
898 | sizeof(struct zfcp_gid_pn_data)); | 876 | if (!adapter->pool.data_gid_pn) |
899 | |||
900 | if (NULL == adapter->pool.data_gid_pn) | ||
901 | return -ENOMEM; | 877 | return -ENOMEM; |
902 | 878 | ||
903 | return 0; | 879 | return 0; |
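zfcp_aux.c drops its private zfcp_mempool_alloc()/zfcp_mempool_free() wrappers: mempool_create_kmalloc_pool() packages exactly that kmalloc/kfree pair behind one helper, which shortens every pool setup to a single call. A sketch with hypothetical adapter and request types:

	#include <linux/mempool.h>
	#include <linux/errno.h>

	struct my_request { char payload[128]; };	/* illustrative */
	struct my_adapter { mempool_t *req_pool; };

	#define NR_RESERVED_REQS 4	/* placeholder reserve size */

	static int example_create_pool(struct my_adapter *adapter)
	{
		/* kmalloc-backed pool with NR_RESERVED_REQS preallocated
		 * elements of sizeof(struct my_request) bytes each. */
		adapter->req_pool =
			mempool_create_kmalloc_pool(NR_RESERVED_REQS,
						    sizeof(struct my_request));
		if (!adapter->req_pool)
			return -ENOMEM;
		return 0;
	}

mempool_destroy() remains the matching teardown; such a pool allocates through kmalloc() and falls back to the preallocated reserve only when that fails.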
diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h index 7f551d66f47f..6eba56cd89ba 100644 --- a/drivers/s390/scsi/zfcp_def.h +++ b/drivers/s390/scsi/zfcp_def.h | |||
@@ -664,6 +664,7 @@ do { \ | |||
664 | #define ZFCP_STATUS_UNIT_TEMPORARY 0x00000002 | 664 | #define ZFCP_STATUS_UNIT_TEMPORARY 0x00000002 |
665 | #define ZFCP_STATUS_UNIT_SHARED 0x00000004 | 665 | #define ZFCP_STATUS_UNIT_SHARED 0x00000004 |
666 | #define ZFCP_STATUS_UNIT_READONLY 0x00000008 | 666 | #define ZFCP_STATUS_UNIT_READONLY 0x00000008 |
667 | #define ZFCP_STATUS_UNIT_REGISTERED 0x00000010 | ||
667 | 668 | ||
668 | /* FSF request status (this does not have a common part) */ | 669 | /* FSF request status (this does not have a common part) */ |
669 | #define ZFCP_STATUS_FSFREQ_NOT_INIT 0x00000000 | 670 | #define ZFCP_STATUS_FSFREQ_NOT_INIT 0x00000000 |
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c index e3c4bdd29a60..57cb628a05aa 100644 --- a/drivers/s390/scsi/zfcp_erp.c +++ b/drivers/s390/scsi/zfcp_erp.c | |||
@@ -3391,10 +3391,13 @@ zfcp_erp_action_cleanup(int action, struct zfcp_adapter *adapter, | |||
3391 | && (!atomic_test_mask(ZFCP_STATUS_UNIT_TEMPORARY, | 3391 | && (!atomic_test_mask(ZFCP_STATUS_UNIT_TEMPORARY, |
3392 | &unit->status)) | 3392 | &unit->status)) |
3393 | && !unit->device | 3393 | && !unit->device |
3394 | && port->rport) | 3394 | && port->rport) { |
3395 | scsi_add_device(port->adapter->scsi_host, 0, | 3395 | atomic_set_mask(ZFCP_STATUS_UNIT_REGISTERED, |
3396 | port->rport->scsi_target_id, | 3396 | &unit->status); |
3397 | unit->scsi_lun); | 3397 | scsi_scan_target(&port->rport->dev, 0, |
3398 | port->rport->scsi_target_id, | ||
3399 | unit->scsi_lun, 0); | ||
3400 | } | ||
3398 | zfcp_unit_put(unit); | 3401 | zfcp_unit_put(unit); |
3399 | break; | 3402 | break; |
3400 | case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED: | 3403 | case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED: |
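Together with the new ZFCP_STATUS_UNIT_REGISTERED flag from zfcp_def.h, the erp cleanup now marks a unit as registered and then lets the midlayer scan the target with scsi_scan_target(); the zfcp_scsi.c hunks below accept a scsi_device in slave_alloc only if the unit carries the flag, and slave_destroy clears it again. A rough sketch of that gating, using generic bitops in place of the s390 atomic_*_mask() helpers (names and the bit number are illustrative):

	#include <linux/bitops.h>
	#include <linux/device.h>
	#include <linux/errno.h>
	#include <scsi/scsi_device.h>

	#define EXAMPLE_UNIT_REGISTERED 0	/* bit number; illustrative */

	struct example_unit {
		unsigned long status;
		unsigned int channel, id, lun;
	};

	/* Recovery path: flag the unit before the midlayer scans it, so
	 * slave_alloc() can tell driver-registered LUNs apart. */
	static void example_expose_unit(struct device *rport_dev,
					struct example_unit *unit)
	{
		set_bit(EXAMPLE_UNIT_REGISTERED, &unit->status);
		scsi_scan_target(rport_dev, unit->channel, unit->id,
				 unit->lun, 0);
	}

	/* slave_alloc: refuse scsi_devices for units we never registered. */
	static int example_slave_alloc(struct scsi_device *sdev,
				       struct example_unit *unit)
	{
		if (!test_bit(EXAMPLE_UNIT_REGISTERED, &unit->status))
			return -ENXIO;
		sdev->hostdata = unit;
		return 0;
	}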
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c index 9f6b4d7a46f3..9e6d07d7b3c8 100644 --- a/drivers/s390/scsi/zfcp_scsi.c +++ b/drivers/s390/scsi/zfcp_scsi.c | |||
@@ -68,7 +68,7 @@ struct zfcp_data zfcp_data = { | |||
68 | eh_host_reset_handler: zfcp_scsi_eh_host_reset_handler, | 68 | eh_host_reset_handler: zfcp_scsi_eh_host_reset_handler, |
69 | /* FIXME(openfcp): Tune */ | 69 | /* FIXME(openfcp): Tune */ |
70 | can_queue: 4096, | 70 | can_queue: 4096, |
71 | this_id: 0, | 71 | this_id: -1, |
72 | /* | 72 | /* |
73 | * FIXME: | 73 | * FIXME: |
74 | * one less? can zfcp_create_sbale cope with it? | 74 | * one less? can zfcp_create_sbale cope with it? |
@@ -183,7 +183,8 @@ zfcp_scsi_slave_alloc(struct scsi_device *sdp) | |||
183 | 183 | ||
184 | read_lock_irqsave(&zfcp_data.config_lock, flags); | 184 | read_lock_irqsave(&zfcp_data.config_lock, flags); |
185 | unit = zfcp_unit_lookup(adapter, sdp->channel, sdp->id, sdp->lun); | 185 | unit = zfcp_unit_lookup(adapter, sdp->channel, sdp->id, sdp->lun); |
186 | if (unit) { | 186 | if (unit && atomic_test_mask(ZFCP_STATUS_UNIT_REGISTERED, |
187 | &unit->status)) { | ||
187 | sdp->hostdata = unit; | 188 | sdp->hostdata = unit; |
188 | unit->device = sdp; | 189 | unit->device = sdp; |
189 | zfcp_unit_get(unit); | 190 | zfcp_unit_get(unit); |
@@ -208,6 +209,7 @@ zfcp_scsi_slave_destroy(struct scsi_device *sdpnt) | |||
208 | struct zfcp_unit *unit = (struct zfcp_unit *) sdpnt->hostdata; | 209 | struct zfcp_unit *unit = (struct zfcp_unit *) sdpnt->hostdata; |
209 | 210 | ||
210 | if (unit) { | 211 | if (unit) { |
212 | atomic_clear_mask(ZFCP_STATUS_UNIT_REGISTERED, &unit->status); | ||
211 | sdpnt->hostdata = NULL; | 213 | sdpnt->hostdata = NULL; |
212 | unit->device = NULL; | 214 | unit->device = NULL; |
213 | zfcp_unit_put(unit); | 215 | zfcp_unit_put(unit); |
@@ -291,7 +293,7 @@ zfcp_scsi_command_async(struct zfcp_adapter *adapter, struct zfcp_unit *unit, | |||
291 | "on port 0x%016Lx in recovery\n", | 293 | "on port 0x%016Lx in recovery\n", |
292 | zfcp_get_busid_by_unit(unit), | 294 | zfcp_get_busid_by_unit(unit), |
293 | unit->fcp_lun, unit->port->wwpn); | 295 | unit->fcp_lun, unit->port->wwpn); |
294 | retval = SCSI_MLQUEUE_DEVICE_BUSY; | 296 | zfcp_scsi_command_fail(scpnt, DID_NO_CONNECT); |
295 | goto out; | 297 | goto out; |
296 | } | 298 | } |
297 | 299 | ||
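Finally, a command addressed to a unit whose port is still in recovery is no longer bounced back with SCSI_MLQUEUE_DEVICE_BUSY (which would have the midlayer requeue and retry it) but completed immediately with DID_NO_CONNECT; the same file also changes this_id to -1, the conventional value for a host adapter that does not occupy a SCSI target ID of its own. A sketch of what a helper like zfcp_scsi_command_fail() is assumed to do; the real body may differ:

	#include <scsi/scsi.h>
	#include <scsi/scsi_cmnd.h>

	/* Assumed shape of the failure helper: set the host byte and
	 * complete the command at once instead of requeueing it. */
	static void example_command_fail(struct scsi_cmnd *scpnt, int host_byte)
	{
		scpnt->result = host_byte << 16;	/* e.g. DID_NO_CONNECT */
		scpnt->scsi_done(scpnt);
	}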