Diffstat (limited to 'drivers/s390')
-rw-r--r-- | drivers/s390/block/dasd_diag.c |   1
-rw-r--r-- | drivers/s390/char/monwriter.c  |   6
-rw-r--r-- | drivers/s390/char/raw3270.c    |   1
-rw-r--r-- | drivers/s390/char/vmur.c       | 426
-rw-r--r-- | drivers/s390/char/vmur.h       |   6
-rw-r--r-- | drivers/s390/cio/cmf.c         |  10
-rw-r--r-- | drivers/s390/cio/css.c         |   1
-rw-r--r-- | drivers/s390/cio/device.c      |   5
-rw-r--r-- | drivers/s390/cio/device_id.c   |  48
-rw-r--r-- | drivers/s390/cio/qdio.c        |  97
-rw-r--r-- | drivers/s390/scsi/zfcp_fsf.c   |   5
-rw-r--r-- | drivers/s390/scsi/zfcp_qdio.c  |  41
12 files changed, 354 insertions, 293 deletions
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
index eccac1c3b71b..d32c60dbdd82 100644
--- a/drivers/s390/block/dasd_diag.c
+++ b/drivers/s390/block/dasd_diag.c
@@ -24,6 +24,7 @@ | |||
24 | #include <asm/s390_ext.h> | 24 | #include <asm/s390_ext.h> |
25 | #include <asm/todclk.h> | 25 | #include <asm/todclk.h> |
26 | #include <asm/vtoc.h> | 26 | #include <asm/vtoc.h> |
27 | #include <asm/diag.h> | ||
27 | 28 | ||
28 | #include "dasd_int.h" | 29 | #include "dasd_int.h" |
29 | #include "dasd_diag.h" | 30 | #include "dasd_diag.h" |
diff --git a/drivers/s390/char/monwriter.c b/drivers/s390/char/monwriter.c
index 268598ef3efe..20442fbf9346 100644
--- a/drivers/s390/char/monwriter.c
+++ b/drivers/s390/char/monwriter.c
@@ -17,6 +17,7 @@ | |||
17 | #include <linux/miscdevice.h> | 17 | #include <linux/miscdevice.h> |
18 | #include <linux/ctype.h> | 18 | #include <linux/ctype.h> |
19 | #include <linux/poll.h> | 19 | #include <linux/poll.h> |
20 | #include <linux/mutex.h> | ||
20 | #include <asm/uaccess.h> | 21 | #include <asm/uaccess.h> |
21 | #include <asm/ebcdic.h> | 22 | #include <asm/ebcdic.h> |
22 | #include <asm/io.h> | 23 | #include <asm/io.h> |
@@ -41,6 +42,7 @@ struct mon_private { | |||
41 | size_t hdr_to_read; | 42 | size_t hdr_to_read; |
42 | size_t data_to_read; | 43 | size_t data_to_read; |
43 | struct mon_buf *current_buf; | 44 | struct mon_buf *current_buf; |
45 | struct mutex thread_mutex; | ||
44 | }; | 46 | }; |
45 | 47 | ||
46 | /* | 48 | /* |
@@ -179,6 +181,7 @@ static int monwrite_open(struct inode *inode, struct file *filp) | |||
179 | return -ENOMEM; | 181 | return -ENOMEM; |
180 | INIT_LIST_HEAD(&monpriv->list); | 182 | INIT_LIST_HEAD(&monpriv->list); |
181 | monpriv->hdr_to_read = sizeof(monpriv->hdr); | 183 | monpriv->hdr_to_read = sizeof(monpriv->hdr); |
184 | mutex_init(&monpriv->thread_mutex); | ||
182 | filp->private_data = monpriv; | 185 | filp->private_data = monpriv; |
183 | return nonseekable_open(inode, filp); | 186 | return nonseekable_open(inode, filp); |
184 | } | 187 | } |
@@ -209,6 +212,7 @@ static ssize_t monwrite_write(struct file *filp, const char __user *data, | |||
209 | void *to; | 212 | void *to; |
210 | int rc; | 213 | int rc; |
211 | 214 | ||
215 | mutex_lock(&monpriv->thread_mutex); | ||
212 | for (written = 0; written < count; ) { | 216 | for (written = 0; written < count; ) { |
213 | if (monpriv->hdr_to_read) { | 217 | if (monpriv->hdr_to_read) { |
214 | len = min(count - written, monpriv->hdr_to_read); | 218 | len = min(count - written, monpriv->hdr_to_read); |
@@ -247,11 +251,13 @@ static ssize_t monwrite_write(struct file *filp, const char __user *data, | |||
247 | } | 251 | } |
248 | monpriv->hdr_to_read = sizeof(monpriv->hdr); | 252 | monpriv->hdr_to_read = sizeof(monpriv->hdr); |
249 | } | 253 | } |
254 | mutex_unlock(&monpriv->thread_mutex); | ||
250 | return written; | 255 | return written; |
251 | 256 | ||
252 | out_error: | 257 | out_error: |
253 | monpriv->data_to_read = 0; | 258 | monpriv->data_to_read = 0; |
254 | monpriv->hdr_to_read = sizeof(struct monwrite_hdr); | 259 | monpriv->hdr_to_read = sizeof(struct monwrite_hdr); |
260 | mutex_unlock(&monpriv->thread_mutex); | ||
255 | return rc; | 261 | return rc; |
256 | } | 262 | } |
257 | 263 | ||
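
The monwriter hunks above add a per-open mutex so that concurrent write() calls on the same file descriptor cannot interleave while the header/data state machine is updated. Below is a minimal sketch of that pattern with hypothetical names (demo_priv, demo_write), not the driver's own code:

#include <linux/mutex.h>
#include <linux/types.h>

/* Hypothetical per-open state, mirroring the thread_mutex added above. */
struct demo_priv {
	struct mutex thread_mutex;
	size_t hdr_to_read;
};

static void demo_open_init(struct demo_priv *priv)
{
	mutex_init(&priv->thread_mutex);	/* once, at open() time */
}

static ssize_t demo_write(struct demo_priv *priv, const char __user *data,
			  size_t count)
{
	ssize_t written = 0;

	mutex_lock(&priv->thread_mutex);	/* serialize writers on this fd */
	/* ... consume header and data chunks, as monwrite_write() does ... */
	mutex_unlock(&priv->thread_mutex);	/* also dropped on the error path */
	return written;
}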
diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c
index 4f2f81b16cfa..2edd5fb6d3dc 100644
--- a/drivers/s390/char/raw3270.c
+++ b/drivers/s390/char/raw3270.c
@@ -21,6 +21,7 @@ | |||
21 | #include <asm/ccwdev.h> | 21 | #include <asm/ccwdev.h> |
22 | #include <asm/cio.h> | 22 | #include <asm/cio.h> |
23 | #include <asm/ebcdic.h> | 23 | #include <asm/ebcdic.h> |
24 | #include <asm/diag.h> | ||
24 | 25 | ||
25 | #include "raw3270.h" | 26 | #include "raw3270.h" |
26 | 27 | ||
diff --git a/drivers/s390/char/vmur.c b/drivers/s390/char/vmur.c
index 161867cebd8c..d70a6e65bf14 100644
--- a/drivers/s390/char/vmur.c
+++ b/drivers/s390/char/vmur.c
@@ -14,6 +14,7 @@ | |||
14 | #include <asm/cio.h> | 14 | #include <asm/cio.h> |
15 | #include <asm/ccwdev.h> | 15 | #include <asm/ccwdev.h> |
16 | #include <asm/debug.h> | 16 | #include <asm/debug.h> |
17 | #include <asm/diag.h> | ||
17 | 18 | ||
18 | #include "vmur.h" | 19 | #include "vmur.h" |
19 | 20 | ||
@@ -68,8 +69,26 @@ static struct ccw_driver ur_driver = { | |||
68 | .set_offline = ur_set_offline, | 69 | .set_offline = ur_set_offline, |
69 | }; | 70 | }; |
70 | 71 | ||
72 | static DEFINE_MUTEX(vmur_mutex); | ||
73 | |||
71 | /* | 74 | /* |
72 | * Allocation, freeing, getting and putting of urdev structures | 75 | * Allocation, freeing, getting and putting of urdev structures |
76 | * | ||
77 | * Each ur device (urd) contains a reference to its corresponding ccw device | ||
78 | * (cdev) using the urd->cdev pointer. Each ccw device has a reference to the | ||
79 | * ur device using the cdev->dev.driver_data pointer. | ||
80 | * | ||
81 | * urd references: | ||
82 | * - ur_probe gets a urd reference, ur_remove drops the reference | ||
83 | * (cdev->dev.driver_data) | ||
84 | * - ur_open gets a urd reference, ur_relase drops the reference | ||
85 | * (urf->urd) | ||
86 | * | ||
87 | * cdev references: | ||
88 | * - urdev_alloc get a cdev reference (urd->cdev) | ||
89 | * - urdev_free drops the cdev reference (urd->cdev) | ||
90 | * | ||
91 | * Setting and clearing of cdev->dev.driver_data is protected by the ccwdev lock | ||
73 | */ | 92 | */ |
74 | static struct urdev *urdev_alloc(struct ccw_device *cdev) | 93 | static struct urdev *urdev_alloc(struct ccw_device *cdev) |
75 | { | 94 | { |
@@ -78,51 +97,72 @@ static struct urdev *urdev_alloc(struct ccw_device *cdev) | |||
78 | urd = kzalloc(sizeof(struct urdev), GFP_KERNEL); | 97 | urd = kzalloc(sizeof(struct urdev), GFP_KERNEL); |
79 | if (!urd) | 98 | if (!urd) |
80 | return NULL; | 99 | return NULL; |
81 | urd->cdev = cdev; | ||
82 | urd->reclen = cdev->id.driver_info; | 100 | urd->reclen = cdev->id.driver_info; |
83 | ccw_device_get_id(cdev, &urd->dev_id); | 101 | ccw_device_get_id(cdev, &urd->dev_id); |
84 | mutex_init(&urd->io_mutex); | 102 | mutex_init(&urd->io_mutex); |
85 | mutex_init(&urd->open_mutex); | 103 | mutex_init(&urd->open_mutex); |
104 | atomic_set(&urd->ref_count, 1); | ||
105 | urd->cdev = cdev; | ||
106 | get_device(&cdev->dev); | ||
86 | return urd; | 107 | return urd; |
87 | } | 108 | } |
88 | 109 | ||
89 | static void urdev_free(struct urdev *urd) | 110 | static void urdev_free(struct urdev *urd) |
90 | { | 111 | { |
112 | TRACE("urdev_free: %p\n", urd); | ||
113 | if (urd->cdev) | ||
114 | put_device(&urd->cdev->dev); | ||
91 | kfree(urd); | 115 | kfree(urd); |
92 | } | 116 | } |
93 | 117 | ||
94 | /* | 118 | static void urdev_get(struct urdev *urd) |
95 | * This is how the character device driver gets a reference to a | 119 | { |
96 | * ur device. When this call returns successfully, a reference has | 120 | atomic_inc(&urd->ref_count); |
97 | * been taken (by get_device) on the underlying kobject. The recipient | 121 | } |
98 | * of this urdev pointer must eventually drop it with urdev_put(urd) | 122 | |
99 | * which does the corresponding put_device(). | 123 | static struct urdev *urdev_get_from_cdev(struct ccw_device *cdev) |
100 | */ | 124 | { |
125 | struct urdev *urd; | ||
126 | unsigned long flags; | ||
127 | |||
128 | spin_lock_irqsave(get_ccwdev_lock(cdev), flags); | ||
129 | urd = cdev->dev.driver_data; | ||
130 | if (urd) | ||
131 | urdev_get(urd); | ||
132 | spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); | ||
133 | return urd; | ||
134 | } | ||
135 | |||
101 | static struct urdev *urdev_get_from_devno(u16 devno) | 136 | static struct urdev *urdev_get_from_devno(u16 devno) |
102 | { | 137 | { |
103 | char bus_id[16]; | 138 | char bus_id[16]; |
104 | struct ccw_device *cdev; | 139 | struct ccw_device *cdev; |
140 | struct urdev *urd; | ||
105 | 141 | ||
106 | sprintf(bus_id, "0.0.%04x", devno); | 142 | sprintf(bus_id, "0.0.%04x", devno); |
107 | cdev = get_ccwdev_by_busid(&ur_driver, bus_id); | 143 | cdev = get_ccwdev_by_busid(&ur_driver, bus_id); |
108 | if (!cdev) | 144 | if (!cdev) |
109 | return NULL; | 145 | return NULL; |
110 | 146 | urd = urdev_get_from_cdev(cdev); | |
111 | return cdev->dev.driver_data; | 147 | put_device(&cdev->dev); |
148 | return urd; | ||
112 | } | 149 | } |
113 | 150 | ||
114 | static void urdev_put(struct urdev *urd) | 151 | static void urdev_put(struct urdev *urd) |
115 | { | 152 | { |
116 | put_device(&urd->cdev->dev); | 153 | if (atomic_dec_and_test(&urd->ref_count)) |
154 | urdev_free(urd); | ||
117 | } | 155 | } |
118 | 156 | ||
119 | /* | 157 | /* |
120 | * Low-level functions to do I/O to a ur device. | 158 | * Low-level functions to do I/O to a ur device. |
121 | * alloc_chan_prog | 159 | * alloc_chan_prog |
160 | * free_chan_prog | ||
122 | * do_ur_io | 161 | * do_ur_io |
123 | * ur_int_handler | 162 | * ur_int_handler |
124 | * | 163 | * |
125 | * alloc_chan_prog allocates and builds the channel program | 164 | * alloc_chan_prog allocates and builds the channel program |
165 | * free_chan_prog frees memory of the channel program | ||
126 | * | 166 | * |
127 | * do_ur_io issues the channel program to the device and blocks waiting | 167 | * do_ur_io issues the channel program to the device and blocks waiting |
128 | * on a completion event it publishes at urd->io_done. The function | 168 | * on a completion event it publishes at urd->io_done. The function |
@@ -137,6 +177,16 @@ static void urdev_put(struct urdev *urd) | |||
137 | * address pointer that alloc_chan_prog returned. | 177 | * address pointer that alloc_chan_prog returned. |
138 | */ | 178 | */ |
139 | 179 | ||
180 | static void free_chan_prog(struct ccw1 *cpa) | ||
181 | { | ||
182 | struct ccw1 *ptr = cpa; | ||
183 | |||
184 | while (ptr->cda) { | ||
185 | kfree((void *)(addr_t) ptr->cda); | ||
186 | ptr++; | ||
187 | } | ||
188 | kfree(cpa); | ||
189 | } | ||
140 | 190 | ||
141 | /* | 191 | /* |
142 | * alloc_chan_prog | 192 | * alloc_chan_prog |
@@ -144,44 +194,45 @@ static void urdev_put(struct urdev *urd) | |||
144 | * with a final NOP CCW command-chained on (which ensures that CE and DE | 194 | * with a final NOP CCW command-chained on (which ensures that CE and DE |
145 | * are presented together in a single interrupt instead of as separate | 195 | * are presented together in a single interrupt instead of as separate |
146 | * interrupts unless an incorrect length indication kicks in first). The | 196 | * interrupts unless an incorrect length indication kicks in first). The |
147 | * data length in each CCW is reclen. The caller must ensure that count | 197 | * data length in each CCW is reclen. |
148 | * is an integral multiple of reclen. | ||
149 | * The channel program pointer returned by this function must be freed | ||
150 | * with kfree. The caller is responsible for checking that | ||
151 | * count/reclen is not ridiculously large. | ||
152 | */ | 198 | */ |
153 | static struct ccw1 *alloc_chan_prog(char *buf, size_t count, size_t reclen) | 199 | static struct ccw1 *alloc_chan_prog(const char __user *ubuf, int rec_count, |
200 | int reclen) | ||
154 | { | 201 | { |
155 | size_t num_ccws; | ||
156 | struct ccw1 *cpa; | 202 | struct ccw1 *cpa; |
203 | void *kbuf; | ||
157 | int i; | 204 | int i; |
158 | 205 | ||
159 | TRACE("alloc_chan_prog(%p, %zu, %zu)\n", buf, count, reclen); | 206 | TRACE("alloc_chan_prog(%p, %i, %i)\n", ubuf, rec_count, reclen); |
160 | 207 | ||
161 | /* | 208 | /* |
162 | * We chain a NOP onto the writes to force CE+DE together. | 209 | * We chain a NOP onto the writes to force CE+DE together. |
163 | * That means we allocate room for CCWs to cover count/reclen | 210 | * That means we allocate room for CCWs to cover count/reclen |
164 | * records plus a NOP. | 211 | * records plus a NOP. |
165 | */ | 212 | */ |
166 | num_ccws = count / reclen + 1; | 213 | cpa = kzalloc((rec_count + 1) * sizeof(struct ccw1), |
167 | cpa = kmalloc(num_ccws * sizeof(struct ccw1), GFP_KERNEL | GFP_DMA); | 214 | GFP_KERNEL | GFP_DMA); |
168 | if (!cpa) | 215 | if (!cpa) |
169 | return NULL; | 216 | return ERR_PTR(-ENOMEM); |
170 | 217 | ||
171 | for (i = 0; count; i++) { | 218 | for (i = 0; i < rec_count; i++) { |
172 | cpa[i].cmd_code = WRITE_CCW_CMD; | 219 | cpa[i].cmd_code = WRITE_CCW_CMD; |
173 | cpa[i].flags = CCW_FLAG_CC | CCW_FLAG_SLI; | 220 | cpa[i].flags = CCW_FLAG_CC | CCW_FLAG_SLI; |
174 | cpa[i].count = reclen; | 221 | cpa[i].count = reclen; |
175 | cpa[i].cda = __pa(buf); | 222 | kbuf = kmalloc(reclen, GFP_KERNEL | GFP_DMA); |
176 | buf += reclen; | 223 | if (!kbuf) { |
177 | count -= reclen; | 224 | free_chan_prog(cpa); |
225 | return ERR_PTR(-ENOMEM); | ||
226 | } | ||
227 | cpa[i].cda = (u32)(addr_t) kbuf; | ||
228 | if (copy_from_user(kbuf, ubuf, reclen)) { | ||
229 | free_chan_prog(cpa); | ||
230 | return ERR_PTR(-EFAULT); | ||
231 | } | ||
232 | ubuf += reclen; | ||
178 | } | 233 | } |
179 | /* The following NOP CCW forces CE+DE to be presented together */ | 234 | /* The following NOP CCW forces CE+DE to be presented together */ |
180 | cpa[i].cmd_code = CCW_CMD_NOOP; | 235 | cpa[i].cmd_code = CCW_CMD_NOOP; |
181 | cpa[i].flags = 0; | ||
182 | cpa[i].count = 0; | ||
183 | cpa[i].cda = 0; | ||
184 | |||
185 | return cpa; | 236 | return cpa; |
186 | } | 237 | } |
187 | 238 | ||
@@ -189,7 +240,7 @@ static int do_ur_io(struct urdev *urd, struct ccw1 *cpa) | |||
189 | { | 240 | { |
190 | int rc; | 241 | int rc; |
191 | struct ccw_device *cdev = urd->cdev; | 242 | struct ccw_device *cdev = urd->cdev; |
192 | DECLARE_COMPLETION(event); | 243 | DECLARE_COMPLETION_ONSTACK(event); |
193 | 244 | ||
194 | TRACE("do_ur_io: cpa=%p\n", cpa); | 245 | TRACE("do_ur_io: cpa=%p\n", cpa); |
195 | 246 | ||
@@ -232,6 +283,7 @@ static void ur_int_handler(struct ccw_device *cdev, unsigned long intparm, | |||
232 | return; | 283 | return; |
233 | } | 284 | } |
234 | urd = cdev->dev.driver_data; | 285 | urd = cdev->dev.driver_data; |
286 | BUG_ON(!urd); | ||
235 | /* On special conditions irb is an error pointer */ | 287 | /* On special conditions irb is an error pointer */ |
236 | if (IS_ERR(irb)) | 288 | if (IS_ERR(irb)) |
237 | urd->io_request_rc = PTR_ERR(irb); | 289 | urd->io_request_rc = PTR_ERR(irb); |
@@ -249,9 +301,15 @@ static void ur_int_handler(struct ccw_device *cdev, unsigned long intparm, | |||
249 | static ssize_t ur_attr_reclen_show(struct device *dev, | 301 | static ssize_t ur_attr_reclen_show(struct device *dev, |
250 | struct device_attribute *attr, char *buf) | 302 | struct device_attribute *attr, char *buf) |
251 | { | 303 | { |
252 | struct urdev *urd = dev->driver_data; | 304 | struct urdev *urd; |
305 | int rc; | ||
253 | 306 | ||
254 | return sprintf(buf, "%zu\n", urd->reclen); | 307 | urd = urdev_get_from_cdev(to_ccwdev(dev)); |
308 | if (!urd) | ||
309 | return -ENODEV; | ||
310 | rc = sprintf(buf, "%zu\n", urd->reclen); | ||
311 | urdev_put(urd); | ||
312 | return rc; | ||
255 | } | 313 | } |
256 | 314 | ||
257 | static DEVICE_ATTR(reclen, 0444, ur_attr_reclen_show, NULL); | 315 | static DEVICE_ATTR(reclen, 0444, ur_attr_reclen_show, NULL); |
@@ -325,24 +383,11 @@ static ssize_t do_write(struct urdev *urd, const char __user *udata, | |||
325 | size_t count, size_t reclen, loff_t *ppos) | 383 | size_t count, size_t reclen, loff_t *ppos) |
326 | { | 384 | { |
327 | struct ccw1 *cpa; | 385 | struct ccw1 *cpa; |
328 | char *buf; | ||
329 | int rc; | 386 | int rc; |
330 | 387 | ||
331 | /* Data buffer must be under 2GB line for fmt1 CCWs: hence GFP_DMA */ | 388 | cpa = alloc_chan_prog(udata, count / reclen, reclen); |
332 | buf = kmalloc(count, GFP_KERNEL | GFP_DMA); | 389 | if (IS_ERR(cpa)) |
333 | if (!buf) | 390 | return PTR_ERR(cpa); |
334 | return -ENOMEM; | ||
335 | |||
336 | if (copy_from_user(buf, udata, count)) { | ||
337 | rc = -EFAULT; | ||
338 | goto fail_kfree_buf; | ||
339 | } | ||
340 | |||
341 | cpa = alloc_chan_prog(buf, count, reclen); | ||
342 | if (!cpa) { | ||
343 | rc = -ENOMEM; | ||
344 | goto fail_kfree_buf; | ||
345 | } | ||
346 | 391 | ||
347 | rc = do_ur_io(urd, cpa); | 392 | rc = do_ur_io(urd, cpa); |
348 | if (rc) | 393 | if (rc) |
@@ -354,10 +399,9 @@ static ssize_t do_write(struct urdev *urd, const char __user *udata, | |||
354 | } | 399 | } |
355 | *ppos += count; | 400 | *ppos += count; |
356 | rc = count; | 401 | rc = count; |
402 | |||
357 | fail_kfree_cpa: | 403 | fail_kfree_cpa: |
358 | kfree(cpa); | 404 | free_chan_prog(cpa); |
359 | fail_kfree_buf: | ||
360 | kfree(buf); | ||
361 | return rc; | 405 | return rc; |
362 | } | 406 | } |
363 | 407 | ||
@@ -380,31 +424,6 @@ static ssize_t ur_write(struct file *file, const char __user *udata, | |||
380 | return do_write(urf->urd, udata, count, urf->dev_reclen, ppos); | 424 | return do_write(urf->urd, udata, count, urf->dev_reclen, ppos); |
381 | } | 425 | } |
382 | 426 | ||
383 | static int do_diag_14(unsigned long rx, unsigned long ry1, | ||
384 | unsigned long subcode) | ||
385 | { | ||
386 | register unsigned long _ry1 asm("2") = ry1; | ||
387 | register unsigned long _ry2 asm("3") = subcode; | ||
388 | int rc = 0; | ||
389 | |||
390 | asm volatile( | ||
391 | #ifdef CONFIG_64BIT | ||
392 | " sam31\n" | ||
393 | " diag %2,2,0x14\n" | ||
394 | " sam64\n" | ||
395 | #else | ||
396 | " diag %2,2,0x14\n" | ||
397 | #endif | ||
398 | " ipm %0\n" | ||
399 | " srl %0,28\n" | ||
400 | : "=d" (rc), "+d" (_ry2) | ||
401 | : "d" (rx), "d" (_ry1) | ||
402 | : "cc"); | ||
403 | |||
404 | TRACE("diag 14: subcode=0x%lx, cc=%i\n", subcode, rc); | ||
405 | return rc; | ||
406 | } | ||
407 | |||
408 | /* | 427 | /* |
409 | * diagnose code 0x14 subcode 0x0028 - position spool file to designated | 428 | * diagnose code 0x14 subcode 0x0028 - position spool file to designated |
410 | * record | 429 | * record |
@@ -416,7 +435,7 @@ static int diag_position_to_record(int devno, int record) | |||
416 | { | 435 | { |
417 | int cc; | 436 | int cc; |
418 | 437 | ||
419 | cc = do_diag_14(record, devno, 0x28); | 438 | cc = diag14(record, devno, 0x28); |
420 | switch (cc) { | 439 | switch (cc) { |
421 | case 0: | 440 | case 0: |
422 | return 0; | 441 | return 0; |
@@ -441,7 +460,7 @@ static int diag_read_file(int devno, char *buf) | |||
441 | { | 460 | { |
442 | int cc; | 461 | int cc; |
443 | 462 | ||
444 | cc = do_diag_14((unsigned long) buf, devno, 0x00); | 463 | cc = diag14((unsigned long) buf, devno, 0x00); |
445 | switch (cc) { | 464 | switch (cc) { |
446 | case 0: | 465 | case 0: |
447 | return 0; | 466 | return 0; |
@@ -473,7 +492,7 @@ static ssize_t diag14_read(struct file *file, char __user *ubuf, size_t count, | |||
473 | return rc; | 492 | return rc; |
474 | 493 | ||
475 | len = min((size_t) PAGE_SIZE, count); | 494 | len = min((size_t) PAGE_SIZE, count); |
476 | buf = kmalloc(PAGE_SIZE, GFP_KERNEL); | 495 | buf = (char *) __get_free_page(GFP_KERNEL | GFP_DMA); |
477 | if (!buf) | 496 | if (!buf) |
478 | return -ENOMEM; | 497 | return -ENOMEM; |
479 | 498 | ||
@@ -500,7 +519,7 @@ static ssize_t diag14_read(struct file *file, char __user *ubuf, size_t count, | |||
500 | *offs += copied; | 519 | *offs += copied; |
501 | rc = copied; | 520 | rc = copied; |
502 | fail: | 521 | fail: |
503 | kfree(buf); | 522 | free_page((unsigned long) buf); |
504 | return rc; | 523 | return rc; |
505 | } | 524 | } |
506 | 525 | ||
@@ -534,7 +553,7 @@ static int diag_read_next_file_info(struct file_control_block *buf, int spid) | |||
534 | { | 553 | { |
535 | int cc; | 554 | int cc; |
536 | 555 | ||
537 | cc = do_diag_14((unsigned long) buf, spid, 0xfff); | 556 | cc = diag14((unsigned long) buf, spid, 0xfff); |
538 | switch (cc) { | 557 | switch (cc) { |
539 | case 0: | 558 | case 0: |
540 | return 0; | 559 | return 0; |
@@ -543,56 +562,97 @@ static int diag_read_next_file_info(struct file_control_block *buf, int spid) | |||
543 | } | 562 | } |
544 | } | 563 | } |
545 | 564 | ||
546 | static int verify_device(struct urdev *urd) | 565 | static int verify_uri_device(struct urdev *urd) |
547 | { | 566 | { |
548 | struct file_control_block fcb; | 567 | struct file_control_block *fcb; |
549 | char *buf; | 568 | char *buf; |
550 | int rc; | 569 | int rc; |
551 | 570 | ||
571 | fcb = kmalloc(sizeof(*fcb), GFP_KERNEL | GFP_DMA); | ||
572 | if (!fcb) | ||
573 | return -ENOMEM; | ||
574 | |||
575 | /* check for empty reader device (beginning of chain) */ | ||
576 | rc = diag_read_next_file_info(fcb, 0); | ||
577 | if (rc) | ||
578 | goto fail_free_fcb; | ||
579 | |||
580 | /* if file is in hold status, we do not read it */ | ||
581 | if (fcb->file_stat & (FLG_SYSTEM_HOLD | FLG_USER_HOLD)) { | ||
582 | rc = -EPERM; | ||
583 | goto fail_free_fcb; | ||
584 | } | ||
585 | |||
586 | /* open file on virtual reader */ | ||
587 | buf = (char *) __get_free_page(GFP_KERNEL | GFP_DMA); | ||
588 | if (!buf) { | ||
589 | rc = -ENOMEM; | ||
590 | goto fail_free_fcb; | ||
591 | } | ||
592 | rc = diag_read_file(urd->dev_id.devno, buf); | ||
593 | if ((rc != 0) && (rc != -ENODATA)) /* EOF does not hurt */ | ||
594 | goto fail_free_buf; | ||
595 | |||
596 | /* check if the file on top of the queue is open now */ | ||
597 | rc = diag_read_next_file_info(fcb, 0); | ||
598 | if (rc) | ||
599 | goto fail_free_buf; | ||
600 | if (!(fcb->file_stat & FLG_IN_USE)) { | ||
601 | rc = -EMFILE; | ||
602 | goto fail_free_buf; | ||
603 | } | ||
604 | rc = 0; | ||
605 | |||
606 | fail_free_buf: | ||
607 | free_page((unsigned long) buf); | ||
608 | fail_free_fcb: | ||
609 | kfree(fcb); | ||
610 | return rc; | ||
611 | } | ||
612 | |||
613 | static int verify_device(struct urdev *urd) | ||
614 | { | ||
552 | switch (urd->class) { | 615 | switch (urd->class) { |
553 | case DEV_CLASS_UR_O: | 616 | case DEV_CLASS_UR_O: |
554 | return 0; /* no check needed here */ | 617 | return 0; /* no check needed here */ |
555 | case DEV_CLASS_UR_I: | 618 | case DEV_CLASS_UR_I: |
556 | /* check for empty reader device (beginning of chain) */ | 619 | return verify_uri_device(urd); |
557 | rc = diag_read_next_file_info(&fcb, 0); | ||
558 | if (rc) | ||
559 | return rc; | ||
560 | |||
561 | /* open file on virtual reader */ | ||
562 | buf = kmalloc(PAGE_SIZE, GFP_KERNEL); | ||
563 | if (!buf) | ||
564 | return -ENOMEM; | ||
565 | rc = diag_read_file(urd->dev_id.devno, buf); | ||
566 | kfree(buf); | ||
567 | |||
568 | if ((rc != 0) && (rc != -ENODATA)) /* EOF does not hurt */ | ||
569 | return rc; | ||
570 | return 0; | ||
571 | default: | 620 | default: |
572 | return -ENOTSUPP; | 621 | return -ENOTSUPP; |
573 | } | 622 | } |
574 | } | 623 | } |
575 | 624 | ||
576 | static int get_file_reclen(struct urdev *urd) | 625 | static int get_uri_file_reclen(struct urdev *urd) |
577 | { | 626 | { |
578 | struct file_control_block fcb; | 627 | struct file_control_block *fcb; |
579 | int rc; | 628 | int rc; |
580 | 629 | ||
630 | fcb = kmalloc(sizeof(*fcb), GFP_KERNEL | GFP_DMA); | ||
631 | if (!fcb) | ||
632 | return -ENOMEM; | ||
633 | rc = diag_read_next_file_info(fcb, 0); | ||
634 | if (rc) | ||
635 | goto fail_free; | ||
636 | if (fcb->file_stat & FLG_CP_DUMP) | ||
637 | rc = 0; | ||
638 | else | ||
639 | rc = fcb->rec_len; | ||
640 | |||
641 | fail_free: | ||
642 | kfree(fcb); | ||
643 | return rc; | ||
644 | } | ||
645 | |||
646 | static int get_file_reclen(struct urdev *urd) | ||
647 | { | ||
581 | switch (urd->class) { | 648 | switch (urd->class) { |
582 | case DEV_CLASS_UR_O: | 649 | case DEV_CLASS_UR_O: |
583 | return 0; | 650 | return 0; |
584 | case DEV_CLASS_UR_I: | 651 | case DEV_CLASS_UR_I: |
585 | rc = diag_read_next_file_info(&fcb, 0); | 652 | return get_uri_file_reclen(urd); |
586 | if (rc) | ||
587 | return rc; | ||
588 | break; | ||
589 | default: | 653 | default: |
590 | return -ENOTSUPP; | 654 | return -ENOTSUPP; |
591 | } | 655 | } |
592 | if (fcb.file_stat & FLG_CP_DUMP) | ||
593 | return 0; | ||
594 | |||
595 | return fcb.rec_len; | ||
596 | } | 656 | } |
597 | 657 | ||
598 | static int ur_open(struct inode *inode, struct file *file) | 658 | static int ur_open(struct inode *inode, struct file *file) |
@@ -710,64 +770,63 @@ static struct file_operations ur_fops = { | |||
710 | 770 | ||
711 | /* | 771 | /* |
712 | * ccw_device infrastructure: | 772 | * ccw_device infrastructure: |
713 | * ur_probe gets its own ref to the device (i.e. get_device), | 773 | * ur_probe creates the struct urdev (with refcount = 1), the device |
714 | * creates the struct urdev, the device attributes, sets up | 774 | * attributes, sets up the interrupt handler and validates the virtual |
715 | * the interrupt handler and validates the virtual unit record device. | 775 | * unit record device. |
716 | * ur_remove removes the device attributes, frees the struct urdev | 776 | * ur_remove removes the device attributes and drops the reference to |
717 | * and drops (put_device) the ref to the device we got in ur_probe. | 777 | * struct urdev. |
778 | * | ||
779 | * ur_probe, ur_remove, ur_set_online and ur_set_offline are serialized | ||
780 | * by the vmur_mutex lock. | ||
781 | * | ||
782 | * urd->char_device is used as indication that the online function has | ||
783 | * been completed successfully. | ||
718 | */ | 784 | */ |
719 | static int ur_probe(struct ccw_device *cdev) | 785 | static int ur_probe(struct ccw_device *cdev) |
720 | { | 786 | { |
721 | struct urdev *urd; | 787 | struct urdev *urd; |
722 | int rc; | 788 | int rc; |
723 | 789 | ||
724 | TRACE("ur_probe: cdev=%p state=%d\n", cdev, *(int *) cdev->private); | 790 | TRACE("ur_probe: cdev=%p\n", cdev); |
725 | |||
726 | if (!get_device(&cdev->dev)) | ||
727 | return -ENODEV; | ||
728 | 791 | ||
792 | mutex_lock(&vmur_mutex); | ||
729 | urd = urdev_alloc(cdev); | 793 | urd = urdev_alloc(cdev); |
730 | if (!urd) { | 794 | if (!urd) { |
731 | rc = -ENOMEM; | 795 | rc = -ENOMEM; |
732 | goto fail; | 796 | goto fail_unlock; |
733 | } | 797 | } |
798 | |||
734 | rc = ur_create_attributes(&cdev->dev); | 799 | rc = ur_create_attributes(&cdev->dev); |
735 | if (rc) { | 800 | if (rc) { |
736 | rc = -ENOMEM; | 801 | rc = -ENOMEM; |
737 | goto fail; | 802 | goto fail_urdev_put; |
738 | } | 803 | } |
739 | cdev->dev.driver_data = urd; | ||
740 | cdev->handler = ur_int_handler; | 804 | cdev->handler = ur_int_handler; |
741 | 805 | ||
742 | /* validate virtual unit record device */ | 806 | /* validate virtual unit record device */ |
743 | urd->class = get_urd_class(urd); | 807 | urd->class = get_urd_class(urd); |
744 | if (urd->class < 0) { | 808 | if (urd->class < 0) { |
745 | rc = urd->class; | 809 | rc = urd->class; |
746 | goto fail; | 810 | goto fail_remove_attr; |
747 | } | 811 | } |
748 | if ((urd->class != DEV_CLASS_UR_I) && (urd->class != DEV_CLASS_UR_O)) { | 812 | if ((urd->class != DEV_CLASS_UR_I) && (urd->class != DEV_CLASS_UR_O)) { |
749 | rc = -ENOTSUPP; | 813 | rc = -ENOTSUPP; |
750 | goto fail; | 814 | goto fail_remove_attr; |
751 | } | 815 | } |
816 | spin_lock_irq(get_ccwdev_lock(cdev)); | ||
817 | cdev->dev.driver_data = urd; | ||
818 | spin_unlock_irq(get_ccwdev_lock(cdev)); | ||
752 | 819 | ||
820 | mutex_unlock(&vmur_mutex); | ||
753 | return 0; | 821 | return 0; |
754 | 822 | ||
755 | fail: | 823 | fail_remove_attr: |
756 | urdev_free(urd); | ||
757 | put_device(&cdev->dev); | ||
758 | return rc; | ||
759 | } | ||
760 | |||
761 | static void ur_remove(struct ccw_device *cdev) | ||
762 | { | ||
763 | struct urdev *urd = cdev->dev.driver_data; | ||
764 | |||
765 | TRACE("ur_remove\n"); | ||
766 | if (cdev->online) | ||
767 | ur_set_offline(cdev); | ||
768 | ur_remove_attributes(&cdev->dev); | 824 | ur_remove_attributes(&cdev->dev); |
769 | urdev_free(urd); | 825 | fail_urdev_put: |
770 | put_device(&cdev->dev); | 826 | urdev_put(urd); |
827 | fail_unlock: | ||
828 | mutex_unlock(&vmur_mutex); | ||
829 | return rc; | ||
771 | } | 830 | } |
772 | 831 | ||
773 | static int ur_set_online(struct ccw_device *cdev) | 832 | static int ur_set_online(struct ccw_device *cdev) |
@@ -776,20 +835,29 @@ static int ur_set_online(struct ccw_device *cdev) | |||
776 | int minor, major, rc; | 835 | int minor, major, rc; |
777 | char node_id[16]; | 836 | char node_id[16]; |
778 | 837 | ||
779 | TRACE("ur_set_online: cdev=%p state=%d\n", cdev, | 838 | TRACE("ur_set_online: cdev=%p\n", cdev); |
780 | *(int *) cdev->private); | ||
781 | 839 | ||
782 | if (!try_module_get(ur_driver.owner)) | 840 | mutex_lock(&vmur_mutex); |
783 | return -EINVAL; | 841 | urd = urdev_get_from_cdev(cdev); |
842 | if (!urd) { | ||
843 | /* ur_remove already deleted our urd */ | ||
844 | rc = -ENODEV; | ||
845 | goto fail_unlock; | ||
846 | } | ||
847 | |||
848 | if (urd->char_device) { | ||
849 | /* Another ur_set_online was faster */ | ||
850 | rc = -EBUSY; | ||
851 | goto fail_urdev_put; | ||
852 | } | ||
784 | 853 | ||
785 | urd = (struct urdev *) cdev->dev.driver_data; | ||
786 | minor = urd->dev_id.devno; | 854 | minor = urd->dev_id.devno; |
787 | major = MAJOR(ur_first_dev_maj_min); | 855 | major = MAJOR(ur_first_dev_maj_min); |
788 | 856 | ||
789 | urd->char_device = cdev_alloc(); | 857 | urd->char_device = cdev_alloc(); |
790 | if (!urd->char_device) { | 858 | if (!urd->char_device) { |
791 | rc = -ENOMEM; | 859 | rc = -ENOMEM; |
792 | goto fail_module_put; | 860 | goto fail_urdev_put; |
793 | } | 861 | } |
794 | 862 | ||
795 | cdev_init(urd->char_device, &ur_fops); | 863 | cdev_init(urd->char_device, &ur_fops); |
@@ -818,29 +886,79 @@ static int ur_set_online(struct ccw_device *cdev) | |||
818 | TRACE("ur_set_online: device_create rc=%d\n", rc); | 886 | TRACE("ur_set_online: device_create rc=%d\n", rc); |
819 | goto fail_free_cdev; | 887 | goto fail_free_cdev; |
820 | } | 888 | } |
821 | 889 | urdev_put(urd); | |
890 | mutex_unlock(&vmur_mutex); | ||
822 | return 0; | 891 | return 0; |
823 | 892 | ||
824 | fail_free_cdev: | 893 | fail_free_cdev: |
825 | cdev_del(urd->char_device); | 894 | cdev_del(urd->char_device); |
826 | fail_module_put: | 895 | urd->char_device = NULL; |
827 | module_put(ur_driver.owner); | 896 | fail_urdev_put: |
828 | 897 | urdev_put(urd); | |
898 | fail_unlock: | ||
899 | mutex_unlock(&vmur_mutex); | ||
829 | return rc; | 900 | return rc; |
830 | } | 901 | } |
831 | 902 | ||
832 | static int ur_set_offline(struct ccw_device *cdev) | 903 | static int ur_set_offline_force(struct ccw_device *cdev, int force) |
833 | { | 904 | { |
834 | struct urdev *urd; | 905 | struct urdev *urd; |
906 | int rc; | ||
835 | 907 | ||
836 | TRACE("ur_set_offline: cdev=%p cdev->private=%p state=%d\n", | 908 | TRACE("ur_set_offline: cdev=%p\n", cdev); |
837 | cdev, cdev->private, *(int *) cdev->private); | 909 | urd = urdev_get_from_cdev(cdev); |
838 | urd = (struct urdev *) cdev->dev.driver_data; | 910 | if (!urd) |
911 | /* ur_remove already deleted our urd */ | ||
912 | return -ENODEV; | ||
913 | if (!urd->char_device) { | ||
914 | /* Another ur_set_offline was faster */ | ||
915 | rc = -EBUSY; | ||
916 | goto fail_urdev_put; | ||
917 | } | ||
918 | if (!force && (atomic_read(&urd->ref_count) > 2)) { | ||
919 | /* There is still a user of urd (e.g. ur_open) */ | ||
920 | TRACE("ur_set_offline: BUSY\n"); | ||
921 | rc = -EBUSY; | ||
922 | goto fail_urdev_put; | ||
923 | } | ||
839 | device_destroy(vmur_class, urd->char_device->dev); | 924 | device_destroy(vmur_class, urd->char_device->dev); |
840 | cdev_del(urd->char_device); | 925 | cdev_del(urd->char_device); |
841 | module_put(ur_driver.owner); | 926 | urd->char_device = NULL; |
927 | rc = 0; | ||
842 | 928 | ||
843 | return 0; | 929 | fail_urdev_put: |
930 | urdev_put(urd); | ||
931 | return rc; | ||
932 | } | ||
933 | |||
934 | static int ur_set_offline(struct ccw_device *cdev) | ||
935 | { | ||
936 | int rc; | ||
937 | |||
938 | mutex_lock(&vmur_mutex); | ||
939 | rc = ur_set_offline_force(cdev, 0); | ||
940 | mutex_unlock(&vmur_mutex); | ||
941 | return rc; | ||
942 | } | ||
943 | |||
944 | static void ur_remove(struct ccw_device *cdev) | ||
945 | { | ||
946 | unsigned long flags; | ||
947 | |||
948 | TRACE("ur_remove\n"); | ||
949 | |||
950 | mutex_lock(&vmur_mutex); | ||
951 | |||
952 | if (cdev->online) | ||
953 | ur_set_offline_force(cdev, 1); | ||
954 | ur_remove_attributes(&cdev->dev); | ||
955 | |||
956 | spin_lock_irqsave(get_ccwdev_lock(cdev), flags); | ||
957 | urdev_put(cdev->dev.driver_data); | ||
958 | cdev->dev.driver_data = NULL; | ||
959 | spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); | ||
960 | |||
961 | mutex_unlock(&vmur_mutex); | ||
844 | } | 962 | } |
845 | 963 | ||
846 | /* | 964 | /* |
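
The vmur.c changes above replace the old get_device()/put_device() lifetime scheme with an explicit atomic reference count on struct urdev: the ccw device reference is taken once in urdev_alloc() and dropped only when the last urdev reference goes away, and lookups via the ccw device happen under the ccwdev lock. A minimal sketch of the get/put pattern follows, with assumed names (demo_urd, demo_*) and the driver-specific fields left out:

#include <linux/slab.h>
#include <asm/atomic.h>

/* Simplified stand-in for struct urdev with its new ref_count member. */
struct demo_urd {
	atomic_t ref_count;
	/* ... cdev pointer, mutexes, reclen, ... */
};

static struct demo_urd *demo_alloc(void)
{
	struct demo_urd *urd = kzalloc(sizeof(*urd), GFP_KERNEL);

	if (!urd)
		return NULL;
	atomic_set(&urd->ref_count, 1);	/* probe owns the initial reference */
	return urd;
}

static void demo_get(struct demo_urd *urd)
{
	atomic_inc(&urd->ref_count);	/* e.g. open() or a sysfs show routine */
}

static void demo_put(struct demo_urd *urd)
{
	if (atomic_dec_and_test(&urd->ref_count))
		kfree(urd);		/* last user frees, as urdev_free() does */
}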
diff --git a/drivers/s390/char/vmur.h b/drivers/s390/char/vmur.h
index 16d0a4e38e40..fa959644735a 100644
--- a/drivers/s390/char/vmur.h
+++ b/drivers/s390/char/vmur.h
@@ -50,7 +50,10 @@ struct file_control_block { | |||
50 | char rest[200]; | 50 | char rest[200]; |
51 | } __attribute__ ((packed)); | 51 | } __attribute__ ((packed)); |
52 | 52 | ||
53 | #define FLG_CP_DUMP 0x10 | 53 | #define FLG_SYSTEM_HOLD 0x04 |
54 | #define FLG_CP_DUMP 0x10 | ||
55 | #define FLG_USER_HOLD 0x20 | ||
56 | #define FLG_IN_USE 0x80 | ||
54 | 57 | ||
55 | /* | 58 | /* |
56 | * A struct urdev is created for each ur device that is made available | 59 | * A struct urdev is created for each ur device that is made available |
@@ -67,6 +70,7 @@ struct urdev { | |||
67 | size_t reclen; /* Record length for *write* CCWs */ | 70 | size_t reclen; /* Record length for *write* CCWs */ |
68 | int class; /* VM device class */ | 71 | int class; /* VM device class */ |
69 | int io_request_rc; /* return code from I/O request */ | 72 | int io_request_rc; /* return code from I/O request */ |
73 | atomic_t ref_count; /* reference counter */ | ||
70 | }; | 74 | }; |
71 | 75 | ||
72 | /* | 76 | /* |
diff --git a/drivers/s390/cio/cmf.c b/drivers/s390/cio/cmf.c
index 02fd00b55e1b..34a796913b06 100644
--- a/drivers/s390/cio/cmf.c
+++ b/drivers/s390/cio/cmf.c
@@ -594,6 +594,9 @@ alloc_cmb (struct ccw_device *cdev) | |||
594 | free_pages((unsigned long)mem, get_order(size)); | 594 | free_pages((unsigned long)mem, get_order(size)); |
595 | } else if (!mem) { | 595 | } else if (!mem) { |
596 | /* no luck */ | 596 | /* no luck */ |
597 | printk(KERN_WARNING "cio: failed to allocate area " | ||
598 | "for measuring %d subchannels\n", | ||
599 | cmb_area.num_channels); | ||
597 | ret = -ENOMEM; | 600 | ret = -ENOMEM; |
598 | goto out; | 601 | goto out; |
599 | } else { | 602 | } else { |
@@ -1279,13 +1282,6 @@ init_cmf(void) | |||
1279 | case CMF_BASIC: | 1282 | case CMF_BASIC: |
1280 | format_string = "basic"; | 1283 | format_string = "basic"; |
1281 | cmbops = &cmbops_basic; | 1284 | cmbops = &cmbops_basic; |
1282 | if (cmb_area.num_channels > 4096 || cmb_area.num_channels < 1) { | ||
1283 | printk(KERN_ERR "cio: Basic channel measurement " | ||
1284 | "facility can only use 1 to 4096 devices\n" | ||
1285 | KERN_ERR "when the cmf driver is built" | ||
1286 | " as a loadable module\n"); | ||
1287 | return 1; | ||
1288 | } | ||
1289 | break; | 1285 | break; |
1290 | case CMF_EXTENDED: | 1286 | case CMF_EXTENDED: |
1291 | format_string = "extended"; | 1287 | format_string = "extended"; |
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 1c27a5a06b49..5635e656c1a3 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -79,6 +79,7 @@ css_alloc_subchannel(struct subchannel_id schid) | |||
79 | sch->schib.pmcw.intparm = (__u32)(unsigned long)sch; | 79 | sch->schib.pmcw.intparm = (__u32)(unsigned long)sch; |
80 | ret = cio_modify(sch); | 80 | ret = cio_modify(sch); |
81 | if (ret) { | 81 | if (ret) { |
82 | kfree(sch->lock); | ||
82 | kfree(sch); | 83 | kfree(sch); |
83 | return ERR_PTR(ret); | 84 | return ERR_PTR(ret); |
84 | } | 85 | } |
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index 297659fa0e26..e44d92eac8e9 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -117,7 +117,10 @@ static int ccw_uevent(struct device *dev, char **envp, int num_envp, | |||
117 | snprint_alias(modalias_buf, sizeof(modalias_buf), id, ""); | 117 | snprint_alias(modalias_buf, sizeof(modalias_buf), id, ""); |
118 | ret = add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &len, | 118 | ret = add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &len, |
119 | "MODALIAS=%s", modalias_buf); | 119 | "MODALIAS=%s", modalias_buf); |
120 | return ret; | 120 | if (ret) |
121 | return ret; | ||
122 | envp[i] = NULL; | ||
123 | return 0; | ||
121 | } | 124 | } |
122 | 125 | ||
123 | struct bus_type ccw_bus_type; | 126 | struct bus_type ccw_bus_type; |
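
The device.c hunk above makes ccw_uevent() NULL-terminate the environment array once the final add_uevent_var() call has succeeded, instead of returning its raw return value. A sketch of that convention for a hypothetical bus driver, using the same 2.6-era add_uevent_var() signature that appears in the hunk:

#include <linux/device.h>
#include <linux/kobject.h>

/* Hypothetical uevent callback; "demo:example" is a placeholder alias. */
static int demo_uevent(struct device *dev, char **envp, int num_envp,
		       char *buffer, int buffer_size)
{
	int i = 0, len = 0, ret;

	ret = add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &len,
			     "MODALIAS=%s", "demo:example");
	if (ret)
		return ret;
	envp[i] = NULL;		/* callers expect a NULL-terminated array */
	return 0;
}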
diff --git a/drivers/s390/cio/device_id.c b/drivers/s390/cio/device_id.c
index 60b9347f7c92..f232832f2b22 100644
--- a/drivers/s390/cio/device_id.c
+++ b/drivers/s390/cio/device_id.c
@@ -17,6 +17,7 @@ | |||
17 | #include <asm/delay.h> | 17 | #include <asm/delay.h> |
18 | #include <asm/cio.h> | 18 | #include <asm/cio.h> |
19 | #include <asm/lowcore.h> | 19 | #include <asm/lowcore.h> |
20 | #include <asm/diag.h> | ||
20 | 21 | ||
21 | #include "cio.h" | 22 | #include "cio.h" |
22 | #include "cio_debug.h" | 23 | #include "cio_debug.h" |
@@ -25,51 +26,6 @@ | |||
25 | #include "ioasm.h" | 26 | #include "ioasm.h" |
26 | 27 | ||
27 | /* | 28 | /* |
28 | * diag210 is used under VM to get information about a virtual device | ||
29 | */ | ||
30 | int | ||
31 | diag210(struct diag210 * addr) | ||
32 | { | ||
33 | /* | ||
34 | * diag 210 needs its data below the 2GB border, so we | ||
35 | * use a static data area to be sure | ||
36 | */ | ||
37 | static struct diag210 diag210_tmp; | ||
38 | static DEFINE_SPINLOCK(diag210_lock); | ||
39 | unsigned long flags; | ||
40 | int ccode; | ||
41 | |||
42 | spin_lock_irqsave(&diag210_lock, flags); | ||
43 | diag210_tmp = *addr; | ||
44 | |||
45 | #ifdef CONFIG_64BIT | ||
46 | asm volatile( | ||
47 | " lhi %0,-1\n" | ||
48 | " sam31\n" | ||
49 | " diag %1,0,0x210\n" | ||
50 | "0: ipm %0\n" | ||
51 | " srl %0,28\n" | ||
52 | "1: sam64\n" | ||
53 | EX_TABLE(0b,1b) | ||
54 | : "=&d" (ccode) : "a" (&diag210_tmp) : "cc", "memory"); | ||
55 | #else | ||
56 | asm volatile( | ||
57 | " lhi %0,-1\n" | ||
58 | " diag %1,0,0x210\n" | ||
59 | "0: ipm %0\n" | ||
60 | " srl %0,28\n" | ||
61 | "1:\n" | ||
62 | EX_TABLE(0b,1b) | ||
63 | : "=&d" (ccode) : "a" (&diag210_tmp) : "cc", "memory"); | ||
64 | #endif | ||
65 | |||
66 | *addr = diag210_tmp; | ||
67 | spin_unlock_irqrestore(&diag210_lock, flags); | ||
68 | |||
69 | return ccode; | ||
70 | } | ||
71 | |||
72 | /* | ||
73 | * Input : | 29 | * Input : |
74 | * devno - device number | 30 | * devno - device number |
75 | * ps - pointer to sense ID data area | 31 | * ps - pointer to sense ID data area |
@@ -349,5 +305,3 @@ ccw_device_sense_id_irq(struct ccw_device *cdev, enum dev_event dev_event) | |||
349 | break; | 305 | break; |
350 | } | 306 | } |
351 | } | 307 | } |
352 | |||
353 | EXPORT_SYMBOL(diag210); | ||
diff --git a/drivers/s390/cio/qdio.c b/drivers/s390/cio/qdio.c
index ed026a1dc324..d8d479876ec7 100644
--- a/drivers/s390/cio/qdio.c
+++ b/drivers/s390/cio/qdio.c
@@ -81,6 +81,7 @@ static __u32 volatile spare_indicator; | |||
81 | static atomic_t spare_indicator_usecount; | 81 | static atomic_t spare_indicator_usecount; |
82 | #define QDIO_MEMPOOL_SCSSC_ELEMENTS 2 | 82 | #define QDIO_MEMPOOL_SCSSC_ELEMENTS 2 |
83 | static mempool_t *qdio_mempool_scssc; | 83 | static mempool_t *qdio_mempool_scssc; |
84 | static struct kmem_cache *qdio_q_cache; | ||
84 | 85 | ||
85 | static debug_info_t *qdio_dbf_setup; | 86 | static debug_info_t *qdio_dbf_setup; |
86 | static debug_info_t *qdio_dbf_sbal; | 87 | static debug_info_t *qdio_dbf_sbal; |
@@ -194,6 +195,8 @@ qdio_do_eqbs(struct qdio_q *q, unsigned char *state, | |||
194 | again: | 195 | again: |
195 | ccq = do_eqbs(irq->sch_token, state, q_no, start, cnt); | 196 | ccq = do_eqbs(irq->sch_token, state, q_no, start, cnt); |
196 | rc = qdio_check_ccq(q, ccq); | 197 | rc = qdio_check_ccq(q, ccq); |
198 | if ((ccq == 96) && (tmp_cnt != *cnt)) | ||
199 | rc = 0; | ||
197 | if (rc == 1) { | 200 | if (rc == 1) { |
198 | QDIO_DBF_TEXT5(1,trace,"eqAGAIN"); | 201 | QDIO_DBF_TEXT5(1,trace,"eqAGAIN"); |
199 | goto again; | 202 | goto again; |
@@ -739,7 +742,8 @@ qdio_get_outbound_buffer_frontier(struct qdio_q *q) | |||
739 | first_not_to_check=f+qdio_min(atomic_read(&q->number_of_buffers_used), | 742 | first_not_to_check=f+qdio_min(atomic_read(&q->number_of_buffers_used), |
740 | (QDIO_MAX_BUFFERS_PER_Q-1)); | 743 | (QDIO_MAX_BUFFERS_PER_Q-1)); |
741 | 744 | ||
742 | if ((!q->is_iqdio_q)&&(!q->hydra_gives_outbound_pcis)) | 745 | if (((!q->is_iqdio_q) && (!q->hydra_gives_outbound_pcis)) || |
746 | (q->queue_type == QDIO_IQDIO_QFMT_ASYNCH)) | ||
743 | SYNC_MEMORY; | 747 | SYNC_MEMORY; |
744 | 748 | ||
745 | check_next: | 749 | check_next: |
@@ -1617,23 +1621,21 @@ static void | |||
1617 | qdio_release_irq_memory(struct qdio_irq *irq_ptr) | 1621 | qdio_release_irq_memory(struct qdio_irq *irq_ptr) |
1618 | { | 1622 | { |
1619 | int i; | 1623 | int i; |
1624 | struct qdio_q *q; | ||
1620 | 1625 | ||
1621 | for (i=0;i<QDIO_MAX_QUEUES_PER_IRQ;i++) { | 1626 | for (i = 0; i < QDIO_MAX_QUEUES_PER_IRQ; i++) { |
1622 | if (!irq_ptr->input_qs[i]) | 1627 | q = irq_ptr->input_qs[i]; |
1623 | goto next; | 1628 | if (q) { |
1624 | 1629 | free_page((unsigned long) q->slib); | |
1625 | kfree(irq_ptr->input_qs[i]->slib); | 1630 | kmem_cache_free(qdio_q_cache, q); |
1626 | kfree(irq_ptr->input_qs[i]); | 1631 | } |
1627 | 1632 | q = irq_ptr->output_qs[i]; | |
1628 | next: | 1633 | if (q) { |
1629 | if (!irq_ptr->output_qs[i]) | 1634 | free_page((unsigned long) q->slib); |
1630 | continue; | 1635 | kmem_cache_free(qdio_q_cache, q); |
1631 | 1636 | } | |
1632 | kfree(irq_ptr->output_qs[i]->slib); | ||
1633 | kfree(irq_ptr->output_qs[i]); | ||
1634 | |||
1635 | } | 1637 | } |
1636 | kfree(irq_ptr->qdr); | 1638 | free_page((unsigned long) irq_ptr->qdr); |
1637 | free_page((unsigned long) irq_ptr); | 1639 | free_page((unsigned long) irq_ptr); |
1638 | } | 1640 | } |
1639 | 1641 | ||
@@ -1680,44 +1682,35 @@ qdio_alloc_qs(struct qdio_irq *irq_ptr, | |||
1680 | { | 1682 | { |
1681 | int i; | 1683 | int i; |
1682 | struct qdio_q *q; | 1684 | struct qdio_q *q; |
1683 | int result=-ENOMEM; | ||
1684 | |||
1685 | for (i=0;i<no_input_qs;i++) { | ||
1686 | q = kzalloc(sizeof(struct qdio_q), GFP_KERNEL); | ||
1687 | 1685 | ||
1688 | if (!q) { | 1686 | for (i = 0; i < no_input_qs; i++) { |
1689 | QDIO_PRINT_ERR("kmalloc of q failed!\n"); | 1687 | q = kmem_cache_alloc(qdio_q_cache, GFP_KERNEL); |
1690 | goto out; | 1688 | if (!q) |
1691 | } | 1689 | return -ENOMEM; |
1690 | memset(q, 0, sizeof(*q)); | ||
1692 | 1691 | ||
1693 | q->slib = kmalloc(PAGE_SIZE, GFP_KERNEL); | 1692 | q->slib = (struct slib *) __get_free_page(GFP_KERNEL); |
1694 | if (!q->slib) { | 1693 | if (!q->slib) { |
1695 | QDIO_PRINT_ERR("kmalloc of slib failed!\n"); | 1694 | kmem_cache_free(qdio_q_cache, q); |
1696 | goto out; | 1695 | return -ENOMEM; |
1697 | } | 1696 | } |
1698 | |||
1699 | irq_ptr->input_qs[i]=q; | 1697 | irq_ptr->input_qs[i]=q; |
1700 | } | 1698 | } |
1701 | 1699 | ||
1702 | for (i=0;i<no_output_qs;i++) { | 1700 | for (i = 0; i < no_output_qs; i++) { |
1703 | q = kzalloc(sizeof(struct qdio_q), GFP_KERNEL); | 1701 | q = kmem_cache_alloc(qdio_q_cache, GFP_KERNEL); |
1704 | 1702 | if (!q) | |
1705 | if (!q) { | 1703 | return -ENOMEM; |
1706 | goto out; | 1704 | memset(q, 0, sizeof(*q)); |
1707 | } | ||
1708 | 1705 | ||
1709 | q->slib=kmalloc(PAGE_SIZE,GFP_KERNEL); | 1706 | q->slib = (struct slib *) __get_free_page(GFP_KERNEL); |
1710 | if (!q->slib) { | 1707 | if (!q->slib) { |
1711 | QDIO_PRINT_ERR("kmalloc of slib failed!\n"); | 1708 | kmem_cache_free(qdio_q_cache, q); |
1712 | goto out; | 1709 | return -ENOMEM; |
1713 | } | 1710 | } |
1714 | |||
1715 | irq_ptr->output_qs[i]=q; | 1711 | irq_ptr->output_qs[i]=q; |
1716 | } | 1712 | } |
1717 | 1713 | return 0; | |
1718 | result=0; | ||
1719 | out: | ||
1720 | return result; | ||
1721 | } | 1714 | } |
1722 | 1715 | ||
1723 | static void | 1716 | static void |
@@ -2985,17 +2978,17 @@ qdio_allocate(struct qdio_initialize *init_data) | |||
2985 | QDIO_DBF_HEX0(0,setup,&irq_ptr,sizeof(void*)); | 2978 | QDIO_DBF_HEX0(0,setup,&irq_ptr,sizeof(void*)); |
2986 | 2979 | ||
2987 | if (!irq_ptr) { | 2980 | if (!irq_ptr) { |
2988 | QDIO_PRINT_ERR("kmalloc of irq_ptr failed!\n"); | 2981 | QDIO_PRINT_ERR("allocation of irq_ptr failed!\n"); |
2989 | return -ENOMEM; | 2982 | return -ENOMEM; |
2990 | } | 2983 | } |
2991 | 2984 | ||
2992 | init_MUTEX(&irq_ptr->setting_up_sema); | 2985 | init_MUTEX(&irq_ptr->setting_up_sema); |
2993 | 2986 | ||
2994 | /* QDR must be in DMA area since CCW data address is only 32 bit */ | 2987 | /* QDR must be in DMA area since CCW data address is only 32 bit */ |
2995 | irq_ptr->qdr=kmalloc(sizeof(struct qdr), GFP_KERNEL | GFP_DMA); | 2988 | irq_ptr->qdr = (struct qdr *) __get_free_page(GFP_KERNEL | GFP_DMA); |
2996 | if (!(irq_ptr->qdr)) { | 2989 | if (!(irq_ptr->qdr)) { |
2997 | free_page((unsigned long) irq_ptr); | 2990 | free_page((unsigned long) irq_ptr); |
2998 | QDIO_PRINT_ERR("kmalloc of irq_ptr->qdr failed!\n"); | 2991 | QDIO_PRINT_ERR("allocation of irq_ptr->qdr failed!\n"); |
2999 | return -ENOMEM; | 2992 | return -ENOMEM; |
3000 | } | 2993 | } |
3001 | QDIO_DBF_TEXT0(0,setup,"qdr:"); | 2994 | QDIO_DBF_TEXT0(0,setup,"qdr:"); |
@@ -3004,6 +2997,7 @@ qdio_allocate(struct qdio_initialize *init_data) | |||
3004 | if (qdio_alloc_qs(irq_ptr, | 2997 | if (qdio_alloc_qs(irq_ptr, |
3005 | init_data->no_input_qs, | 2998 | init_data->no_input_qs, |
3006 | init_data->no_output_qs)) { | 2999 | init_data->no_output_qs)) { |
3000 | QDIO_PRINT_ERR("queue allocation failed!\n"); | ||
3007 | qdio_release_irq_memory(irq_ptr); | 3001 | qdio_release_irq_memory(irq_ptr); |
3008 | return -ENOMEM; | 3002 | return -ENOMEM; |
3009 | } | 3003 | } |
@@ -3895,9 +3889,19 @@ init_QDIO(void) | |||
3895 | if (res) | 3889 | if (res) |
3896 | return res; | 3890 | return res; |
3897 | 3891 | ||
3892 | qdio_q_cache = kmem_cache_create("qdio_q", sizeof(struct qdio_q), | ||
3893 | 256, 0, NULL); | ||
3894 | if (!qdio_q_cache) { | ||
3895 | qdio_release_qdio_memory(); | ||
3896 | return -ENOMEM; | ||
3897 | } | ||
3898 | |||
3898 | res = qdio_register_dbf_views(); | 3899 | res = qdio_register_dbf_views(); |
3899 | if (res) | 3900 | if (res) { |
3901 | kmem_cache_destroy(qdio_q_cache); | ||
3902 | qdio_release_qdio_memory(); | ||
3900 | return res; | 3903 | return res; |
3904 | } | ||
3901 | 3905 | ||
3902 | QDIO_DBF_TEXT0(0,setup,"initQDIO"); | 3906 | QDIO_DBF_TEXT0(0,setup,"initQDIO"); |
3903 | res = bus_create_file(&ccw_bus_type, &bus_attr_qdio_performance_stats); | 3907 | res = bus_create_file(&ccw_bus_type, &bus_attr_qdio_performance_stats); |
@@ -3929,6 +3933,7 @@ cleanup_QDIO(void) | |||
3929 | qdio_release_qdio_memory(); | 3933 | qdio_release_qdio_memory(); |
3930 | qdio_unregister_dbf_views(); | 3934 | qdio_unregister_dbf_views(); |
3931 | mempool_destroy(qdio_mempool_scssc); | 3935 | mempool_destroy(qdio_mempool_scssc); |
3936 | kmem_cache_destroy(qdio_q_cache); | ||
3932 | bus_remove_file(&ccw_bus_type, &bus_attr_qdio_performance_stats); | 3937 | bus_remove_file(&ccw_bus_type, &bus_attr_qdio_performance_stats); |
3933 | printk("qdio: %s: module removed\n",version); | 3938 | printk("qdio: %s: module removed\n",version); |
3934 | } | 3939 | } |
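
The qdio.c changes above move the queue structures out of plain kzalloc() into a dedicated slab cache with 256-byte alignment and give each SLIB its own page; init_QDIO()/cleanup_QDIO() create and destroy the cache. A minimal sketch of that cache lifecycle with an assumed payload type (demo_q), not the real struct qdio_q:

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/string.h>

struct demo_q { char data[1024]; };	/* stand-in for struct qdio_q */

static struct kmem_cache *demo_cache;

static int demo_init(void)
{
	/* name, object size, 256-byte alignment, no flags, no constructor */
	demo_cache = kmem_cache_create("demo_q", sizeof(struct demo_q),
				       256, 0, NULL);
	return demo_cache ? 0 : -ENOMEM;
}

static struct demo_q *demo_alloc(void)
{
	struct demo_q *q = kmem_cache_alloc(demo_cache, GFP_KERNEL);

	if (q)
		memset(q, 0, sizeof(*q));	/* the cache has no constructor */
	return q;
}

static void demo_free(struct demo_q *q)
{
	kmem_cache_free(demo_cache, q);
}

static void demo_exit(void)
{
	kmem_cache_destroy(demo_cache);
}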
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index b240800b78d7..99299976e891 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -4154,8 +4154,9 @@ zfcp_fsf_send_fcp_command_task_handler(struct zfcp_fsf_req *fsf_req) | |||
4154 | fcp_rsp_iu->fcp_resid, | 4154 | fcp_rsp_iu->fcp_resid, |
4155 | (int) zfcp_get_fcp_dl(fcp_cmnd_iu)); | 4155 | (int) zfcp_get_fcp_dl(fcp_cmnd_iu)); |
4156 | 4156 | ||
4157 | scpnt->resid = fcp_rsp_iu->fcp_resid; | 4157 | scsi_set_resid(scpnt, fcp_rsp_iu->fcp_resid); |
4158 | if (scpnt->request_bufflen - scpnt->resid < scpnt->underflow) | 4158 | if (scsi_bufflen(scpnt) - scsi_get_resid(scpnt) < |
4159 | scpnt->underflow) | ||
4159 | set_host_byte(&scpnt->result, DID_ERROR); | 4160 | set_host_byte(&scpnt->result, DID_ERROR); |
4160 | } | 4161 | } |
4161 | 4162 | ||
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index c408badd2ae9..81daa8204bfe 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -36,8 +36,6 @@ static void zfcp_qdio_sbale_fill | |||
36 | (struct zfcp_fsf_req *, unsigned long, void *, int); | 36 | (struct zfcp_fsf_req *, unsigned long, void *, int); |
37 | static int zfcp_qdio_sbals_from_segment | 37 | static int zfcp_qdio_sbals_from_segment |
38 | (struct zfcp_fsf_req *, unsigned long, void *, unsigned long); | 38 | (struct zfcp_fsf_req *, unsigned long, void *, unsigned long); |
39 | static int zfcp_qdio_sbals_from_buffer | ||
40 | (struct zfcp_fsf_req *, unsigned long, void *, unsigned long, int); | ||
41 | 39 | ||
42 | static qdio_handler_t zfcp_qdio_request_handler; | 40 | static qdio_handler_t zfcp_qdio_request_handler; |
43 | static qdio_handler_t zfcp_qdio_response_handler; | 41 | static qdio_handler_t zfcp_qdio_response_handler; |
@@ -632,28 +630,6 @@ out: | |||
632 | 630 | ||
633 | 631 | ||
634 | /** | 632 | /** |
635 | * zfcp_qdio_sbals_from_buffer - fill SBALs from buffer | ||
636 | * @fsf_req: request to be processed | ||
637 | * @sbtype: SBALE flags | ||
638 | * @buffer: data buffer | ||
639 | * @length: length of buffer | ||
640 | * @max_sbals: upper bound for number of SBALs to be used | ||
641 | */ | ||
642 | static int | ||
643 | zfcp_qdio_sbals_from_buffer(struct zfcp_fsf_req *fsf_req, unsigned long sbtype, | ||
644 | void *buffer, unsigned long length, int max_sbals) | ||
645 | { | ||
646 | struct scatterlist sg_segment; | ||
647 | |||
648 | zfcp_address_to_sg(buffer, &sg_segment); | ||
649 | sg_segment.length = length; | ||
650 | |||
651 | return zfcp_qdio_sbals_from_sg(fsf_req, sbtype, &sg_segment, 1, | ||
652 | max_sbals); | ||
653 | } | ||
654 | |||
655 | |||
656 | /** | ||
657 | * zfcp_qdio_sbals_from_scsicmnd - fill SBALs from scsi command | 633 | * zfcp_qdio_sbals_from_scsicmnd - fill SBALs from scsi command |
658 | * @fsf_req: request to be processed | 634 | * @fsf_req: request to be processed |
659 | * @sbtype: SBALE flags | 635 | * @sbtype: SBALE flags |
@@ -664,18 +640,13 @@ int | |||
664 | zfcp_qdio_sbals_from_scsicmnd(struct zfcp_fsf_req *fsf_req, | 640 | zfcp_qdio_sbals_from_scsicmnd(struct zfcp_fsf_req *fsf_req, |
665 | unsigned long sbtype, struct scsi_cmnd *scsi_cmnd) | 641 | unsigned long sbtype, struct scsi_cmnd *scsi_cmnd) |
666 | { | 642 | { |
667 | if (scsi_cmnd->use_sg) { | 643 | if (scsi_sg_count(scsi_cmnd)) |
668 | return zfcp_qdio_sbals_from_sg(fsf_req, sbtype, | 644 | return zfcp_qdio_sbals_from_sg(fsf_req, sbtype, |
669 | (struct scatterlist *) | 645 | scsi_sglist(scsi_cmnd), |
670 | scsi_cmnd->request_buffer, | 646 | scsi_sg_count(scsi_cmnd), |
671 | scsi_cmnd->use_sg, | 647 | ZFCP_MAX_SBALS_PER_REQ); |
672 | ZFCP_MAX_SBALS_PER_REQ); | 648 | else |
673 | } else { | 649 | return 0; |
674 | return zfcp_qdio_sbals_from_buffer(fsf_req, sbtype, | ||
675 | scsi_cmnd->request_buffer, | ||
676 | scsi_cmnd->request_bufflen, | ||
677 | ZFCP_MAX_SBALS_PER_REQ); | ||
678 | } | ||
679 | } | 650 | } |
680 | 651 | ||
681 | /** | 652 | /** |