Diffstat (limited to 'drivers/s390/char')
-rw-r--r--  drivers/s390/char/monwriter.c |   6
-rw-r--r--  drivers/s390/char/raw3270.c   |   1
-rw-r--r--  drivers/s390/char/vmur.c      | 426
-rw-r--r--  drivers/s390/char/vmur.h      |   6
4 files changed, 284 insertions, 155 deletions
diff --git a/drivers/s390/char/monwriter.c b/drivers/s390/char/monwriter.c
index 268598ef3efe..20442fbf9346 100644
--- a/drivers/s390/char/monwriter.c
+++ b/drivers/s390/char/monwriter.c
@@ -17,6 +17,7 @@
 #include <linux/miscdevice.h>
 #include <linux/ctype.h>
 #include <linux/poll.h>
+#include <linux/mutex.h>
 #include <asm/uaccess.h>
 #include <asm/ebcdic.h>
 #include <asm/io.h>
@@ -41,6 +42,7 @@ struct mon_private {
         size_t hdr_to_read;
         size_t data_to_read;
         struct mon_buf *current_buf;
+        struct mutex thread_mutex;
 };
 
 /*
@@ -179,6 +181,7 @@ static int monwrite_open(struct inode *inode, struct file *filp)
                 return -ENOMEM;
         INIT_LIST_HEAD(&monpriv->list);
         monpriv->hdr_to_read = sizeof(monpriv->hdr);
+        mutex_init(&monpriv->thread_mutex);
         filp->private_data = monpriv;
         return nonseekable_open(inode, filp);
 }
@@ -209,6 +212,7 @@ static ssize_t monwrite_write(struct file *filp, const char __user *data,
         void *to;
         int rc;
 
+        mutex_lock(&monpriv->thread_mutex);
         for (written = 0; written < count; ) {
                 if (monpriv->hdr_to_read) {
                         len = min(count - written, monpriv->hdr_to_read);
@@ -247,11 +251,13 @@ static ssize_t monwrite_write(struct file *filp, const char __user *data,
                 }
                 monpriv->hdr_to_read = sizeof(monpriv->hdr);
         }
+        mutex_unlock(&monpriv->thread_mutex);
         return written;
 
 out_error:
         monpriv->data_to_read = 0;
         monpriv->hdr_to_read = sizeof(struct monwrite_hdr);
+        mutex_unlock(&monpriv->thread_mutex);
         return rc;
 }
 
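The monwriter hunks above serialize monwrite_write() with a per-open mutex so that two threads writing through the same file descriptor cannot interleave updates to the header/data parse state (hdr_to_read, data_to_read, current_buf). A minimal sketch of the same pattern follows; the foo_* names are hypothetical and the actual parsing is elided, so this is an illustration of the locking shape, not the driver's code:

    #include <linux/fs.h>
    #include <linux/mutex.h>
    #include <linux/slab.h>

    struct foo_private {
        struct mutex thread_mutex;      /* protects the parse state below */
        size_t hdr_to_read;
        size_t data_to_read;
    };

    static int foo_open(struct inode *inode, struct file *filp)
    {
        struct foo_private *priv = kzalloc(sizeof(*priv), GFP_KERNEL);

        if (!priv)
            return -ENOMEM;
        mutex_init(&priv->thread_mutex);
        filp->private_data = priv;
        return nonseekable_open(inode, filp);
    }

    static ssize_t foo_write(struct file *filp, const char __user *data,
                             size_t count, loff_t *ppos)
    {
        struct foo_private *priv = filp->private_data;
        ssize_t rc;

        mutex_lock(&priv->thread_mutex);
        /* parse header/data here; the state above stays consistent */
        rc = count;
        mutex_unlock(&priv->thread_mutex);
        return rc;
    }

Both the normal return and the error return must drop the mutex, which is exactly why the patch adds an unlock on the out_error path as well.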
diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c
index 4f2f81b16cfa..2edd5fb6d3dc 100644
--- a/drivers/s390/char/raw3270.c
+++ b/drivers/s390/char/raw3270.c
@@ -21,6 +21,7 @@
 #include <asm/ccwdev.h>
 #include <asm/cio.h>
 #include <asm/ebcdic.h>
+#include <asm/diag.h>
 
 #include "raw3270.h"
 
diff --git a/drivers/s390/char/vmur.c b/drivers/s390/char/vmur.c
index 161867cebd8c..d70a6e65bf14 100644
--- a/drivers/s390/char/vmur.c
+++ b/drivers/s390/char/vmur.c
@@ -14,6 +14,7 @@
 #include <asm/cio.h>
 #include <asm/ccwdev.h>
 #include <asm/debug.h>
+#include <asm/diag.h>
 
 #include "vmur.h"
 
@@ -68,8 +69,26 @@ static struct ccw_driver ur_driver = {
         .set_offline    = ur_set_offline,
 };
 
+static DEFINE_MUTEX(vmur_mutex);
+
 /*
  * Allocation, freeing, getting and putting of urdev structures
+ *
+ * Each ur device (urd) contains a reference to its corresponding ccw device
+ * (cdev) using the urd->cdev pointer. Each ccw device has a reference to the
+ * ur device using the cdev->dev.driver_data pointer.
+ *
+ * urd references:
+ * - ur_probe gets a urd reference, ur_remove drops the reference
+ *   (cdev->dev.driver_data)
+ * - ur_open gets a urd reference, ur_release drops the reference
+ *   (urf->urd)
+ *
+ * cdev references:
+ * - urdev_alloc gets a cdev reference (urd->cdev)
+ * - urdev_free drops the cdev reference (urd->cdev)
+ *
+ * Setting and clearing of cdev->dev.driver_data is protected by the ccwdev lock
  */
 static struct urdev *urdev_alloc(struct ccw_device *cdev)
 {
@@ -78,51 +97,72 @@ static struct urdev *urdev_alloc(struct ccw_device *cdev)
         urd = kzalloc(sizeof(struct urdev), GFP_KERNEL);
         if (!urd)
                 return NULL;
-        urd->cdev = cdev;
         urd->reclen = cdev->id.driver_info;
         ccw_device_get_id(cdev, &urd->dev_id);
         mutex_init(&urd->io_mutex);
         mutex_init(&urd->open_mutex);
+        atomic_set(&urd->ref_count, 1);
+        urd->cdev = cdev;
+        get_device(&cdev->dev);
         return urd;
 }
 
 static void urdev_free(struct urdev *urd)
 {
+        TRACE("urdev_free: %p\n", urd);
+        if (urd->cdev)
+                put_device(&urd->cdev->dev);
         kfree(urd);
 }
 
-/*
- * This is how the character device driver gets a reference to a
- * ur device. When this call returns successfully, a reference has
- * been taken (by get_device) on the underlying kobject. The recipient
- * of this urdev pointer must eventually drop it with urdev_put(urd)
- * which does the corresponding put_device().
- */
+static void urdev_get(struct urdev *urd)
+{
+        atomic_inc(&urd->ref_count);
+}
+
+static struct urdev *urdev_get_from_cdev(struct ccw_device *cdev)
+{
+        struct urdev *urd;
+        unsigned long flags;
+
+        spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
+        urd = cdev->dev.driver_data;
+        if (urd)
+                urdev_get(urd);
+        spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
+        return urd;
+}
+
 static struct urdev *urdev_get_from_devno(u16 devno)
 {
         char bus_id[16];
         struct ccw_device *cdev;
+        struct urdev *urd;
 
         sprintf(bus_id, "0.0.%04x", devno);
         cdev = get_ccwdev_by_busid(&ur_driver, bus_id);
         if (!cdev)
                 return NULL;
-
-        return cdev->dev.driver_data;
+        urd = urdev_get_from_cdev(cdev);
+        put_device(&cdev->dev);
+        return urd;
 }
 
 static void urdev_put(struct urdev *urd)
 {
-        put_device(&urd->cdev->dev);
+        if (atomic_dec_and_test(&urd->ref_count))
+                urdev_free(urd);
 }
 
 /*
  * Low-level functions to do I/O to a ur device.
  *     alloc_chan_prog
+ *     free_chan_prog
  *     do_ur_io
  *     ur_int_handler
  *
  * alloc_chan_prog allocates and builds the channel program
+ * free_chan_prog frees memory of the channel program
  *
  * do_ur_io issues the channel program to the device and blocks waiting
  * on a completion event it publishes at urd->io_done. The function
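The comment block and helpers above replace the old get_device()-only scheme with an explicit reference count on the urdev: urdev_get_from_cdev() takes the ccwdev lock, reads driver_data and bumps the count while the pointer is still guaranteed valid, and the final urdev_put() frees the object. A stripped-down sketch of that lookup-plus-refcount pattern, with hypothetical foo_* names (a struct kref would do the counting equally well):

    #include <linux/atomic.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>

    struct foo {
        atomic_t ref_count;
        /* ... payload ... */
    };

    static struct foo *foo_registered;          /* published under foo_lock */
    static DEFINE_SPINLOCK(foo_lock);

    static void foo_get(struct foo *f)
    {
        atomic_inc(&f->ref_count);
    }

    static void foo_put(struct foo *f)
    {
        if (atomic_dec_and_test(&f->ref_count))
            kfree(f);                           /* last reference frees it */
    }

    static struct foo *foo_lookup(void)
    {
        struct foo *f;
        unsigned long flags;

        spin_lock_irqsave(&foo_lock, flags);
        f = foo_registered;
        if (f)
            foo_get(f);     /* ref taken before the pointer can go away */
        spin_unlock_irqrestore(&foo_lock, flags);
        return f;
    }

Taking the reference inside the same critical section that teardown uses to clear the published pointer is what closes the lookup/removal race that the old code had.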
@@ -137,6 +177,16 @@ static void urdev_put(struct urdev *urd)
  * address pointer that alloc_chan_prog returned.
  */
 
+static void free_chan_prog(struct ccw1 *cpa)
+{
+        struct ccw1 *ptr = cpa;
+
+        while (ptr->cda) {
+                kfree((void *)(addr_t) ptr->cda);
+                ptr++;
+        }
+        kfree(cpa);
+}
 
 /*
  * alloc_chan_prog
@@ -144,44 +194,45 @@ static void urdev_put(struct urdev *urd)
  * with a final NOP CCW command-chained on (which ensures that CE and DE
  * are presented together in a single interrupt instead of as separate
  * interrupts unless an incorrect length indication kicks in first). The
- * data length in each CCW is reclen. The caller must ensure that count
- * is an integral multiple of reclen.
- * The channel program pointer returned by this function must be freed
- * with kfree. The caller is responsible for checking that
- * count/reclen is not ridiculously large.
+ * data length in each CCW is reclen.
  */
-static struct ccw1 *alloc_chan_prog(char *buf, size_t count, size_t reclen)
+static struct ccw1 *alloc_chan_prog(const char __user *ubuf, int rec_count,
+                                    int reclen)
 {
-        size_t num_ccws;
         struct ccw1 *cpa;
+        void *kbuf;
         int i;
 
-        TRACE("alloc_chan_prog(%p, %zu, %zu)\n", buf, count, reclen);
+        TRACE("alloc_chan_prog(%p, %i, %i)\n", ubuf, rec_count, reclen);
 
         /*
          * We chain a NOP onto the writes to force CE+DE together.
          * That means we allocate room for CCWs to cover count/reclen
          * records plus a NOP.
          */
-        num_ccws = count / reclen + 1;
-        cpa = kmalloc(num_ccws * sizeof(struct ccw1), GFP_KERNEL | GFP_DMA);
+        cpa = kzalloc((rec_count + 1) * sizeof(struct ccw1),
+                      GFP_KERNEL | GFP_DMA);
         if (!cpa)
-                return NULL;
+                return ERR_PTR(-ENOMEM);
 
-        for (i = 0; count; i++) {
+        for (i = 0; i < rec_count; i++) {
                 cpa[i].cmd_code = WRITE_CCW_CMD;
                 cpa[i].flags = CCW_FLAG_CC | CCW_FLAG_SLI;
                 cpa[i].count = reclen;
-                cpa[i].cda = __pa(buf);
-                buf += reclen;
-                count -= reclen;
+                kbuf = kmalloc(reclen, GFP_KERNEL | GFP_DMA);
+                if (!kbuf) {
+                        free_chan_prog(cpa);
+                        return ERR_PTR(-ENOMEM);
+                }
+                cpa[i].cda = (u32)(addr_t) kbuf;
+                if (copy_from_user(kbuf, ubuf, reclen)) {
+                        free_chan_prog(cpa);
+                        return ERR_PTR(-EFAULT);
+                }
+                ubuf += reclen;
         }
         /* The following NOP CCW forces CE+DE to be presented together */
         cpa[i].cmd_code = CCW_CMD_NOOP;
-        cpa[i].flags = 0;
-        cpa[i].count = 0;
-        cpa[i].cda = 0;
-
         return cpa;
 }
 
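alloc_chan_prog() now reports failures through the returned pointer itself (ERR_PTR(-ENOMEM) vs. ERR_PTR(-EFAULT)), and do_write() further down decodes them with IS_ERR()/PTR_ERR(). A small, self-contained illustration of that convention, using a hypothetical thing_alloc() rather than the driver's allocator:

    #include <linux/err.h>
    #include <linux/slab.h>
    #include <linux/uaccess.h>

    struct thing {
        char buf[64];
    };

    static struct thing *thing_alloc(const char __user *ubuf, size_t len)
    {
        struct thing *t;

        if (len > sizeof(t->buf))
            return ERR_PTR(-EINVAL);            /* errno encoded in pointer */
        t = kzalloc(sizeof(*t), GFP_KERNEL);
        if (!t)
            return ERR_PTR(-ENOMEM);
        if (copy_from_user(t->buf, ubuf, len)) {
            kfree(t);
            return ERR_PTR(-EFAULT);
        }
        return t;                               /* valid pointer on success */
    }

    static int thing_use(const char __user *ubuf, size_t len)
    {
        struct thing *t = thing_alloc(ubuf, len);

        if (IS_ERR(t))
            return PTR_ERR(t);                  /* propagate the real errno */
        /* ... use t ... */
        kfree(t);
        return 0;
    }

Encoding the errno in the pointer lets the caller distinguish an allocation failure from a bad user buffer without an extra output parameter.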
@@ -189,7 +240,7 @@ static int do_ur_io(struct urdev *urd, struct ccw1 *cpa)
 {
         int rc;
         struct ccw_device *cdev = urd->cdev;
-        DECLARE_COMPLETION(event);
+        DECLARE_COMPLETION_ONSTACK(event);
 
         TRACE("do_ur_io: cpa=%p\n", cpa);
 
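do_ur_io() switches from DECLARE_COMPLETION() to DECLARE_COMPLETION_ONSTACK(), the variant intended for completion objects that live on the stack of the sleeping task (it keeps the lockdep/debug-objects machinery happy). The usual shape of that pattern, sketched with hypothetical names and the actual I/O submission elided:

    #include <linux/completion.h>

    struct foo_request {
        struct completion *done;    /* points into the submitter's stack */
        int result;
    };

    /* called from interrupt context when the hardware finishes */
    static void foo_irq_done(struct foo_request *req, int result)
    {
        req->result = result;
        complete(req->done);
    }

    static int foo_submit_and_wait(struct foo_request *req)
    {
        DECLARE_COMPLETION_ONSTACK(done);

        req->done = &done;
        /* ... hand req to the hardware; foo_irq_done() runs later ... */
        wait_for_completion(&done);
        return req->result;
    }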
@@ -232,6 +283,7 @@ static void ur_int_handler(struct ccw_device *cdev, unsigned long intparm,
                 return;
         }
         urd = cdev->dev.driver_data;
+        BUG_ON(!urd);
         /* On special conditions irb is an error pointer */
         if (IS_ERR(irb))
                 urd->io_request_rc = PTR_ERR(irb);
@@ -249,9 +301,15 @@ static void ur_int_handler(struct ccw_device *cdev, unsigned long intparm,
 static ssize_t ur_attr_reclen_show(struct device *dev,
                                    struct device_attribute *attr, char *buf)
 {
-        struct urdev *urd = dev->driver_data;
+        struct urdev *urd;
+        int rc;
 
-        return sprintf(buf, "%zu\n", urd->reclen);
+        urd = urdev_get_from_cdev(to_ccwdev(dev));
+        if (!urd)
+                return -ENODEV;
+        rc = sprintf(buf, "%zu\n", urd->reclen);
+        urdev_put(urd);
+        return rc;
 }
 
 static DEVICE_ATTR(reclen, 0444, ur_attr_reclen_show, NULL);
@@ -325,24 +383,11 @@ static ssize_t do_write(struct urdev *urd, const char __user *udata,
                 size_t count, size_t reclen, loff_t *ppos)
 {
         struct ccw1 *cpa;
-        char *buf;
         int rc;
 
-        /* Data buffer must be under 2GB line for fmt1 CCWs: hence GFP_DMA */
-        buf = kmalloc(count, GFP_KERNEL | GFP_DMA);
-        if (!buf)
-                return -ENOMEM;
-
-        if (copy_from_user(buf, udata, count)) {
-                rc = -EFAULT;
-                goto fail_kfree_buf;
-        }
-
-        cpa = alloc_chan_prog(buf, count, reclen);
-        if (!cpa) {
-                rc = -ENOMEM;
-                goto fail_kfree_buf;
-        }
+        cpa = alloc_chan_prog(udata, count / reclen, reclen);
+        if (IS_ERR(cpa))
+                return PTR_ERR(cpa);
 
         rc = do_ur_io(urd, cpa);
         if (rc)
@@ -354,10 +399,9 @@ static ssize_t do_write(struct urdev *urd, const char __user *udata,
         }
         *ppos += count;
         rc = count;
+
 fail_kfree_cpa:
-        kfree(cpa);
-fail_kfree_buf:
-        kfree(buf);
+        free_chan_prog(cpa);
         return rc;
 }
 
@@ -380,31 +424,6 @@ static ssize_t ur_write(struct file *file, const char __user *udata,
         return do_write(urf->urd, udata, count, urf->dev_reclen, ppos);
 }
 
-static int do_diag_14(unsigned long rx, unsigned long ry1,
-                      unsigned long subcode)
-{
-        register unsigned long _ry1 asm("2") = ry1;
-        register unsigned long _ry2 asm("3") = subcode;
-        int rc = 0;
-
-        asm volatile(
-#ifdef CONFIG_64BIT
-                "   sam31\n"
-                "   diag %2,2,0x14\n"
-                "   sam64\n"
-#else
-                "   diag %2,2,0x14\n"
-#endif
-                "   ipm %0\n"
-                "   srl %0,28\n"
-                : "=d" (rc), "+d" (_ry2)
-                : "d" (rx), "d" (_ry1)
-                : "cc");
-
-        TRACE("diag 14: subcode=0x%lx, cc=%i\n", subcode, rc);
-        return rc;
-}
-
 /*
  * diagnose code 0x14 subcode 0x0028 - position spool file to designated
  * record
@@ -416,7 +435,7 @@ static int diag_position_to_record(int devno, int record)
 {
         int cc;
 
-        cc = do_diag_14(record, devno, 0x28);
+        cc = diag14(record, devno, 0x28);
         switch (cc) {
         case 0:
                 return 0;
@@ -441,7 +460,7 @@ static int diag_read_file(int devno, char *buf)
 {
         int cc;
 
-        cc = do_diag_14((unsigned long) buf, devno, 0x00);
+        cc = diag14((unsigned long) buf, devno, 0x00);
         switch (cc) {
         case 0:
                 return 0;
@@ -473,7 +492,7 @@ static ssize_t diag14_read(struct file *file, char __user *ubuf, size_t count,
                 return rc;
 
         len = min((size_t) PAGE_SIZE, count);
-        buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+        buf = (char *) __get_free_page(GFP_KERNEL | GFP_DMA);
         if (!buf)
                 return -ENOMEM;
 
@@ -500,7 +519,7 @@ static ssize_t diag14_read(struct file *file, char __user *ubuf, size_t count,
         *offs += copied;
         rc = copied;
 fail:
-        kfree(buf);
+        free_page((unsigned long) buf);
         return rc;
 }
 
@@ -534,7 +553,7 @@ static int diag_read_next_file_info(struct file_control_block *buf, int spid)
 {
         int cc;
 
-        cc = do_diag_14((unsigned long) buf, spid, 0xfff);
+        cc = diag14((unsigned long) buf, spid, 0xfff);
         switch (cc) {
         case 0:
                 return 0;
@@ -543,56 +562,97 @@ static int diag_read_next_file_info(struct file_control_block *buf, int spid)
         }
 }
 
-static int verify_device(struct urdev *urd)
+static int verify_uri_device(struct urdev *urd)
 {
-        struct file_control_block fcb;
+        struct file_control_block *fcb;
         char *buf;
         int rc;
 
+        fcb = kmalloc(sizeof(*fcb), GFP_KERNEL | GFP_DMA);
+        if (!fcb)
+                return -ENOMEM;
+
+        /* check for empty reader device (beginning of chain) */
+        rc = diag_read_next_file_info(fcb, 0);
+        if (rc)
+                goto fail_free_fcb;
+
+        /* if file is in hold status, we do not read it */
+        if (fcb->file_stat & (FLG_SYSTEM_HOLD | FLG_USER_HOLD)) {
+                rc = -EPERM;
+                goto fail_free_fcb;
+        }
+
+        /* open file on virtual reader */
+        buf = (char *) __get_free_page(GFP_KERNEL | GFP_DMA);
+        if (!buf) {
+                rc = -ENOMEM;
+                goto fail_free_fcb;
+        }
+        rc = diag_read_file(urd->dev_id.devno, buf);
+        if ((rc != 0) && (rc != -ENODATA)) /* EOF does not hurt */
+                goto fail_free_buf;
+
+        /* check if the file on top of the queue is open now */
+        rc = diag_read_next_file_info(fcb, 0);
+        if (rc)
+                goto fail_free_buf;
+        if (!(fcb->file_stat & FLG_IN_USE)) {
+                rc = -EMFILE;
+                goto fail_free_buf;
+        }
+        rc = 0;
+
+fail_free_buf:
+        free_page((unsigned long) buf);
+fail_free_fcb:
+        kfree(fcb);
+        return rc;
+}
+
+static int verify_device(struct urdev *urd)
+{
         switch (urd->class) {
         case DEV_CLASS_UR_O:
                 return 0; /* no check needed here */
         case DEV_CLASS_UR_I:
-                /* check for empty reader device (beginning of chain) */
-                rc = diag_read_next_file_info(&fcb, 0);
-                if (rc)
-                        return rc;
-
-                /* open file on virtual reader */
-                buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
-                if (!buf)
-                        return -ENOMEM;
-                rc = diag_read_file(urd->dev_id.devno, buf);
-                kfree(buf);
-
-                if ((rc != 0) && (rc != -ENODATA)) /* EOF does not hurt */
-                        return rc;
-                return 0;
+                return verify_uri_device(urd);
         default:
                 return -ENOTSUPP;
         }
 }
 
-static int get_file_reclen(struct urdev *urd)
+static int get_uri_file_reclen(struct urdev *urd)
 {
-        struct file_control_block fcb;
+        struct file_control_block *fcb;
         int rc;
 
+        fcb = kmalloc(sizeof(*fcb), GFP_KERNEL | GFP_DMA);
+        if (!fcb)
+                return -ENOMEM;
+        rc = diag_read_next_file_info(fcb, 0);
+        if (rc)
+                goto fail_free;
+        if (fcb->file_stat & FLG_CP_DUMP)
+                rc = 0;
+        else
+                rc = fcb->rec_len;
+
+fail_free:
+        kfree(fcb);
+        return rc;
+}
+
+static int get_file_reclen(struct urdev *urd)
+{
         switch (urd->class) {
         case DEV_CLASS_UR_O:
                 return 0;
         case DEV_CLASS_UR_I:
-                rc = diag_read_next_file_info(&fcb, 0);
-                if (rc)
-                        return rc;
-                break;
+                return get_uri_file_reclen(urd);
         default:
                 return -ENOTSUPP;
         }
-        if (fcb.file_stat & FLG_CP_DUMP)
-                return 0;
-
-        return fcb.rec_len;
 }
 
 static int ur_open(struct inode *inode, struct file *file)
@@ -710,64 +770,63 @@ static struct file_operations ur_fops = {
 
 /*
  * ccw_device infrastructure:
- *   ur_probe gets its own ref to the device (i.e. get_device),
- *   creates the struct urdev, the device attributes, sets up
- *   the interrupt handler and validates the virtual unit record device.
- *   ur_remove removes the device attributes, frees the struct urdev
- *   and drops (put_device) the ref to the device we got in ur_probe.
+ *   ur_probe creates the struct urdev (with refcount = 1), the device
+ *   attributes, sets up the interrupt handler and validates the virtual
+ *   unit record device.
+ *   ur_remove removes the device attributes and drops the reference to
+ *   struct urdev.
+ *
+ *   ur_probe, ur_remove, ur_set_online and ur_set_offline are serialized
+ *   by the vmur_mutex lock.
+ *
+ *   urd->char_device is used as indication that the online function has
+ *   been completed successfully.
  */
 static int ur_probe(struct ccw_device *cdev)
 {
         struct urdev *urd;
         int rc;
 
-        TRACE("ur_probe: cdev=%p state=%d\n", cdev, *(int *) cdev->private);
-
-        if (!get_device(&cdev->dev))
-                return -ENODEV;
+        TRACE("ur_probe: cdev=%p\n", cdev);
 
+        mutex_lock(&vmur_mutex);
         urd = urdev_alloc(cdev);
         if (!urd) {
                 rc = -ENOMEM;
-                goto fail;
+                goto fail_unlock;
         }
+
         rc = ur_create_attributes(&cdev->dev);
         if (rc) {
                 rc = -ENOMEM;
-                goto fail;
+                goto fail_urdev_put;
         }
-        cdev->dev.driver_data = urd;
         cdev->handler = ur_int_handler;
 
         /* validate virtual unit record device */
         urd->class = get_urd_class(urd);
         if (urd->class < 0) {
                 rc = urd->class;
-                goto fail;
+                goto fail_remove_attr;
         }
         if ((urd->class != DEV_CLASS_UR_I) && (urd->class != DEV_CLASS_UR_O)) {
                 rc = -ENOTSUPP;
-                goto fail;
+                goto fail_remove_attr;
         }
+        spin_lock_irq(get_ccwdev_lock(cdev));
+        cdev->dev.driver_data = urd;
+        spin_unlock_irq(get_ccwdev_lock(cdev));
 
+        mutex_unlock(&vmur_mutex);
         return 0;
 
-fail:
-        urdev_free(urd);
-        put_device(&cdev->dev);
-        return rc;
-}
-
-static void ur_remove(struct ccw_device *cdev)
-{
-        struct urdev *urd = cdev->dev.driver_data;
-
-        TRACE("ur_remove\n");
-        if (cdev->online)
-                ur_set_offline(cdev);
+fail_remove_attr:
         ur_remove_attributes(&cdev->dev);
-        urdev_free(urd);
-        put_device(&cdev->dev);
+fail_urdev_put:
+        urdev_put(urd);
+fail_unlock:
+        mutex_unlock(&vmur_mutex);
+        return rc;
 }
 
 static int ur_set_online(struct ccw_device *cdev)
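ur_probe() now takes vmur_mutex for the whole registration, publishes cdev->dev.driver_data only after the device has been fully validated (under the ccwdev lock, so urdev_get_from_cdev() never sees a half-initialized object), and unwinds with stacked failure labels in reverse order of setup. The label idiom in isolation, as a sketch with hypothetical register_thing()/validate_thing()/unregister_thing() helpers:

    #include <linux/mutex.h>
    #include <linux/slab.h>

    int register_thing(void *state);            /* hypothetical helpers */
    int validate_thing(void *state);
    void unregister_thing(void *state);

    static DEFINE_MUTEX(setup_mutex);

    static int demo_probe(void)
    {
        void *state;
        int rc;

        mutex_lock(&setup_mutex);
        state = kzalloc(128, GFP_KERNEL);
        if (!state) {
            rc = -ENOMEM;
            goto fail_unlock;
        }
        rc = register_thing(state);
        if (rc)
            goto fail_free_state;
        rc = validate_thing(state);
        if (rc)
            goto fail_unregister;

        mutex_unlock(&setup_mutex);
        return 0;

    fail_unregister:
        unregister_thing(state);
    fail_free_state:
        kfree(state);
    fail_unlock:
        mutex_unlock(&setup_mutex);
        return rc;
    }

Each label undoes exactly the steps that succeeded before the failure, which is why the patch renames the single "fail:" label into fail_remove_attr/fail_urdev_put/fail_unlock.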
@@ -776,20 +835,29 @@ static int ur_set_online(struct ccw_device *cdev)
         int minor, major, rc;
         char node_id[16];
 
-        TRACE("ur_set_online: cdev=%p state=%d\n", cdev,
-              *(int *) cdev->private);
+        TRACE("ur_set_online: cdev=%p\n", cdev);
 
-        if (!try_module_get(ur_driver.owner))
-                return -EINVAL;
+        mutex_lock(&vmur_mutex);
+        urd = urdev_get_from_cdev(cdev);
+        if (!urd) {
+                /* ur_remove already deleted our urd */
+                rc = -ENODEV;
+                goto fail_unlock;
+        }
+
+        if (urd->char_device) {
+                /* Another ur_set_online was faster */
+                rc = -EBUSY;
+                goto fail_urdev_put;
+        }
 
-        urd = (struct urdev *) cdev->dev.driver_data;
         minor = urd->dev_id.devno;
         major = MAJOR(ur_first_dev_maj_min);
 
         urd->char_device = cdev_alloc();
         if (!urd->char_device) {
                 rc = -ENOMEM;
-                goto fail_module_put;
+                goto fail_urdev_put;
         }
 
         cdev_init(urd->char_device, &ur_fops);
@@ -818,29 +886,79 @@ static int ur_set_online(struct ccw_device *cdev)
                 TRACE("ur_set_online: device_create rc=%d\n", rc);
                 goto fail_free_cdev;
         }
-
+        urdev_put(urd);
+        mutex_unlock(&vmur_mutex);
         return 0;
 
 fail_free_cdev:
         cdev_del(urd->char_device);
-fail_module_put:
-        module_put(ur_driver.owner);
-
+        urd->char_device = NULL;
+fail_urdev_put:
+        urdev_put(urd);
+fail_unlock:
+        mutex_unlock(&vmur_mutex);
         return rc;
 }
 
-static int ur_set_offline(struct ccw_device *cdev)
+static int ur_set_offline_force(struct ccw_device *cdev, int force)
 {
         struct urdev *urd;
+        int rc;
 
-        TRACE("ur_set_offline: cdev=%p cdev->private=%p state=%d\n",
-                cdev, cdev->private, *(int *) cdev->private);
-        urd = (struct urdev *) cdev->dev.driver_data;
+        TRACE("ur_set_offline: cdev=%p\n", cdev);
+        urd = urdev_get_from_cdev(cdev);
+        if (!urd)
+                /* ur_remove already deleted our urd */
+                return -ENODEV;
+        if (!urd->char_device) {
+                /* Another ur_set_offline was faster */
+                rc = -EBUSY;
+                goto fail_urdev_put;
+        }
+        if (!force && (atomic_read(&urd->ref_count) > 2)) {
+                /* There is still a user of urd (e.g. ur_open) */
+                TRACE("ur_set_offline: BUSY\n");
+                rc = -EBUSY;
+                goto fail_urdev_put;
+        }
         device_destroy(vmur_class, urd->char_device->dev);
         cdev_del(urd->char_device);
-        module_put(ur_driver.owner);
+        urd->char_device = NULL;
+        rc = 0;
 
-        return 0;
+fail_urdev_put:
+        urdev_put(urd);
+        return rc;
+}
+
+static int ur_set_offline(struct ccw_device *cdev)
+{
+        int rc;
+
+        mutex_lock(&vmur_mutex);
+        rc = ur_set_offline_force(cdev, 0);
+        mutex_unlock(&vmur_mutex);
+        return rc;
+}
+
+static void ur_remove(struct ccw_device *cdev)
+{
+        unsigned long flags;
+
+        TRACE("ur_remove\n");
+
+        mutex_lock(&vmur_mutex);
+
+        if (cdev->online)
+                ur_set_offline_force(cdev, 1);
+        ur_remove_attributes(&cdev->dev);
+
+        spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
+        urdev_put(cdev->dev.driver_data);
+        cdev->dev.driver_data = NULL;
+        spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
+
+        mutex_unlock(&vmur_mutex);
 }
 
 /*
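ur_set_offline_force() uses the reference count itself as the busy test: probe's reference plus the one just taken by urdev_get_from_cdev() account for two, so anything above that means an ur_open() is still holding the device and a normal offline must fail with -EBUSY, while ur_remove() passes force=1 to tear down anyway. The check in isolation, sketched against a hypothetical refcounted object:

    #include <linux/atomic.h>

    struct foo {
        atomic_t ref_count;
    };

    /* caller already holds one lookup reference on f */
    static int foo_try_offline(struct foo *f, int force)
    {
        /* registration ref + our lookup ref == 2; more means open users */
        if (!force && atomic_read(&f->ref_count) > 2)
            return -EBUSY;
        /* ... destroy the char device, mark the object offline ... */
        return 0;
    }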
diff --git a/drivers/s390/char/vmur.h b/drivers/s390/char/vmur.h
index 16d0a4e38e40..fa959644735a 100644
--- a/drivers/s390/char/vmur.h
+++ b/drivers/s390/char/vmur.h
@@ -50,7 +50,10 @@ struct file_control_block {
         char rest[200];
 } __attribute__ ((packed));
 
-#define FLG_CP_DUMP     0x10
+#define FLG_SYSTEM_HOLD 0x04
+#define FLG_CP_DUMP     0x10
+#define FLG_USER_HOLD   0x20
+#define FLG_IN_USE      0x80
 
55/* 58/*
56 * A struct urdev is created for each ur device that is made available 59 * A struct urdev is created for each ur device that is made available
@@ -67,6 +70,7 @@ struct urdev {
         size_t reclen;            /* Record length for *write* CCWs */
         int class;                /* VM device class */
         int io_request_rc;        /* return code from I/O request */
+        atomic_t ref_count;       /* reference counter */
 };
 
 /*