about summary refs log tree commit diff stats
path: root/drivers/s390/char/vmur.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/s390/char/vmur.c')
-rw-r--r--  drivers/s390/char/vmur.c | 176
1 file changed, 108 insertions, 68 deletions
diff --git a/drivers/s390/char/vmur.c b/drivers/s390/char/vmur.c
index 161867cebd8c..04b19bdc09da 100644
--- a/drivers/s390/char/vmur.c
+++ b/drivers/s390/char/vmur.c
@@ -119,10 +119,12 @@ static void urdev_put(struct urdev *urd)
119/* 119/*
120 * Low-level functions to do I/O to a ur device. 120 * Low-level functions to do I/O to a ur device.
121 * alloc_chan_prog 121 * alloc_chan_prog
122 * free_chan_prog
122 * do_ur_io 123 * do_ur_io
123 * ur_int_handler 124 * ur_int_handler
124 * 125 *
125 * alloc_chan_prog allocates and builds the channel program 126 * alloc_chan_prog allocates and builds the channel program
127 * free_chan_prog frees memory of the channel program
126 * 128 *
127 * do_ur_io issues the channel program to the device and blocks waiting 129 * do_ur_io issues the channel program to the device and blocks waiting
128 * on a completion event it publishes at urd->io_done. The function 130 * on a completion event it publishes at urd->io_done. The function
@@ -137,6 +139,16 @@ static void urdev_put(struct urdev *urd)
137 * address pointer that alloc_chan_prog returned. 139 * address pointer that alloc_chan_prog returned.
138 */ 140 */
139 141
142static void free_chan_prog(struct ccw1 *cpa)
143{
144 struct ccw1 *ptr = cpa;
145
146 while (ptr->cda) {
147 kfree((void *)(addr_t) ptr->cda);
148 ptr++;
149 }
150 kfree(cpa);
151}
140 152
141/* 153/*
142 * alloc_chan_prog 154 * alloc_chan_prog
@@ -144,44 +156,45 @@ static void urdev_put(struct urdev *urd)
144 * with a final NOP CCW command-chained on (which ensures that CE and DE 156 * with a final NOP CCW command-chained on (which ensures that CE and DE
145 * are presented together in a single interrupt instead of as separate 157 * are presented together in a single interrupt instead of as separate
146 * interrupts unless an incorrect length indication kicks in first). The 158 * interrupts unless an incorrect length indication kicks in first). The
147 * data length in each CCW is reclen. The caller must ensure that count 159 * data length in each CCW is reclen.
148 * is an integral multiple of reclen.
149 * The channel program pointer returned by this function must be freed
150 * with kfree. The caller is responsible for checking that
151 * count/reclen is not ridiculously large.
152 */ 160 */
153static struct ccw1 *alloc_chan_prog(char *buf, size_t count, size_t reclen) 161static struct ccw1 *alloc_chan_prog(const char __user *ubuf, int rec_count,
162 int reclen)
154{ 163{
155 size_t num_ccws;
156 struct ccw1 *cpa; 164 struct ccw1 *cpa;
165 void *kbuf;
157 int i; 166 int i;
158 167
159 TRACE("alloc_chan_prog(%p, %zu, %zu)\n", buf, count, reclen); 168 TRACE("alloc_chan_prog(%p, %i, %i)\n", ubuf, rec_count, reclen);
160 169
161 /* 170 /*
162 * We chain a NOP onto the writes to force CE+DE together. 171 * We chain a NOP onto the writes to force CE+DE together.
163 * That means we allocate room for CCWs to cover count/reclen 172 * That means we allocate room for CCWs to cover count/reclen
164 * records plus a NOP. 173 * records plus a NOP.
165 */ 174 */
166 num_ccws = count / reclen + 1; 175 cpa = kzalloc((rec_count + 1) * sizeof(struct ccw1),
167 cpa = kmalloc(num_ccws * sizeof(struct ccw1), GFP_KERNEL | GFP_DMA); 176 GFP_KERNEL | GFP_DMA);
168 if (!cpa) 177 if (!cpa)
169 return NULL; 178 return ERR_PTR(-ENOMEM);
170 179
171 for (i = 0; count; i++) { 180 for (i = 0; i < rec_count; i++) {
172 cpa[i].cmd_code = WRITE_CCW_CMD; 181 cpa[i].cmd_code = WRITE_CCW_CMD;
173 cpa[i].flags = CCW_FLAG_CC | CCW_FLAG_SLI; 182 cpa[i].flags = CCW_FLAG_CC | CCW_FLAG_SLI;
174 cpa[i].count = reclen; 183 cpa[i].count = reclen;
175 cpa[i].cda = __pa(buf); 184 kbuf = kmalloc(reclen, GFP_KERNEL | GFP_DMA);
176 buf += reclen; 185 if (!kbuf) {
177 count -= reclen; 186 free_chan_prog(cpa);
187 return ERR_PTR(-ENOMEM);
188 }
189 cpa[i].cda = (u32)(addr_t) kbuf;
190 if (copy_from_user(kbuf, ubuf, reclen)) {
191 free_chan_prog(cpa);
192 return ERR_PTR(-EFAULT);
193 }
194 ubuf += reclen;
178 } 195 }
179 /* The following NOP CCW forces CE+DE to be presented together */ 196 /* The following NOP CCW forces CE+DE to be presented together */
180 cpa[i].cmd_code = CCW_CMD_NOOP; 197 cpa[i].cmd_code = CCW_CMD_NOOP;
181 cpa[i].flags = 0;
182 cpa[i].count = 0;
183 cpa[i].cda = 0;
184
185 return cpa; 198 return cpa;
186} 199}
187 200
@@ -189,7 +202,7 @@ static int do_ur_io(struct urdev *urd, struct ccw1 *cpa)
189{ 202{
190 int rc; 203 int rc;
191 struct ccw_device *cdev = urd->cdev; 204 struct ccw_device *cdev = urd->cdev;
192 DECLARE_COMPLETION(event); 205 DECLARE_COMPLETION_ONSTACK(event);
193 206
194 TRACE("do_ur_io: cpa=%p\n", cpa); 207 TRACE("do_ur_io: cpa=%p\n", cpa);
195 208
@@ -325,24 +338,11 @@ static ssize_t do_write(struct urdev *urd, const char __user *udata,
325 size_t count, size_t reclen, loff_t *ppos) 338 size_t count, size_t reclen, loff_t *ppos)
326{ 339{
327 struct ccw1 *cpa; 340 struct ccw1 *cpa;
328 char *buf;
329 int rc; 341 int rc;
330 342
331 /* Data buffer must be under 2GB line for fmt1 CCWs: hence GFP_DMA */ 343 cpa = alloc_chan_prog(udata, count / reclen, reclen);
332 buf = kmalloc(count, GFP_KERNEL | GFP_DMA); 344 if (IS_ERR(cpa))
333 if (!buf) 345 return PTR_ERR(cpa);
334 return -ENOMEM;
335
336 if (copy_from_user(buf, udata, count)) {
337 rc = -EFAULT;
338 goto fail_kfree_buf;
339 }
340
341 cpa = alloc_chan_prog(buf, count, reclen);
342 if (!cpa) {
343 rc = -ENOMEM;
344 goto fail_kfree_buf;
345 }
346 346
347 rc = do_ur_io(urd, cpa); 347 rc = do_ur_io(urd, cpa);
348 if (rc) 348 if (rc)
@@ -354,10 +354,9 @@ static ssize_t do_write(struct urdev *urd, const char __user *udata,
354 } 354 }
355 *ppos += count; 355 *ppos += count;
356 rc = count; 356 rc = count;
357
357fail_kfree_cpa: 358fail_kfree_cpa:
358 kfree(cpa); 359 free_chan_prog(cpa);
359fail_kfree_buf:
360 kfree(buf);
361 return rc; 360 return rc;
362} 361}
363 362
@@ -473,7 +472,7 @@ static ssize_t diag14_read(struct file *file, char __user *ubuf, size_t count,
473 return rc; 472 return rc;
474 473
475 len = min((size_t) PAGE_SIZE, count); 474 len = min((size_t) PAGE_SIZE, count);
476 buf = kmalloc(PAGE_SIZE, GFP_KERNEL); 475 buf = (char *) __get_free_page(GFP_KERNEL | GFP_DMA);
477 if (!buf) 476 if (!buf)
478 return -ENOMEM; 477 return -ENOMEM;
479 478
@@ -500,7 +499,7 @@ static ssize_t diag14_read(struct file *file, char __user *ubuf, size_t count,
500 *offs += copied; 499 *offs += copied;
501 rc = copied; 500 rc = copied;
502fail: 501fail:
503 kfree(buf); 502 free_page((unsigned long) buf);
504 return rc; 503 return rc;
505} 504}
506 505
@@ -543,56 +542,97 @@ static int diag_read_next_file_info(struct file_control_block *buf, int spid)
543 } 542 }
544} 543}
545 544
546static int verify_device(struct urdev *urd) 545static int verify_uri_device(struct urdev *urd)
547{ 546{
548 struct file_control_block fcb; 547 struct file_control_block *fcb;
549 char *buf; 548 char *buf;
550 int rc; 549 int rc;
551 550
551 fcb = kmalloc(sizeof(*fcb), GFP_KERNEL | GFP_DMA);
552 if (!fcb)
553 return -ENOMEM;
554
555 /* check for empty reader device (beginning of chain) */
556 rc = diag_read_next_file_info(fcb, 0);
557 if (rc)
558 goto fail_free_fcb;
559
560 /* if file is in hold status, we do not read it */
561 if (fcb->file_stat & (FLG_SYSTEM_HOLD | FLG_USER_HOLD)) {
562 rc = -EPERM;
563 goto fail_free_fcb;
564 }
565
566 /* open file on virtual reader */
567 buf = (char *) __get_free_page(GFP_KERNEL | GFP_DMA);
568 if (!buf) {
569 rc = -ENOMEM;
570 goto fail_free_fcb;
571 }
572 rc = diag_read_file(urd->dev_id.devno, buf);
573 if ((rc != 0) && (rc != -ENODATA)) /* EOF does not hurt */
574 goto fail_free_buf;
575
576 /* check if the file on top of the queue is open now */
577 rc = diag_read_next_file_info(fcb, 0);
578 if (rc)
579 goto fail_free_buf;
580 if (!(fcb->file_stat & FLG_IN_USE)) {
581 rc = -EMFILE;
582 goto fail_free_buf;
583 }
584 rc = 0;
585
586fail_free_buf:
587 free_page((unsigned long) buf);
588fail_free_fcb:
589 kfree(fcb);
590 return rc;
591}
592
593static int verify_device(struct urdev *urd)
594{
552 switch (urd->class) { 595 switch (urd->class) {
553 case DEV_CLASS_UR_O: 596 case DEV_CLASS_UR_O:
554 return 0; /* no check needed here */ 597 return 0; /* no check needed here */
555 case DEV_CLASS_UR_I: 598 case DEV_CLASS_UR_I:
556 /* check for empty reader device (beginning of chain) */ 599 return verify_uri_device(urd);
557 rc = diag_read_next_file_info(&fcb, 0);
558 if (rc)
559 return rc;
560
561 /* open file on virtual reader */
562 buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
563 if (!buf)
564 return -ENOMEM;
565 rc = diag_read_file(urd->dev_id.devno, buf);
566 kfree(buf);
567
568 if ((rc != 0) && (rc != -ENODATA)) /* EOF does not hurt */
569 return rc;
570 return 0;
571 default: 600 default:
572 return -ENOTSUPP; 601 return -ENOTSUPP;
573 } 602 }
574} 603}
575 604
576static int get_file_reclen(struct urdev *urd) 605static int get_uri_file_reclen(struct urdev *urd)
577{ 606{
578 struct file_control_block fcb; 607 struct file_control_block *fcb;
579 int rc; 608 int rc;
580 609
610 fcb = kmalloc(sizeof(*fcb), GFP_KERNEL | GFP_DMA);
611 if (!fcb)
612 return -ENOMEM;
613 rc = diag_read_next_file_info(fcb, 0);
614 if (rc)
615 goto fail_free;
616 if (fcb->file_stat & FLG_CP_DUMP)
617 rc = 0;
618 else
619 rc = fcb->rec_len;
620
621fail_free:
622 kfree(fcb);
623 return rc;
624}
625
626static int get_file_reclen(struct urdev *urd)
627{
581 switch (urd->class) { 628 switch (urd->class) {
582 case DEV_CLASS_UR_O: 629 case DEV_CLASS_UR_O:
583 return 0; 630 return 0;
584 case DEV_CLASS_UR_I: 631 case DEV_CLASS_UR_I:
585 rc = diag_read_next_file_info(&fcb, 0); 632 return get_uri_file_reclen(urd);
586 if (rc)
587 return rc;
588 break;
589 default: 633 default:
590 return -ENOTSUPP; 634 return -ENOTSUPP;
591 } 635 }
592 if (fcb.file_stat & FLG_CP_DUMP)
593 return 0;
594
595 return fcb.rec_len;
596} 636}
597 637
598static int ur_open(struct inode *inode, struct file *file) 638static int ur_open(struct inode *inode, struct file *file)