Diffstat (limited to 'drivers/s390')

 drivers/s390/char/monwriter.c |   6
 drivers/s390/char/vmur.c      | 176
 drivers/s390/char/vmur.h      |   5
 drivers/s390/cio/css.c        |   1
 drivers/s390/cio/qdio.c       |  92
 5 files changed, 166 insertions(+), 114 deletions(-)
diff --git a/drivers/s390/char/monwriter.c b/drivers/s390/char/monwriter.c
index 268598ef3efe..20442fbf9346 100644
--- a/drivers/s390/char/monwriter.c
+++ b/drivers/s390/char/monwriter.c
@@ -17,6 +17,7 @@
 #include <linux/miscdevice.h>
 #include <linux/ctype.h>
 #include <linux/poll.h>
+#include <linux/mutex.h>
 #include <asm/uaccess.h>
 #include <asm/ebcdic.h>
 #include <asm/io.h>
@@ -41,6 +42,7 @@ struct mon_private {
 	size_t hdr_to_read;
 	size_t data_to_read;
 	struct mon_buf *current_buf;
+	struct mutex thread_mutex;
 };
 
 /*
@@ -179,6 +181,7 @@ static int monwrite_open(struct inode *inode, struct file *filp)
 		return -ENOMEM;
 	INIT_LIST_HEAD(&monpriv->list);
 	monpriv->hdr_to_read = sizeof(monpriv->hdr);
+	mutex_init(&monpriv->thread_mutex);
 	filp->private_data = monpriv;
 	return nonseekable_open(inode, filp);
 }
@@ -209,6 +212,7 @@ static ssize_t monwrite_write(struct file *filp, const char __user *data,
 	void *to;
 	int rc;
 
+	mutex_lock(&monpriv->thread_mutex);
 	for (written = 0; written < count; ) {
 		if (monpriv->hdr_to_read) {
 			len = min(count - written, monpriv->hdr_to_read);
@@ -247,11 +251,13 @@ static ssize_t monwrite_write(struct file *filp, const char __user *data,
 		}
 		monpriv->hdr_to_read = sizeof(monpriv->hdr);
 	}
+	mutex_unlock(&monpriv->thread_mutex);
 	return written;
 
 out_error:
 	monpriv->data_to_read = 0;
 	monpriv->hdr_to_read = sizeof(struct monwrite_hdr);
+	mutex_unlock(&monpriv->thread_mutex);
 	return rc;
 }
 
diff --git a/drivers/s390/char/vmur.c b/drivers/s390/char/vmur.c
index 161867cebd8c..04b19bdc09da 100644
--- a/drivers/s390/char/vmur.c
+++ b/drivers/s390/char/vmur.c
@@ -119,10 +119,12 @@ static void urdev_put(struct urdev *urd)
 /*
  * Low-level functions to do I/O to a ur device.
  *     alloc_chan_prog
+ *     free_chan_prog
  *     do_ur_io
  *     ur_int_handler
  *
  * alloc_chan_prog allocates and builds the channel program
+ * free_chan_prog frees memory of the channel program
  *
  * do_ur_io issues the channel program to the device and blocks waiting
  * on a completion event it publishes at urd->io_done. The function
@@ -137,6 +139,16 @@ static void urdev_put(struct urdev *urd)
  * address pointer that alloc_chan_prog returned.
  */
 
+static void free_chan_prog(struct ccw1 *cpa)
+{
+	struct ccw1 *ptr = cpa;
+
+	while (ptr->cda) {
+		kfree((void *)(addr_t) ptr->cda);
+		ptr++;
+	}
+	kfree(cpa);
+}
 
 /*
  * alloc_chan_prog
@@ -144,44 +156,45 @@ static void urdev_put(struct urdev *urd)
  * with a final NOP CCW command-chained on (which ensures that CE and DE
  * are presented together in a single interrupt instead of as separate
  * interrupts unless an incorrect length indication kicks in first). The
- * data length in each CCW is reclen. The caller must ensure that count
- * is an integral multiple of reclen.
- * The channel program pointer returned by this function must be freed
- * with kfree. The caller is responsible for checking that
- * count/reclen is not ridiculously large.
+ * data length in each CCW is reclen.
  */
-static struct ccw1 *alloc_chan_prog(char *buf, size_t count, size_t reclen)
+static struct ccw1 *alloc_chan_prog(const char __user *ubuf, int rec_count,
+				    int reclen)
 {
-	size_t num_ccws;
 	struct ccw1 *cpa;
+	void *kbuf;
 	int i;
 
-	TRACE("alloc_chan_prog(%p, %zu, %zu)\n", buf, count, reclen);
+	TRACE("alloc_chan_prog(%p, %i, %i)\n", ubuf, rec_count, reclen);
 
 	/*
 	 * We chain a NOP onto the writes to force CE+DE together.
 	 * That means we allocate room for CCWs to cover count/reclen
 	 * records plus a NOP.
 	 */
-	num_ccws = count / reclen + 1;
-	cpa = kmalloc(num_ccws * sizeof(struct ccw1), GFP_KERNEL | GFP_DMA);
+	cpa = kzalloc((rec_count + 1) * sizeof(struct ccw1),
+		      GFP_KERNEL | GFP_DMA);
 	if (!cpa)
-		return NULL;
+		return ERR_PTR(-ENOMEM);
 
-	for (i = 0; count; i++) {
+	for (i = 0; i < rec_count; i++) {
 		cpa[i].cmd_code = WRITE_CCW_CMD;
 		cpa[i].flags = CCW_FLAG_CC | CCW_FLAG_SLI;
 		cpa[i].count = reclen;
-		cpa[i].cda = __pa(buf);
-		buf += reclen;
-		count -= reclen;
+		kbuf = kmalloc(reclen, GFP_KERNEL | GFP_DMA);
+		if (!kbuf) {
+			free_chan_prog(cpa);
+			return ERR_PTR(-ENOMEM);
+		}
+		cpa[i].cda = (u32)(addr_t) kbuf;
+		if (copy_from_user(kbuf, ubuf, reclen)) {
+			free_chan_prog(cpa);
+			return ERR_PTR(-EFAULT);
+		}
+		ubuf += reclen;
 	}
 	/* The following NOP CCW forces CE+DE to be presented together */
 	cpa[i].cmd_code = CCW_CMD_NOOP;
-	cpa[i].flags = 0;
-	cpa[i].count = 0;
-	cpa[i].cda = 0;
-
 	return cpa;
 }
 
@@ -189,7 +202,7 @@ static int do_ur_io(struct urdev *urd, struct ccw1 *cpa)
 {
 	int rc;
 	struct ccw_device *cdev = urd->cdev;
-	DECLARE_COMPLETION(event);
+	DECLARE_COMPLETION_ONSTACK(event);
 
 	TRACE("do_ur_io: cpa=%p\n", cpa);
 
@@ -325,24 +338,11 @@ static ssize_t do_write(struct urdev *urd, const char __user *udata,
 			size_t count, size_t reclen, loff_t *ppos)
 {
 	struct ccw1 *cpa;
-	char *buf;
 	int rc;
 
-	/* Data buffer must be under 2GB line for fmt1 CCWs: hence GFP_DMA */
-	buf = kmalloc(count, GFP_KERNEL | GFP_DMA);
-	if (!buf)
-		return -ENOMEM;
-
-	if (copy_from_user(buf, udata, count)) {
-		rc = -EFAULT;
-		goto fail_kfree_buf;
-	}
-
-	cpa = alloc_chan_prog(buf, count, reclen);
-	if (!cpa) {
-		rc = -ENOMEM;
-		goto fail_kfree_buf;
-	}
+	cpa = alloc_chan_prog(udata, count / reclen, reclen);
+	if (IS_ERR(cpa))
+		return PTR_ERR(cpa);
 
 	rc = do_ur_io(urd, cpa);
 	if (rc)
@@ -354,10 +354,9 @@ static ssize_t do_write(struct urdev *urd, const char __user *udata,
 	}
 	*ppos += count;
 	rc = count;
+
 fail_kfree_cpa:
-	kfree(cpa);
-fail_kfree_buf:
-	kfree(buf);
+	free_chan_prog(cpa);
 	return rc;
 }
 
@@ -473,7 +472,7 @@ static ssize_t diag14_read(struct file *file, char __user *ubuf, size_t count,
 		return rc;
 
 	len = min((size_t) PAGE_SIZE, count);
-	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+	buf = (char *) __get_free_page(GFP_KERNEL | GFP_DMA);
 	if (!buf)
 		return -ENOMEM;
 
@@ -500,7 +499,7 @@ static ssize_t diag14_read(struct file *file, char __user *ubuf, size_t count,
 	*offs += copied;
 	rc = copied;
 fail:
-	kfree(buf);
+	free_page((unsigned long) buf);
 	return rc;
 }
 
@@ -543,56 +542,97 @@ static int diag_read_next_file_info(struct file_control_block *buf, int spid)
 	}
 }
 
-static int verify_device(struct urdev *urd)
+static int verify_uri_device(struct urdev *urd)
 {
-	struct file_control_block fcb;
+	struct file_control_block *fcb;
 	char *buf;
 	int rc;
 
+	fcb = kmalloc(sizeof(*fcb), GFP_KERNEL | GFP_DMA);
+	if (!fcb)
+		return -ENOMEM;
+
+	/* check for empty reader device (beginning of chain) */
+	rc = diag_read_next_file_info(fcb, 0);
+	if (rc)
+		goto fail_free_fcb;
+
+	/* if file is in hold status, we do not read it */
+	if (fcb->file_stat & (FLG_SYSTEM_HOLD | FLG_USER_HOLD)) {
+		rc = -EPERM;
+		goto fail_free_fcb;
+	}
+
+	/* open file on virtual reader */
+	buf = (char *) __get_free_page(GFP_KERNEL | GFP_DMA);
+	if (!buf) {
+		rc = -ENOMEM;
+		goto fail_free_fcb;
+	}
+	rc = diag_read_file(urd->dev_id.devno, buf);
+	if ((rc != 0) && (rc != -ENODATA)) /* EOF does not hurt */
+		goto fail_free_buf;
+
+	/* check if the file on top of the queue is open now */
+	rc = diag_read_next_file_info(fcb, 0);
+	if (rc)
+		goto fail_free_buf;
+	if (!(fcb->file_stat & FLG_IN_USE)) {
+		rc = -EMFILE;
+		goto fail_free_buf;
+	}
+	rc = 0;
+
+fail_free_buf:
+	free_page((unsigned long) buf);
+fail_free_fcb:
+	kfree(fcb);
+	return rc;
+}
+
+static int verify_device(struct urdev *urd)
+{
 	switch (urd->class) {
 	case DEV_CLASS_UR_O:
 		return 0; /* no check needed here */
 	case DEV_CLASS_UR_I:
-		/* check for empty reader device (beginning of chain) */
-		rc = diag_read_next_file_info(&fcb, 0);
-		if (rc)
-			return rc;
-
-		/* open file on virtual reader */
-		buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
-		if (!buf)
-			return -ENOMEM;
-		rc = diag_read_file(urd->dev_id.devno, buf);
-		kfree(buf);
-
-		if ((rc != 0) && (rc != -ENODATA)) /* EOF does not hurt */
-			return rc;
-		return 0;
+		return verify_uri_device(urd);
 	default:
 		return -ENOTSUPP;
 	}
 }
 
-static int get_file_reclen(struct urdev *urd)
+static int get_uri_file_reclen(struct urdev *urd)
 {
-	struct file_control_block fcb;
+	struct file_control_block *fcb;
 	int rc;
 
+	fcb = kmalloc(sizeof(*fcb), GFP_KERNEL | GFP_DMA);
+	if (!fcb)
+		return -ENOMEM;
+	rc = diag_read_next_file_info(fcb, 0);
+	if (rc)
+		goto fail_free;
+	if (fcb->file_stat & FLG_CP_DUMP)
+		rc = 0;
+	else
+		rc = fcb->rec_len;
+
+fail_free:
+	kfree(fcb);
+	return rc;
+}
+
+static int get_file_reclen(struct urdev *urd)
+{
 	switch (urd->class) {
 	case DEV_CLASS_UR_O:
 		return 0;
 	case DEV_CLASS_UR_I:
-		rc = diag_read_next_file_info(&fcb, 0);
-		if (rc)
-			return rc;
-		break;
+		return get_uri_file_reclen(urd);
 	default:
 		return -ENOTSUPP;
 	}
-	if (fcb.file_stat & FLG_CP_DUMP)
-		return 0;
-
-	return fcb.rec_len;
 }
 
 static int ur_open(struct inode *inode, struct file *file)
diff --git a/drivers/s390/char/vmur.h b/drivers/s390/char/vmur.h
index 16d0a4e38e40..2b3c564e0472 100644
--- a/drivers/s390/char/vmur.h
+++ b/drivers/s390/char/vmur.h
@@ -50,7 +50,10 @@ struct file_control_block {
 	char rest[200];
 } __attribute__ ((packed));
 
-#define FLG_CP_DUMP	0x10
+#define FLG_SYSTEM_HOLD	0x04
+#define FLG_CP_DUMP	0x10
+#define FLG_USER_HOLD	0x20
+#define FLG_IN_USE	0x80
 
 /*
  * A struct urdev is created for each ur device that is made available
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 1c27a5a06b49..5635e656c1a3 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -79,6 +79,7 @@ css_alloc_subchannel(struct subchannel_id schid)
 	sch->schib.pmcw.intparm = (__u32)(unsigned long)sch;
 	ret = cio_modify(sch);
 	if (ret) {
+		kfree(sch->lock);
 		kfree(sch);
 		return ERR_PTR(ret);
 	}
diff --git a/drivers/s390/cio/qdio.c b/drivers/s390/cio/qdio.c
index ed026a1dc324..03347aed2b3e 100644
--- a/drivers/s390/cio/qdio.c
+++ b/drivers/s390/cio/qdio.c
@@ -81,6 +81,7 @@ static __u32 volatile spare_indicator;
 static atomic_t spare_indicator_usecount;
 #define QDIO_MEMPOOL_SCSSC_ELEMENTS 2
 static mempool_t *qdio_mempool_scssc;
+static struct kmem_cache *qdio_q_cache;
 
 static debug_info_t *qdio_dbf_setup;
 static debug_info_t *qdio_dbf_sbal;
@@ -1617,23 +1618,21 @@ static void
 qdio_release_irq_memory(struct qdio_irq *irq_ptr)
 {
 	int i;
+	struct qdio_q *q;
 
-	for (i=0;i<QDIO_MAX_QUEUES_PER_IRQ;i++) {
-		if (!irq_ptr->input_qs[i])
-			goto next;
-
-		kfree(irq_ptr->input_qs[i]->slib);
-		kfree(irq_ptr->input_qs[i]);
-
-next:
-		if (!irq_ptr->output_qs[i])
-			continue;
-
-		kfree(irq_ptr->output_qs[i]->slib);
-		kfree(irq_ptr->output_qs[i]);
-
+	for (i = 0; i < QDIO_MAX_QUEUES_PER_IRQ; i++) {
+		q = irq_ptr->input_qs[i];
+		if (q) {
+			free_page((unsigned long) q->slib);
+			kmem_cache_free(qdio_q_cache, q);
+		}
+		q = irq_ptr->output_qs[i];
+		if (q) {
+			free_page((unsigned long) q->slib);
+			kmem_cache_free(qdio_q_cache, q);
+		}
 	}
-	kfree(irq_ptr->qdr);
+	free_page((unsigned long) irq_ptr->qdr);
 	free_page((unsigned long) irq_ptr);
 }
 
1639 1638
@@ -1680,44 +1679,35 @@ qdio_alloc_qs(struct qdio_irq *irq_ptr,
 {
 	int i;
 	struct qdio_q *q;
-	int result=-ENOMEM;
-
-	for (i=0;i<no_input_qs;i++) {
-		q = kzalloc(sizeof(struct qdio_q), GFP_KERNEL);
 
-		if (!q) {
-			QDIO_PRINT_ERR("kmalloc of q failed!\n");
-			goto out;
-		}
+	for (i = 0; i < no_input_qs; i++) {
+		q = kmem_cache_alloc(qdio_q_cache, GFP_KERNEL);
+		if (!q)
+			return -ENOMEM;
+		memset(q, 0, sizeof(*q));
 
-		q->slib = kmalloc(PAGE_SIZE, GFP_KERNEL);
+		q->slib = (struct slib *) __get_free_page(GFP_KERNEL);
 		if (!q->slib) {
-			QDIO_PRINT_ERR("kmalloc of slib failed!\n");
-			goto out;
+			kmem_cache_free(qdio_q_cache, q);
+			return -ENOMEM;
 		}
-
 		irq_ptr->input_qs[i]=q;
 	}
 
-	for (i=0;i<no_output_qs;i++) {
-		q = kzalloc(sizeof(struct qdio_q), GFP_KERNEL);
-
-		if (!q) {
-			goto out;
-		}
+	for (i = 0; i < no_output_qs; i++) {
+		q = kmem_cache_alloc(qdio_q_cache, GFP_KERNEL);
+		if (!q)
+			return -ENOMEM;
+		memset(q, 0, sizeof(*q));
 
-		q->slib=kmalloc(PAGE_SIZE,GFP_KERNEL);
+		q->slib = (struct slib *) __get_free_page(GFP_KERNEL);
 		if (!q->slib) {
-			QDIO_PRINT_ERR("kmalloc of slib failed!\n");
-			goto out;
+			kmem_cache_free(qdio_q_cache, q);
+			return -ENOMEM;
 		}
-
 		irq_ptr->output_qs[i]=q;
 	}
-
-	result=0;
-out:
-	return result;
+	return 0;
 }
 
 static void
@@ -2985,17 +2975,17 @@ qdio_allocate(struct qdio_initialize *init_data)
 	QDIO_DBF_HEX0(0,setup,&irq_ptr,sizeof(void*));
 
 	if (!irq_ptr) {
-		QDIO_PRINT_ERR("kmalloc of irq_ptr failed!\n");
+		QDIO_PRINT_ERR("allocation of irq_ptr failed!\n");
 		return -ENOMEM;
 	}
 
 	init_MUTEX(&irq_ptr->setting_up_sema);
 
 	/* QDR must be in DMA area since CCW data address is only 32 bit */
-	irq_ptr->qdr=kmalloc(sizeof(struct qdr), GFP_KERNEL | GFP_DMA);
+	irq_ptr->qdr = (struct qdr *) __get_free_page(GFP_KERNEL | GFP_DMA);
 	if (!(irq_ptr->qdr)) {
 		free_page((unsigned long) irq_ptr);
-		QDIO_PRINT_ERR("kmalloc of irq_ptr->qdr failed!\n");
+		QDIO_PRINT_ERR("allocation of irq_ptr->qdr failed!\n");
 		return -ENOMEM;
 	}
 	QDIO_DBF_TEXT0(0,setup,"qdr:");
@@ -3004,6 +2994,7 @@ qdio_allocate(struct qdio_initialize *init_data)
 	if (qdio_alloc_qs(irq_ptr,
 			  init_data->no_input_qs,
 			  init_data->no_output_qs)) {
+		QDIO_PRINT_ERR("queue allocation failed!\n");
 		qdio_release_irq_memory(irq_ptr);
 		return -ENOMEM;
 	}
@@ -3895,9 +3886,19 @@ init_QDIO(void)
 	if (res)
 		return res;
 
+	qdio_q_cache = kmem_cache_create("qdio_q", sizeof(struct qdio_q),
+					 256, 0, NULL);
+	if (!qdio_q_cache) {
+		qdio_release_qdio_memory();
+		return -ENOMEM;
+	}
+
 	res = qdio_register_dbf_views();
-	if (res)
+	if (res) {
+		kmem_cache_destroy(qdio_q_cache);
+		qdio_release_qdio_memory();
 		return res;
+	}
 
 	QDIO_DBF_TEXT0(0,setup,"initQDIO");
 	res = bus_create_file(&ccw_bus_type, &bus_attr_qdio_performance_stats);
@@ -3929,6 +3930,7 @@ cleanup_QDIO(void)
 	qdio_release_qdio_memory();
 	qdio_unregister_dbf_views();
 	mempool_destroy(qdio_mempool_scssc);
+	kmem_cache_destroy(qdio_q_cache);
 	bus_remove_file(&ccw_bus_type, &bus_attr_qdio_performance_stats);
 	printk("qdio: %s: module removed\n",version);
 }