aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/s390/cio
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2009-03-26 19:04:22 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2009-03-26 19:04:22 -0400
commit21cdbc1378e8aa96e1ed4a606dce1a8e7daf7fdf (patch)
tree55b6c294b912ccdc3eede15960b0ece53a69d902 /drivers/s390/cio
parent86d9c070175de65890794fa227b68297da6206d8 (diff)
parentef3500b2b2955af4fa6b0564b51c0c604e38c571 (diff)
Merge branch 'for-linus' of git://git390.marist.edu/pub/scm/linux-2.6
* 'for-linus' of git://git390.marist.edu/pub/scm/linux-2.6: (81 commits) [S390] remove duplicated #includes [S390] cpumask: use mm_cpumask() wrapper [S390] cpumask: Use accessors code. [S390] cpumask: prepare for iterators to only go to nr_cpu_ids/nr_cpumask_bits. [S390] cpumask: remove cpu_coregroup_map [S390] fix clock comparator save area usage [S390] Add hwcap flag for the etf3 enhancement facility [S390] Ensure that ipl panic notifier is called late. [S390] fix dfp elf hwcap/facility bit detection [S390] smp: perform initial cpu reset before starting a cpu [S390] smp: fix memory leak on __cpu_up [S390] ipl: Improve checking logic and remove switch defaults. [S390] s390dbf: Remove needless check for NULL pointer. [S390] s390dbf: Remove redundant initializations. [S390] use kzfree() [S390] BUG to BUG_ON changes [S390] zfcpdump: Prevent zcore from being built as a kernel module. [S390] Use csum_partial in checksum.h [S390] cleanup lowcore.h [S390] eliminate ipl_device from lowcore ...
Diffstat (limited to 'drivers/s390/cio')
-rw-r--r--drivers/s390/cio/Makefile2
-rw-r--r--drivers/s390/cio/airq.c6
-rw-r--r--drivers/s390/cio/blacklist.c3
-rw-r--r--drivers/s390/cio/ccwgroup.c73
-rw-r--r--drivers/s390/cio/chp.c6
-rw-r--r--drivers/s390/cio/chsc.c7
-rw-r--r--drivers/s390/cio/cio.c21
-rw-r--r--drivers/s390/cio/crw.c159
-rw-r--r--drivers/s390/cio/css.c58
-rw-r--r--drivers/s390/cio/device.c44
-rw-r--r--drivers/s390/cio/device.h2
-rw-r--r--drivers/s390/cio/device_fsm.c8
-rw-r--r--drivers/s390/cio/device_ops.c2
-rw-r--r--drivers/s390/cio/qdio.h8
-rw-r--r--drivers/s390/cio/qdio_debug.c3
-rw-r--r--drivers/s390/cio/qdio_main.c222
-rw-r--r--drivers/s390/cio/qdio_setup.c1
-rw-r--r--drivers/s390/cio/qdio_thinint.c23
18 files changed, 426 insertions, 222 deletions
diff --git a/drivers/s390/cio/Makefile b/drivers/s390/cio/Makefile
index bd79bd165396..adb3dd301528 100644
--- a/drivers/s390/cio/Makefile
+++ b/drivers/s390/cio/Makefile
@@ -3,7 +3,7 @@
3# 3#
4 4
5obj-y += airq.o blacklist.o chsc.o cio.o css.o chp.o idset.o isc.o scsw.o \ 5obj-y += airq.o blacklist.o chsc.o cio.o css.o chp.o idset.o isc.o scsw.o \
6 fcx.o itcw.o 6 fcx.o itcw.o crw.o
7ccw_device-objs += device.o device_fsm.o device_ops.o 7ccw_device-objs += device.o device_fsm.o device_ops.o
8ccw_device-objs += device_id.o device_pgid.o device_status.o 8ccw_device-objs += device_id.o device_pgid.o device_status.o
9obj-y += ccw_device.o cmf.o 9obj-y += ccw_device.o cmf.o
diff --git a/drivers/s390/cio/airq.c b/drivers/s390/cio/airq.c
index fe6cea15bbaf..65d2e769dfa1 100644
--- a/drivers/s390/cio/airq.c
+++ b/drivers/s390/cio/airq.c
@@ -34,8 +34,8 @@ struct airq_t {
34 void *drv_data; 34 void *drv_data;
35}; 35};
36 36
37static union indicator_t indicators[MAX_ISC]; 37static union indicator_t indicators[MAX_ISC+1];
38static struct airq_t *airqs[MAX_ISC][NR_AIRQS]; 38static struct airq_t *airqs[MAX_ISC+1][NR_AIRQS];
39 39
40static int register_airq(struct airq_t *airq, u8 isc) 40static int register_airq(struct airq_t *airq, u8 isc)
41{ 41{
@@ -133,6 +133,8 @@ void do_adapter_IO(u8 isc)
133 while (word) { 133 while (word) {
134 if (word & INDICATOR_MASK) { 134 if (word & INDICATOR_MASK) {
135 airq = airqs[isc][i]; 135 airq = airqs[isc][i];
136 /* Make sure gcc reads from airqs only once. */
137 barrier();
136 if (likely(airq)) 138 if (likely(airq))
137 airq->handler(&indicators[isc].byte[i], 139 airq->handler(&indicators[isc].byte[i],
138 airq->drv_data); 140 airq->drv_data);
diff --git a/drivers/s390/cio/blacklist.c b/drivers/s390/cio/blacklist.c
index fe00be3675cd..6565f027791e 100644
--- a/drivers/s390/cio/blacklist.c
+++ b/drivers/s390/cio/blacklist.c
@@ -336,8 +336,7 @@ cio_ignore_write(struct file *file, const char __user *user_buf,
336 size_t user_len, loff_t *offset) 336 size_t user_len, loff_t *offset)
337{ 337{
338 char *buf; 338 char *buf;
339 size_t i; 339 ssize_t rc, ret, i;
340 ssize_t rc, ret;
341 340
342 if (*offset) 341 if (*offset)
343 return -EINVAL; 342 return -EINVAL;
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c
index b91c1719b075..22ce765d537e 100644
--- a/drivers/s390/cio/ccwgroup.c
+++ b/drivers/s390/cio/ccwgroup.c
@@ -315,16 +315,32 @@ error:
315} 315}
316EXPORT_SYMBOL(ccwgroup_create_from_string); 316EXPORT_SYMBOL(ccwgroup_create_from_string);
317 317
318static int __init 318static int ccwgroup_notifier(struct notifier_block *nb, unsigned long action,
319init_ccwgroup (void) 319 void *data);
320
321static struct notifier_block ccwgroup_nb = {
322 .notifier_call = ccwgroup_notifier
323};
324
325static int __init init_ccwgroup(void)
320{ 326{
321 return bus_register (&ccwgroup_bus_type); 327 int ret;
328
329 ret = bus_register(&ccwgroup_bus_type);
330 if (ret)
331 return ret;
332
333 ret = bus_register_notifier(&ccwgroup_bus_type, &ccwgroup_nb);
334 if (ret)
335 bus_unregister(&ccwgroup_bus_type);
336
337 return ret;
322} 338}
323 339
324static void __exit 340static void __exit cleanup_ccwgroup(void)
325cleanup_ccwgroup (void)
326{ 341{
327 bus_unregister (&ccwgroup_bus_type); 342 bus_unregister_notifier(&ccwgroup_bus_type, &ccwgroup_nb);
343 bus_unregister(&ccwgroup_bus_type);
328} 344}
329 345
330module_init(init_ccwgroup); 346module_init(init_ccwgroup);
@@ -392,27 +408,28 @@ ccwgroup_online_store (struct device *dev, struct device_attribute *attr, const
392 unsigned long value; 408 unsigned long value;
393 int ret; 409 int ret;
394 410
395 gdev = to_ccwgroupdev(dev);
396 if (!dev->driver) 411 if (!dev->driver)
397 return count; 412 return -ENODEV;
413
414 gdev = to_ccwgroupdev(dev);
415 gdrv = to_ccwgroupdrv(dev->driver);
398 416
399 gdrv = to_ccwgroupdrv (gdev->dev.driver);
400 if (!try_module_get(gdrv->owner)) 417 if (!try_module_get(gdrv->owner))
401 return -EINVAL; 418 return -EINVAL;
402 419
403 ret = strict_strtoul(buf, 0, &value); 420 ret = strict_strtoul(buf, 0, &value);
404 if (ret) 421 if (ret)
405 goto out; 422 goto out;
406 ret = count; 423
407 if (value == 1) 424 if (value == 1)
408 ccwgroup_set_online(gdev); 425 ret = ccwgroup_set_online(gdev);
409 else if (value == 0) 426 else if (value == 0)
410 ccwgroup_set_offline(gdev); 427 ret = ccwgroup_set_offline(gdev);
411 else 428 else
412 ret = -EINVAL; 429 ret = -EINVAL;
413out: 430out:
414 module_put(gdrv->owner); 431 module_put(gdrv->owner);
415 return ret; 432 return (ret == 0) ? count : ret;
416} 433}
417 434
418static ssize_t 435static ssize_t
@@ -454,13 +471,18 @@ ccwgroup_remove (struct device *dev)
454 struct ccwgroup_device *gdev; 471 struct ccwgroup_device *gdev;
455 struct ccwgroup_driver *gdrv; 472 struct ccwgroup_driver *gdrv;
456 473
474 device_remove_file(dev, &dev_attr_online);
475 device_remove_file(dev, &dev_attr_ungroup);
476
477 if (!dev->driver)
478 return 0;
479
457 gdev = to_ccwgroupdev(dev); 480 gdev = to_ccwgroupdev(dev);
458 gdrv = to_ccwgroupdrv(dev->driver); 481 gdrv = to_ccwgroupdrv(dev->driver);
459 482
460 device_remove_file(dev, &dev_attr_online); 483 if (gdrv->remove)
461
462 if (gdrv && gdrv->remove)
463 gdrv->remove(gdev); 484 gdrv->remove(gdev);
485
464 return 0; 486 return 0;
465} 487}
466 488
@@ -469,9 +491,13 @@ static void ccwgroup_shutdown(struct device *dev)
469 struct ccwgroup_device *gdev; 491 struct ccwgroup_device *gdev;
470 struct ccwgroup_driver *gdrv; 492 struct ccwgroup_driver *gdrv;
471 493
494 if (!dev->driver)
495 return;
496
472 gdev = to_ccwgroupdev(dev); 497 gdev = to_ccwgroupdev(dev);
473 gdrv = to_ccwgroupdrv(dev->driver); 498 gdrv = to_ccwgroupdrv(dev->driver);
474 if (gdrv && gdrv->shutdown) 499
500 if (gdrv->shutdown)
475 gdrv->shutdown(gdev); 501 gdrv->shutdown(gdev);
476} 502}
477 503
@@ -484,6 +510,19 @@ static struct bus_type ccwgroup_bus_type = {
484 .shutdown = ccwgroup_shutdown, 510 .shutdown = ccwgroup_shutdown,
485}; 511};
486 512
513
514static int ccwgroup_notifier(struct notifier_block *nb, unsigned long action,
515 void *data)
516{
517 struct device *dev = data;
518
519 if (action == BUS_NOTIFY_UNBIND_DRIVER)
520 device_schedule_callback(dev, ccwgroup_ungroup_callback);
521
522 return NOTIFY_OK;
523}
524
525
487/** 526/**
488 * ccwgroup_driver_register() - register a ccw group driver 527 * ccwgroup_driver_register() - register a ccw group driver
489 * @cdriver: driver to be registered 528 * @cdriver: driver to be registered
diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c
index 1246f61a5338..3e5f304ad88f 100644
--- a/drivers/s390/cio/chp.c
+++ b/drivers/s390/cio/chp.c
@@ -17,8 +17,8 @@
17#include <linux/errno.h> 17#include <linux/errno.h>
18#include <asm/chpid.h> 18#include <asm/chpid.h>
19#include <asm/sclp.h> 19#include <asm/sclp.h>
20#include <asm/crw.h>
20 21
21#include "../s390mach.h"
22#include "cio.h" 22#include "cio.h"
23#include "css.h" 23#include "css.h"
24#include "ioasm.h" 24#include "ioasm.h"
@@ -706,12 +706,12 @@ static int __init chp_init(void)
706 struct chp_id chpid; 706 struct chp_id chpid;
707 int ret; 707 int ret;
708 708
709 ret = s390_register_crw_handler(CRW_RSC_CPATH, chp_process_crw); 709 ret = crw_register_handler(CRW_RSC_CPATH, chp_process_crw);
710 if (ret) 710 if (ret)
711 return ret; 711 return ret;
712 chp_wq = create_singlethread_workqueue("cio_chp"); 712 chp_wq = create_singlethread_workqueue("cio_chp");
713 if (!chp_wq) { 713 if (!chp_wq) {
714 s390_unregister_crw_handler(CRW_RSC_CPATH); 714 crw_unregister_handler(CRW_RSC_CPATH);
715 return -ENOMEM; 715 return -ENOMEM;
716 } 716 }
717 INIT_WORK(&cfg_work, cfg_func); 717 INIT_WORK(&cfg_work, cfg_func);
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index ebab6ea4659b..883f16f96f22 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -19,8 +19,8 @@
19#include <asm/cio.h> 19#include <asm/cio.h>
20#include <asm/chpid.h> 20#include <asm/chpid.h>
21#include <asm/chsc.h> 21#include <asm/chsc.h>
22#include <asm/crw.h>
22 23
23#include "../s390mach.h"
24#include "css.h" 24#include "css.h"
25#include "cio.h" 25#include "cio.h"
26#include "cio_debug.h" 26#include "cio_debug.h"
@@ -589,6 +589,7 @@ __chsc_do_secm(struct channel_subsystem *css, int enable, void *page)
589 case 0x0102: 589 case 0x0102:
590 case 0x0103: 590 case 0x0103:
591 ret = -EINVAL; 591 ret = -EINVAL;
592 break;
592 default: 593 default:
593 ret = chsc_error_from_response(secm_area->response.code); 594 ret = chsc_error_from_response(secm_area->response.code);
594 } 595 }
@@ -820,7 +821,7 @@ int __init chsc_alloc_sei_area(void)
820 "chsc machine checks!\n"); 821 "chsc machine checks!\n");
821 return -ENOMEM; 822 return -ENOMEM;
822 } 823 }
823 ret = s390_register_crw_handler(CRW_RSC_CSS, chsc_process_crw); 824 ret = crw_register_handler(CRW_RSC_CSS, chsc_process_crw);
824 if (ret) 825 if (ret)
825 kfree(sei_page); 826 kfree(sei_page);
826 return ret; 827 return ret;
@@ -828,7 +829,7 @@ int __init chsc_alloc_sei_area(void)
828 829
829void __init chsc_free_sei_area(void) 830void __init chsc_free_sei_area(void)
830{ 831{
831 s390_unregister_crw_handler(CRW_RSC_CSS); 832 crw_unregister_handler(CRW_RSC_CSS);
832 kfree(sei_page); 833 kfree(sei_page);
833} 834}
834 835
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index 659f8a791656..2aebb9823044 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -30,6 +30,8 @@
30#include <asm/isc.h> 30#include <asm/isc.h>
31#include <asm/cpu.h> 31#include <asm/cpu.h>
32#include <asm/fcx.h> 32#include <asm/fcx.h>
33#include <asm/nmi.h>
34#include <asm/crw.h>
33#include "cio.h" 35#include "cio.h"
34#include "css.h" 36#include "css.h"
35#include "chsc.h" 37#include "chsc.h"
@@ -38,7 +40,6 @@
38#include "blacklist.h" 40#include "blacklist.h"
39#include "cio_debug.h" 41#include "cio_debug.h"
40#include "chp.h" 42#include "chp.h"
41#include "../s390mach.h"
42 43
43debug_info_t *cio_debug_msg_id; 44debug_info_t *cio_debug_msg_id;
44debug_info_t *cio_debug_trace_id; 45debug_info_t *cio_debug_trace_id;
@@ -471,6 +472,7 @@ EXPORT_SYMBOL_GPL(cio_enable_subchannel);
471int cio_disable_subchannel(struct subchannel *sch) 472int cio_disable_subchannel(struct subchannel *sch)
472{ 473{
473 char dbf_txt[15]; 474 char dbf_txt[15];
475 int retry;
474 int ret; 476 int ret;
475 477
476 CIO_TRACE_EVENT (2, "dissch"); 478 CIO_TRACE_EVENT (2, "dissch");
@@ -481,16 +483,17 @@ int cio_disable_subchannel(struct subchannel *sch)
481 if (cio_update_schib(sch)) 483 if (cio_update_schib(sch))
482 return -ENODEV; 484 return -ENODEV;
483 485
484 if (scsw_actl(&sch->schib.scsw) != 0)
485 /*
486 * the disable function must not be called while there are
487 * requests pending for completion !
488 */
489 return -EBUSY;
490
491 sch->config.ena = 0; 486 sch->config.ena = 0;
492 ret = cio_commit_config(sch);
493 487
488 for (retry = 0; retry < 3; retry++) {
489 ret = cio_commit_config(sch);
490 if (ret == -EBUSY) {
491 struct irb irb;
492 if (tsch(sch->schid, &irb) != 0)
493 break;
494 } else
495 break;
496 }
494 sprintf (dbf_txt, "ret:%d", ret); 497 sprintf (dbf_txt, "ret:%d", ret);
495 CIO_TRACE_EVENT (2, dbf_txt); 498 CIO_TRACE_EVENT (2, dbf_txt);
496 return ret; 499 return ret;
diff --git a/drivers/s390/cio/crw.c b/drivers/s390/cio/crw.c
new file mode 100644
index 000000000000..d157665d0e76
--- /dev/null
+++ b/drivers/s390/cio/crw.c
@@ -0,0 +1,159 @@
1/*
2 * Channel report handling code
3 *
4 * Copyright IBM Corp. 2000,2009
5 * Author(s): Ingo Adlung <adlung@de.ibm.com>,
6 * Martin Schwidefsky <schwidefsky@de.ibm.com>,
7 * Cornelia Huck <cornelia.huck@de.ibm.com>,
8 * Heiko Carstens <heiko.carstens@de.ibm.com>,
9 */
10
11#include <linux/semaphore.h>
12#include <linux/mutex.h>
13#include <linux/kthread.h>
14#include <linux/init.h>
15#include <asm/crw.h>
16
17static struct semaphore crw_semaphore;
18static DEFINE_MUTEX(crw_handler_mutex);
19static crw_handler_t crw_handlers[NR_RSCS];
20
21/**
22 * crw_register_handler() - register a channel report word handler
23 * @rsc: reporting source code to handle
24 * @handler: handler to be registered
25 *
26 * Returns %0 on success and a negative error value otherwise.
27 */
28int crw_register_handler(int rsc, crw_handler_t handler)
29{
30 int rc = 0;
31
32 if ((rsc < 0) || (rsc >= NR_RSCS))
33 return -EINVAL;
34 mutex_lock(&crw_handler_mutex);
35 if (crw_handlers[rsc])
36 rc = -EBUSY;
37 else
38 crw_handlers[rsc] = handler;
39 mutex_unlock(&crw_handler_mutex);
40 return rc;
41}
42
43/**
44 * crw_unregister_handler() - unregister a channel report word handler
45 * @rsc: reporting source code to handle
46 */
47void crw_unregister_handler(int rsc)
48{
49 if ((rsc < 0) || (rsc >= NR_RSCS))
50 return;
51 mutex_lock(&crw_handler_mutex);
52 crw_handlers[rsc] = NULL;
53 mutex_unlock(&crw_handler_mutex);
54}
55
56/*
57 * Retrieve CRWs and call function to handle event.
58 */
59static int crw_collect_info(void *unused)
60{
61 struct crw crw[2];
62 int ccode;
63 unsigned int chain;
64 int ignore;
65
66repeat:
67 ignore = down_interruptible(&crw_semaphore);
68 chain = 0;
69 while (1) {
70 crw_handler_t handler;
71
72 if (unlikely(chain > 1)) {
73 struct crw tmp_crw;
74
75 printk(KERN_WARNING"%s: Code does not support more "
76 "than two chained crws; please report to "
77 "linux390@de.ibm.com!\n", __func__);
78 ccode = stcrw(&tmp_crw);
79 printk(KERN_WARNING"%s: crw reports slct=%d, oflw=%d, "
80 "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
81 __func__, tmp_crw.slct, tmp_crw.oflw,
82 tmp_crw.chn, tmp_crw.rsc, tmp_crw.anc,
83 tmp_crw.erc, tmp_crw.rsid);
84 printk(KERN_WARNING"%s: This was crw number %x in the "
85 "chain\n", __func__, chain);
86 if (ccode != 0)
87 break;
88 chain = tmp_crw.chn ? chain + 1 : 0;
89 continue;
90 }
91 ccode = stcrw(&crw[chain]);
92 if (ccode != 0)
93 break;
94 printk(KERN_DEBUG "crw_info : CRW reports slct=%d, oflw=%d, "
95 "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
96 crw[chain].slct, crw[chain].oflw, crw[chain].chn,
97 crw[chain].rsc, crw[chain].anc, crw[chain].erc,
98 crw[chain].rsid);
99 /* Check for overflows. */
100 if (crw[chain].oflw) {
101 int i;
102
103 pr_debug("%s: crw overflow detected!\n", __func__);
104 mutex_lock(&crw_handler_mutex);
105 for (i = 0; i < NR_RSCS; i++) {
106 if (crw_handlers[i])
107 crw_handlers[i](NULL, NULL, 1);
108 }
109 mutex_unlock(&crw_handler_mutex);
110 chain = 0;
111 continue;
112 }
113 if (crw[0].chn && !chain) {
114 chain++;
115 continue;
116 }
117 mutex_lock(&crw_handler_mutex);
118 handler = crw_handlers[crw[chain].rsc];
119 if (handler)
120 handler(&crw[0], chain ? &crw[1] : NULL, 0);
121 mutex_unlock(&crw_handler_mutex);
122 /* chain is always 0 or 1 here. */
123 chain = crw[chain].chn ? chain + 1 : 0;
124 }
125 goto repeat;
126 return 0;
127}
128
129void crw_handle_channel_report(void)
130{
131 up(&crw_semaphore);
132}
133
134/*
135 * Separate initcall needed for semaphore initialization since
136 * crw_handle_channel_report might be called before crw_machine_check_init.
137 */
138static int __init crw_init_semaphore(void)
139{
140 init_MUTEX_LOCKED(&crw_semaphore);
141 return 0;
142}
143pure_initcall(crw_init_semaphore);
144
145/*
146 * Machine checks for the channel subsystem must be enabled
147 * after the channel subsystem is initialized
148 */
149static int __init crw_machine_check_init(void)
150{
151 struct task_struct *task;
152
153 task = kthread_run(crw_collect_info, NULL, "kmcheck");
154 if (IS_ERR(task))
155 return PTR_ERR(task);
156 ctl_set_bit(14, 28); /* enable channel report MCH */
157 return 0;
158}
159device_initcall(crw_machine_check_init);
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 427d11d88069..0085d8901792 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -18,8 +18,8 @@
18#include <linux/list.h> 18#include <linux/list.h>
19#include <linux/reboot.h> 19#include <linux/reboot.h>
20#include <asm/isc.h> 20#include <asm/isc.h>
21#include <asm/crw.h>
21 22
22#include "../s390mach.h"
23#include "css.h" 23#include "css.h"
24#include "cio.h" 24#include "cio.h"
25#include "cio_debug.h" 25#include "cio_debug.h"
@@ -83,6 +83,25 @@ static int call_fn_unknown_sch(struct subchannel_id schid, void *data)
83 return rc; 83 return rc;
84} 84}
85 85
86static int call_fn_all_sch(struct subchannel_id schid, void *data)
87{
88 struct cb_data *cb = data;
89 struct subchannel *sch;
90 int rc = 0;
91
92 sch = get_subchannel_by_schid(schid);
93 if (sch) {
94 if (cb->fn_known_sch)
95 rc = cb->fn_known_sch(sch, cb->data);
96 put_device(&sch->dev);
97 } else {
98 if (cb->fn_unknown_sch)
99 rc = cb->fn_unknown_sch(schid, cb->data);
100 }
101
102 return rc;
103}
104
86int for_each_subchannel_staged(int (*fn_known)(struct subchannel *, void *), 105int for_each_subchannel_staged(int (*fn_known)(struct subchannel *, void *),
87 int (*fn_unknown)(struct subchannel_id, 106 int (*fn_unknown)(struct subchannel_id,
88 void *), void *data) 107 void *), void *data)
@@ -90,13 +109,17 @@ int for_each_subchannel_staged(int (*fn_known)(struct subchannel *, void *),
90 struct cb_data cb; 109 struct cb_data cb;
91 int rc; 110 int rc;
92 111
93 cb.set = idset_sch_new();
94 if (!cb.set)
95 return -ENOMEM;
96 idset_fill(cb.set);
97 cb.data = data; 112 cb.data = data;
98 cb.fn_known_sch = fn_known; 113 cb.fn_known_sch = fn_known;
99 cb.fn_unknown_sch = fn_unknown; 114 cb.fn_unknown_sch = fn_unknown;
115
116 cb.set = idset_sch_new();
117 if (!cb.set)
118 /* fall back to brute force scanning in case of oom */
119 return for_each_subchannel(call_fn_all_sch, &cb);
120
121 idset_fill(cb.set);
122
100 /* Process registered subchannels. */ 123 /* Process registered subchannels. */
101 rc = bus_for_each_dev(&css_bus_type, NULL, &cb, call_fn_known_sch); 124 rc = bus_for_each_dev(&css_bus_type, NULL, &cb, call_fn_known_sch);
102 if (rc) 125 if (rc)
@@ -510,6 +533,17 @@ static int reprobe_subchannel(struct subchannel_id schid, void *data)
510 return ret; 533 return ret;
511} 534}
512 535
536static void reprobe_after_idle(struct work_struct *unused)
537{
538 /* Make sure initial subchannel scan is done. */
539 wait_event(ccw_device_init_wq,
540 atomic_read(&ccw_device_init_count) == 0);
541 if (need_reprobe)
542 css_schedule_reprobe();
543}
544
545static DECLARE_WORK(reprobe_idle_work, reprobe_after_idle);
546
513/* Work function used to reprobe all unregistered subchannels. */ 547/* Work function used to reprobe all unregistered subchannels. */
514static void reprobe_all(struct work_struct *unused) 548static void reprobe_all(struct work_struct *unused)
515{ 549{
@@ -517,10 +551,12 @@ static void reprobe_all(struct work_struct *unused)
517 551
518 CIO_MSG_EVENT(4, "reprobe start\n"); 552 CIO_MSG_EVENT(4, "reprobe start\n");
519 553
520 need_reprobe = 0;
521 /* Make sure initial subchannel scan is done. */ 554 /* Make sure initial subchannel scan is done. */
522 wait_event(ccw_device_init_wq, 555 if (atomic_read(&ccw_device_init_count) != 0) {
523 atomic_read(&ccw_device_init_count) == 0); 556 queue_work(ccw_device_work, &reprobe_idle_work);
557 return;
558 }
559 need_reprobe = 0;
524 ret = for_each_subchannel_staged(NULL, reprobe_subchannel, NULL); 560 ret = for_each_subchannel_staged(NULL, reprobe_subchannel, NULL);
525 561
526 CIO_MSG_EVENT(4, "reprobe done (rc=%d, need_reprobe=%d)\n", ret, 562 CIO_MSG_EVENT(4, "reprobe done (rc=%d, need_reprobe=%d)\n", ret,
@@ -619,7 +655,7 @@ css_generate_pgid(struct channel_subsystem *css, u32 tod_high)
619 css->global_pgid.pgid_high.ext_cssid.cssid = css->cssid; 655 css->global_pgid.pgid_high.ext_cssid.cssid = css->cssid;
620 } else { 656 } else {
621#ifdef CONFIG_SMP 657#ifdef CONFIG_SMP
622 css->global_pgid.pgid_high.cpu_addr = hard_smp_processor_id(); 658 css->global_pgid.pgid_high.cpu_addr = stap();
623#else 659#else
624 css->global_pgid.pgid_high.cpu_addr = 0; 660 css->global_pgid.pgid_high.cpu_addr = 0;
625#endif 661#endif
@@ -765,7 +801,7 @@ init_channel_subsystem (void)
765 if (ret) 801 if (ret)
766 goto out; 802 goto out;
767 803
768 ret = s390_register_crw_handler(CRW_RSC_SCH, css_process_crw); 804 ret = crw_register_handler(CRW_RSC_SCH, css_process_crw);
769 if (ret) 805 if (ret)
770 goto out; 806 goto out;
771 807
@@ -845,7 +881,7 @@ out_unregister:
845out_bus: 881out_bus:
846 bus_unregister(&css_bus_type); 882 bus_unregister(&css_bus_type);
847out: 883out:
848 s390_unregister_crw_handler(CRW_RSC_CSS); 884 crw_unregister_handler(CRW_RSC_CSS);
849 chsc_free_sei_area(); 885 chsc_free_sei_area();
850 kfree(slow_subchannel_set); 886 kfree(slow_subchannel_set);
851 pr_alert("The CSS device driver initialization failed with " 887 pr_alert("The CSS device driver initialization failed with "
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index e28f8ae53453..c4d2f667a2f6 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -457,12 +457,13 @@ int ccw_device_set_online(struct ccw_device *cdev)
457 return (ret == 0) ? -ENODEV : ret; 457 return (ret == 0) ? -ENODEV : ret;
458} 458}
459 459
460static void online_store_handle_offline(struct ccw_device *cdev) 460static int online_store_handle_offline(struct ccw_device *cdev)
461{ 461{
462 if (cdev->private->state == DEV_STATE_DISCONNECTED) 462 if (cdev->private->state == DEV_STATE_DISCONNECTED)
463 ccw_device_remove_disconnected(cdev); 463 ccw_device_remove_disconnected(cdev);
464 else if (cdev->drv && cdev->drv->set_offline) 464 else if (cdev->online && cdev->drv && cdev->drv->set_offline)
465 ccw_device_set_offline(cdev); 465 return ccw_device_set_offline(cdev);
466 return 0;
466} 467}
467 468
468static int online_store_recog_and_online(struct ccw_device *cdev) 469static int online_store_recog_and_online(struct ccw_device *cdev)
@@ -530,13 +531,10 @@ static ssize_t online_store (struct device *dev, struct device_attribute *attr,
530 goto out; 531 goto out;
531 switch (i) { 532 switch (i) {
532 case 0: 533 case 0:
533 online_store_handle_offline(cdev); 534 ret = online_store_handle_offline(cdev);
534 ret = count;
535 break; 535 break;
536 case 1: 536 case 1:
537 ret = online_store_handle_online(cdev, force); 537 ret = online_store_handle_online(cdev, force);
538 if (!ret)
539 ret = count;
540 break; 538 break;
541 default: 539 default:
542 ret = -EINVAL; 540 ret = -EINVAL;
@@ -545,7 +543,7 @@ out:
545 if (cdev->drv) 543 if (cdev->drv)
546 module_put(cdev->drv->owner); 544 module_put(cdev->drv->owner);
547 atomic_set(&cdev->private->onoff, 0); 545 atomic_set(&cdev->private->onoff, 0);
548 return ret; 546 return (ret < 0) ? ret : count;
549} 547}
550 548
551static ssize_t 549static ssize_t
@@ -681,35 +679,22 @@ get_orphaned_ccwdev_by_dev_id(struct channel_subsystem *css,
681 return dev ? to_ccwdev(dev) : NULL; 679 return dev ? to_ccwdev(dev) : NULL;
682} 680}
683 681
684static void 682void ccw_device_do_unbind_bind(struct work_struct *work)
685ccw_device_add_changed(struct work_struct *work)
686{
687 struct ccw_device_private *priv;
688 struct ccw_device *cdev;
689
690 priv = container_of(work, struct ccw_device_private, kick_work);
691 cdev = priv->cdev;
692 if (device_add(&cdev->dev)) {
693 put_device(&cdev->dev);
694 return;
695 }
696 set_bit(1, &cdev->private->registered);
697}
698
699void ccw_device_do_unreg_rereg(struct work_struct *work)
700{ 683{
701 struct ccw_device_private *priv; 684 struct ccw_device_private *priv;
702 struct ccw_device *cdev; 685 struct ccw_device *cdev;
703 struct subchannel *sch; 686 struct subchannel *sch;
687 int ret;
704 688
705 priv = container_of(work, struct ccw_device_private, kick_work); 689 priv = container_of(work, struct ccw_device_private, kick_work);
706 cdev = priv->cdev; 690 cdev = priv->cdev;
707 sch = to_subchannel(cdev->dev.parent); 691 sch = to_subchannel(cdev->dev.parent);
708 692
709 ccw_device_unregister(cdev); 693 if (test_bit(1, &cdev->private->registered)) {
710 PREPARE_WORK(&cdev->private->kick_work, 694 device_release_driver(&cdev->dev);
711 ccw_device_add_changed); 695 ret = device_attach(&cdev->dev);
712 queue_work(ccw_device_work, &cdev->private->kick_work); 696 WARN_ON(ret == -ENODEV);
697 }
713} 698}
714 699
715static void 700static void
@@ -1035,8 +1020,6 @@ static void ccw_device_call_sch_unregister(struct work_struct *work)
1035void 1020void
1036io_subchannel_recog_done(struct ccw_device *cdev) 1021io_subchannel_recog_done(struct ccw_device *cdev)
1037{ 1022{
1038 struct subchannel *sch;
1039
1040 if (css_init_done == 0) { 1023 if (css_init_done == 0) {
1041 cdev->private->flags.recog_done = 1; 1024 cdev->private->flags.recog_done = 1;
1042 return; 1025 return;
@@ -1047,7 +1030,6 @@ io_subchannel_recog_done(struct ccw_device *cdev)
1047 /* Remove device found not operational. */ 1030 /* Remove device found not operational. */
1048 if (!get_device(&cdev->dev)) 1031 if (!get_device(&cdev->dev))
1049 break; 1032 break;
1050 sch = to_subchannel(cdev->dev.parent);
1051 PREPARE_WORK(&cdev->private->kick_work, 1033 PREPARE_WORK(&cdev->private->kick_work,
1052 ccw_device_call_sch_unregister); 1034 ccw_device_call_sch_unregister);
1053 queue_work(slow_path_wq, &cdev->private->kick_work); 1035 queue_work(slow_path_wq, &cdev->private->kick_work);
diff --git a/drivers/s390/cio/device.h b/drivers/s390/cio/device.h
index 0f2e63ea48de..85e01846ca65 100644
--- a/drivers/s390/cio/device.h
+++ b/drivers/s390/cio/device.h
@@ -80,7 +80,7 @@ void io_subchannel_init_config(struct subchannel *sch);
80 80
81int ccw_device_cancel_halt_clear(struct ccw_device *); 81int ccw_device_cancel_halt_clear(struct ccw_device *);
82 82
83void ccw_device_do_unreg_rereg(struct work_struct *); 83void ccw_device_do_unbind_bind(struct work_struct *);
84void ccw_device_move_to_orphanage(struct work_struct *); 84void ccw_device_move_to_orphanage(struct work_struct *);
85int ccw_device_is_orphan(struct ccw_device *); 85int ccw_device_is_orphan(struct ccw_device *);
86 86
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index 8df5eaafc5ab..87b4bfca080f 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -194,7 +194,7 @@ ccw_device_handle_oper(struct ccw_device *cdev)
194 cdev->id.dev_type != cdev->private->senseid.dev_type || 194 cdev->id.dev_type != cdev->private->senseid.dev_type ||
195 cdev->id.dev_model != cdev->private->senseid.dev_model) { 195 cdev->id.dev_model != cdev->private->senseid.dev_model) {
196 PREPARE_WORK(&cdev->private->kick_work, 196 PREPARE_WORK(&cdev->private->kick_work,
197 ccw_device_do_unreg_rereg); 197 ccw_device_do_unbind_bind);
198 queue_work(ccw_device_work, &cdev->private->kick_work); 198 queue_work(ccw_device_work, &cdev->private->kick_work);
199 return 0; 199 return 0;
200 } 200 }
@@ -366,7 +366,7 @@ static void ccw_device_oper_notify(struct ccw_device *cdev)
366 } 366 }
367 /* Driver doesn't want device back. */ 367 /* Driver doesn't want device back. */
368 ccw_device_set_notoper(cdev); 368 ccw_device_set_notoper(cdev);
369 PREPARE_WORK(&cdev->private->kick_work, ccw_device_do_unreg_rereg); 369 PREPARE_WORK(&cdev->private->kick_work, ccw_device_do_unbind_bind);
370 queue_work(ccw_device_work, &cdev->private->kick_work); 370 queue_work(ccw_device_work, &cdev->private->kick_work);
371} 371}
372 372
@@ -728,7 +728,7 @@ static void ccw_device_generic_notoper(struct ccw_device *cdev,
728{ 728{
729 struct subchannel *sch; 729 struct subchannel *sch;
730 730
731 cdev->private->state = DEV_STATE_NOT_OPER; 731 ccw_device_set_notoper(cdev);
732 sch = to_subchannel(cdev->dev.parent); 732 sch = to_subchannel(cdev->dev.parent);
733 css_schedule_eval(sch->schid); 733 css_schedule_eval(sch->schid);
734} 734}
@@ -1052,7 +1052,7 @@ ccw_device_offline_irq(struct ccw_device *cdev, enum dev_event dev_event)
1052 sch = to_subchannel(cdev->dev.parent); 1052 sch = to_subchannel(cdev->dev.parent);
1053 /* 1053 /*
1054 * An interrupt in state offline means a previous disable was not 1054 * An interrupt in state offline means a previous disable was not
1055 * successful. Try again. 1055 * successful - should not happen, but we try to disable again.
1056 */ 1056 */
1057 cio_disable_subchannel(sch); 1057 cio_disable_subchannel(sch);
1058} 1058}
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
index eabcc42d63df..151754d54745 100644
--- a/drivers/s390/cio/device_ops.c
+++ b/drivers/s390/cio/device_ops.c
@@ -680,7 +680,7 @@ int ccw_device_tm_intrg(struct ccw_device *cdev)
680 if (cdev->private->state != DEV_STATE_ONLINE) 680 if (cdev->private->state != DEV_STATE_ONLINE)
681 return -EIO; 681 return -EIO;
682 if (!scsw_is_tm(&sch->schib.scsw) || 682 if (!scsw_is_tm(&sch->schib.scsw) ||
683 !(scsw_actl(&sch->schib.scsw) | SCSW_ACTL_START_PEND)) 683 !(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_START_PEND))
684 return -EINVAL; 684 return -EINVAL;
685 return cio_tm_intrg(sch); 685 return cio_tm_intrg(sch);
686} 686}
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
index 42f2b09631b6..13bcb8114388 100644
--- a/drivers/s390/cio/qdio.h
+++ b/drivers/s390/cio/qdio.h
@@ -186,6 +186,9 @@ struct qdio_input_q {
186 /* input buffer acknowledgement flag */ 186 /* input buffer acknowledgement flag */
187 int polling; 187 int polling;
188 188
189 /* first ACK'ed buffer */
190 int ack_start;
191
189 /* how much sbals are acknowledged with qebsm */ 192 /* how much sbals are acknowledged with qebsm */
190 int ack_count; 193 int ack_count;
191 194
@@ -234,7 +237,7 @@ struct qdio_q {
234 int first_to_check; 237 int first_to_check;
235 238
236 /* first_to_check of the last time */ 239 /* first_to_check of the last time */
237 int last_move_ftc; 240 int last_move;
238 241
239 /* beginning position for calling the program */ 242 /* beginning position for calling the program */
240 int first_to_kick; 243 int first_to_kick;
@@ -244,7 +247,6 @@ struct qdio_q {
244 247
245 struct qdio_irq *irq_ptr; 248 struct qdio_irq *irq_ptr;
246 struct tasklet_struct tasklet; 249 struct tasklet_struct tasklet;
247 spinlock_t lock;
248 250
249 /* error condition during a data transfer */ 251 /* error condition during a data transfer */
250 unsigned int qdio_error; 252 unsigned int qdio_error;
@@ -354,7 +356,7 @@ int get_buf_state(struct qdio_q *q, unsigned int bufnr, unsigned char *state,
354 int auto_ack); 356 int auto_ack);
355void qdio_check_outbound_after_thinint(struct qdio_q *q); 357void qdio_check_outbound_after_thinint(struct qdio_q *q);
356int qdio_inbound_q_moved(struct qdio_q *q); 358int qdio_inbound_q_moved(struct qdio_q *q);
357void qdio_kick_inbound_handler(struct qdio_q *q); 359void qdio_kick_handler(struct qdio_q *q);
358void qdio_stop_polling(struct qdio_q *q); 360void qdio_stop_polling(struct qdio_q *q);
359int qdio_siga_sync_q(struct qdio_q *q); 361int qdio_siga_sync_q(struct qdio_q *q);
360 362
diff --git a/drivers/s390/cio/qdio_debug.c b/drivers/s390/cio/qdio_debug.c
index da7afb04e71f..e3434b34f86c 100644
--- a/drivers/s390/cio/qdio_debug.c
+++ b/drivers/s390/cio/qdio_debug.c
@@ -63,8 +63,9 @@ static int qstat_show(struct seq_file *m, void *v)
63 seq_printf(m, "device state indicator: %d\n", *(u32 *)q->irq_ptr->dsci); 63 seq_printf(m, "device state indicator: %d\n", *(u32 *)q->irq_ptr->dsci);
64 seq_printf(m, "nr_used: %d\n", atomic_read(&q->nr_buf_used)); 64 seq_printf(m, "nr_used: %d\n", atomic_read(&q->nr_buf_used));
65 seq_printf(m, "ftc: %d\n", q->first_to_check); 65 seq_printf(m, "ftc: %d\n", q->first_to_check);
66 seq_printf(m, "last_move_ftc: %d\n", q->last_move_ftc); 66 seq_printf(m, "last_move: %d\n", q->last_move);
67 seq_printf(m, "polling: %d\n", q->u.in.polling); 67 seq_printf(m, "polling: %d\n", q->u.in.polling);
68 seq_printf(m, "ack start: %d\n", q->u.in.ack_start);
68 seq_printf(m, "ack count: %d\n", q->u.in.ack_count); 69 seq_printf(m, "ack count: %d\n", q->u.in.ack_count);
69 seq_printf(m, "slsb buffer states:\n"); 70 seq_printf(m, "slsb buffer states:\n");
70 seq_printf(m, "|0 |8 |16 |24 |32 |40 |48 |56 63|\n"); 71 seq_printf(m, "|0 |8 |16 |24 |32 |40 |48 |56 63|\n");
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 10cb0f8726e5..9e8a2914259b 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -380,11 +380,11 @@ inline void qdio_stop_polling(struct qdio_q *q)
380 380
381 /* show the card that we are not polling anymore */ 381 /* show the card that we are not polling anymore */
382 if (is_qebsm(q)) { 382 if (is_qebsm(q)) {
383 set_buf_states(q, q->last_move_ftc, SLSB_P_INPUT_NOT_INIT, 383 set_buf_states(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT,
384 q->u.in.ack_count); 384 q->u.in.ack_count);
385 q->u.in.ack_count = 0; 385 q->u.in.ack_count = 0;
386 } else 386 } else
387 set_buf_state(q, q->last_move_ftc, SLSB_P_INPUT_NOT_INIT); 387 set_buf_state(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT);
388} 388}
389 389
390static void announce_buffer_error(struct qdio_q *q, int count) 390static void announce_buffer_error(struct qdio_q *q, int count)
@@ -419,15 +419,15 @@ static inline void inbound_primed(struct qdio_q *q, int count)
419 if (!q->u.in.polling) { 419 if (!q->u.in.polling) {
420 q->u.in.polling = 1; 420 q->u.in.polling = 1;
421 q->u.in.ack_count = count; 421 q->u.in.ack_count = count;
422 q->last_move_ftc = q->first_to_check; 422 q->u.in.ack_start = q->first_to_check;
423 return; 423 return;
424 } 424 }
425 425
426 /* delete the previous ACK's */ 426 /* delete the previous ACK's */
427 set_buf_states(q, q->last_move_ftc, SLSB_P_INPUT_NOT_INIT, 427 set_buf_states(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT,
428 q->u.in.ack_count); 428 q->u.in.ack_count);
429 q->u.in.ack_count = count; 429 q->u.in.ack_count = count;
430 q->last_move_ftc = q->first_to_check; 430 q->u.in.ack_start = q->first_to_check;
431 return; 431 return;
432 } 432 }
433 433
@@ -439,14 +439,13 @@ static inline void inbound_primed(struct qdio_q *q, int count)
439 if (q->u.in.polling) { 439 if (q->u.in.polling) {
440 /* reset the previous ACK but first set the new one */ 440 /* reset the previous ACK but first set the new one */
441 set_buf_state(q, new, SLSB_P_INPUT_ACK); 441 set_buf_state(q, new, SLSB_P_INPUT_ACK);
442 set_buf_state(q, q->last_move_ftc, SLSB_P_INPUT_NOT_INIT); 442 set_buf_state(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT);
443 } 443 } else {
444 else {
445 q->u.in.polling = 1; 444 q->u.in.polling = 1;
446 set_buf_state(q, q->first_to_check, SLSB_P_INPUT_ACK); 445 set_buf_state(q, new, SLSB_P_INPUT_ACK);
447 } 446 }
448 447
449 q->last_move_ftc = new; 448 q->u.in.ack_start = new;
450 count--; 449 count--;
451 if (!count) 450 if (!count)
452 return; 451 return;
@@ -455,7 +454,7 @@ static inline void inbound_primed(struct qdio_q *q, int count)
455 * Need to change all PRIMED buffers to NOT_INIT, otherwise 454 * Need to change all PRIMED buffers to NOT_INIT, otherwise
456 * we're loosing initiative in the thinint code. 455 * we're loosing initiative in the thinint code.
457 */ 456 */
458 set_buf_states(q, next_buf(q->first_to_check), SLSB_P_INPUT_NOT_INIT, 457 set_buf_states(q, q->first_to_check, SLSB_P_INPUT_NOT_INIT,
459 count); 458 count);
460} 459}
461 460
@@ -523,7 +522,8 @@ int qdio_inbound_q_moved(struct qdio_q *q)
523 522
524 bufnr = get_inbound_buffer_frontier(q); 523 bufnr = get_inbound_buffer_frontier(q);
525 524
526 if ((bufnr != q->last_move_ftc) || q->qdio_error) { 525 if ((bufnr != q->last_move) || q->qdio_error) {
526 q->last_move = bufnr;
527 if (!need_siga_sync(q) && !pci_out_supported(q)) 527 if (!need_siga_sync(q) && !pci_out_supported(q))
528 q->u.in.timestamp = get_usecs(); 528 q->u.in.timestamp = get_usecs();
529 529
@@ -570,29 +570,30 @@ static int qdio_inbound_q_done(struct qdio_q *q)
570 } 570 }
571} 571}
572 572
573void qdio_kick_inbound_handler(struct qdio_q *q) 573void qdio_kick_handler(struct qdio_q *q)
574{ 574{
575 int count, start, end; 575 int start = q->first_to_kick;
576 576 int end = q->first_to_check;
577 qdio_perf_stat_inc(&perf_stats.inbound_handler); 577 int count;
578
579 start = q->first_to_kick;
580 end = q->first_to_check;
581 if (end >= start)
582 count = end - start;
583 else
584 count = end + QDIO_MAX_BUFFERS_PER_Q - start;
585
586 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "kih s:%3d c:%3d", start, count);
587 578
588 if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE)) 579 if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
589 return; 580 return;
590 581
591 q->handler(q->irq_ptr->cdev, q->qdio_error, q->nr, 582 count = sub_buf(end, start);
592 start, count, q->irq_ptr->int_parm); 583
584 if (q->is_input_q) {
585 qdio_perf_stat_inc(&perf_stats.inbound_handler);
586 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "kih s:%3d c:%3d", start, count);
587 } else {
588 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "koh: nr:%1d", q->nr);
589 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "s:%3d c:%3d", start, count);
590 }
591
592 q->handler(q->irq_ptr->cdev, q->qdio_error, q->nr, start, count,
593 q->irq_ptr->int_parm);
593 594
594 /* for the next time */ 595 /* for the next time */
595 q->first_to_kick = q->first_to_check; 596 q->first_to_kick = end;
596 q->qdio_error = 0; 597 q->qdio_error = 0;
597} 598}
598 599
@@ -603,7 +604,7 @@ again:
603 if (!qdio_inbound_q_moved(q)) 604 if (!qdio_inbound_q_moved(q))
604 return; 605 return;
605 606
606 qdio_kick_inbound_handler(q); 607 qdio_kick_handler(q);
607 608
608 if (!qdio_inbound_q_done(q)) 609 if (!qdio_inbound_q_done(q))
609 /* means poll time is not yet over */ 610 /* means poll time is not yet over */
@@ -698,21 +699,21 @@ static inline int qdio_outbound_q_moved(struct qdio_q *q)
698 699
699 bufnr = get_outbound_buffer_frontier(q); 700 bufnr = get_outbound_buffer_frontier(q);
700 701
701 if ((bufnr != q->last_move_ftc) || q->qdio_error) { 702 if ((bufnr != q->last_move) || q->qdio_error) {
702 q->last_move_ftc = bufnr; 703 q->last_move = bufnr;
703 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out moved:%1d", q->nr); 704 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out moved:%1d", q->nr);
704 return 1; 705 return 1;
705 } else 706 } else
706 return 0; 707 return 0;
707} 708}
708 709
709static void qdio_kick_outbound_q(struct qdio_q *q) 710static int qdio_kick_outbound_q(struct qdio_q *q)
710{ 711{
711 unsigned int busy_bit; 712 unsigned int busy_bit;
712 int cc; 713 int cc;
713 714
714 if (!need_siga_out(q)) 715 if (!need_siga_out(q))
715 return; 716 return 0;
716 717
717 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w:%1d", q->nr); 718 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w:%1d", q->nr);
718 qdio_perf_stat_inc(&perf_stats.siga_out); 719 qdio_perf_stat_inc(&perf_stats.siga_out);
@@ -724,75 +725,37 @@ static void qdio_kick_outbound_q(struct qdio_q *q)
724 case 2: 725 case 2:
725 if (busy_bit) { 726 if (busy_bit) {
726 DBF_ERROR("%4x cc2 REP:%1d", SCH_NO(q), q->nr); 727 DBF_ERROR("%4x cc2 REP:%1d", SCH_NO(q), q->nr);
727 q->qdio_error = cc | QDIO_ERROR_SIGA_BUSY; 728 cc |= QDIO_ERROR_SIGA_BUSY;
728 } else { 729 } else
729 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w cc2:%1d", 730 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w cc2:%1d", q->nr);
730 q->nr);
731 q->qdio_error = cc;
732 }
733 break; 731 break;
734 case 1: 732 case 1:
735 case 3: 733 case 3:
736 DBF_ERROR("%4x SIGA-W:%1d", SCH_NO(q), cc); 734 DBF_ERROR("%4x SIGA-W:%1d", SCH_NO(q), cc);
737 q->qdio_error = cc;
738 break; 735 break;
739 } 736 }
740} 737 return cc;
741
742static void qdio_kick_outbound_handler(struct qdio_q *q)
743{
744 int start, end, count;
745
746 start = q->first_to_kick;
747 end = q->last_move_ftc;
748 if (end >= start)
749 count = end - start;
750 else
751 count = end + QDIO_MAX_BUFFERS_PER_Q - start;
752
753 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "kickouth: %1d", q->nr);
754 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "s:%3d c:%3d", start, count);
755
756 if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
757 return;
758
759 q->handler(q->irq_ptr->cdev, q->qdio_error, q->nr, start, count,
760 q->irq_ptr->int_parm);
761
762 /* for the next time: */
763 q->first_to_kick = q->last_move_ftc;
764 q->qdio_error = 0;
765} 738}
766 739
767static void __qdio_outbound_processing(struct qdio_q *q) 740static void __qdio_outbound_processing(struct qdio_q *q)
768{ 741{
769 unsigned long flags;
770
771 qdio_perf_stat_inc(&perf_stats.tasklet_outbound); 742 qdio_perf_stat_inc(&perf_stats.tasklet_outbound);
772 spin_lock_irqsave(&q->lock, flags);
773
774 BUG_ON(atomic_read(&q->nr_buf_used) < 0); 743 BUG_ON(atomic_read(&q->nr_buf_used) < 0);
775 744
776 if (qdio_outbound_q_moved(q)) 745 if (qdio_outbound_q_moved(q))
777 qdio_kick_outbound_handler(q); 746 qdio_kick_handler(q);
778
779 spin_unlock_irqrestore(&q->lock, flags);
780 747
781 if (queue_type(q) == QDIO_ZFCP_QFMT) { 748 if (queue_type(q) == QDIO_ZFCP_QFMT)
782 if (!pci_out_supported(q) && !qdio_outbound_q_done(q)) 749 if (!pci_out_supported(q) && !qdio_outbound_q_done(q))
783 tasklet_schedule(&q->tasklet); 750 goto sched;
784 return;
785 }
786 751
787 /* bail out for HiperSockets unicast queues */ 752 /* bail out for HiperSockets unicast queues */
788 if (queue_type(q) == QDIO_IQDIO_QFMT && !multicast_outbound(q)) 753 if (queue_type(q) == QDIO_IQDIO_QFMT && !multicast_outbound(q))
789 return; 754 return;
790 755
791 if ((queue_type(q) == QDIO_IQDIO_QFMT) && 756 if ((queue_type(q) == QDIO_IQDIO_QFMT) &&
792 (atomic_read(&q->nr_buf_used)) > QDIO_IQDIO_POLL_LVL) { 757 (atomic_read(&q->nr_buf_used)) > QDIO_IQDIO_POLL_LVL)
793 tasklet_schedule(&q->tasklet); 758 goto sched;
794 return;
795 }
796 759
797 if (q->u.out.pci_out_enabled) 760 if (q->u.out.pci_out_enabled)
798 return; 761 return;
@@ -810,6 +773,12 @@ static void __qdio_outbound_processing(struct qdio_q *q)
810 qdio_perf_stat_inc(&perf_stats.debug_tl_out_timer); 773 qdio_perf_stat_inc(&perf_stats.debug_tl_out_timer);
811 } 774 }
812 } 775 }
776 return;
777
778sched:
779 if (unlikely(q->irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
780 return;
781 tasklet_schedule(&q->tasklet);
813} 782}
814 783
815/* outbound tasklet */ 784/* outbound tasklet */
@@ -822,6 +791,9 @@ void qdio_outbound_processing(unsigned long data)
822void qdio_outbound_timer(unsigned long data) 791void qdio_outbound_timer(unsigned long data)
823{ 792{
824 struct qdio_q *q = (struct qdio_q *)data; 793 struct qdio_q *q = (struct qdio_q *)data;
794
795 if (unlikely(q->irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
796 return;
825 tasklet_schedule(&q->tasklet); 797 tasklet_schedule(&q->tasklet);
826} 798}
827 799
@@ -863,6 +835,9 @@ static void qdio_int_handler_pci(struct qdio_irq *irq_ptr)
863 int i; 835 int i;
864 struct qdio_q *q; 836 struct qdio_q *q;
865 837
838 if (unlikely(irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
839 return;
840
866 qdio_perf_stat_inc(&perf_stats.pci_int); 841 qdio_perf_stat_inc(&perf_stats.pci_int);
867 842
868 for_each_input_queue(irq_ptr, q, i) 843 for_each_input_queue(irq_ptr, q, i)
@@ -1065,8 +1040,9 @@ EXPORT_SYMBOL_GPL(qdio_get_ssqd_desc);
1065 * @cdev: associated ccw device 1040 * @cdev: associated ccw device
1066 * @how: use halt or clear to shutdown 1041 * @how: use halt or clear to shutdown
1067 * 1042 *
1068 * This function calls qdio_shutdown() for @cdev with method @how 1043 * This function calls qdio_shutdown() for @cdev with method @how.
1069 * and on success qdio_free() for @cdev. 1044 * and qdio_free(). The qdio_free() return value is ignored since
1045 * !irq_ptr is already checked.
1070 */ 1046 */
1071int qdio_cleanup(struct ccw_device *cdev, int how) 1047int qdio_cleanup(struct ccw_device *cdev, int how)
1072{ 1048{
@@ -1077,8 +1053,8 @@ int qdio_cleanup(struct ccw_device *cdev, int how)
1077 return -ENODEV; 1053 return -ENODEV;
1078 1054
1079 rc = qdio_shutdown(cdev, how); 1055 rc = qdio_shutdown(cdev, how);
1080 if (rc == 0) 1056
1081 rc = qdio_free(cdev); 1057 qdio_free(cdev);
1082 return rc; 1058 return rc;
1083} 1059}
1084EXPORT_SYMBOL_GPL(qdio_cleanup); 1060EXPORT_SYMBOL_GPL(qdio_cleanup);
@@ -1090,11 +1066,11 @@ static void qdio_shutdown_queues(struct ccw_device *cdev)
1090 int i; 1066 int i;
1091 1067
1092 for_each_input_queue(irq_ptr, q, i) 1068 for_each_input_queue(irq_ptr, q, i)
1093 tasklet_disable(&q->tasklet); 1069 tasklet_kill(&q->tasklet);
1094 1070
1095 for_each_output_queue(irq_ptr, q, i) { 1071 for_each_output_queue(irq_ptr, q, i) {
1096 tasklet_disable(&q->tasklet);
1097 del_timer(&q->u.out.timer); 1072 del_timer(&q->u.out.timer);
1073 tasklet_kill(&q->tasklet);
1098 } 1074 }
1099} 1075}
1100 1076
@@ -1112,6 +1088,7 @@ int qdio_shutdown(struct ccw_device *cdev, int how)
1112 if (!irq_ptr) 1088 if (!irq_ptr)
1113 return -ENODEV; 1089 return -ENODEV;
1114 1090
1091 BUG_ON(irqs_disabled());
1115 DBF_EVENT("qshutdown:%4x", cdev->private->schid.sch_no); 1092 DBF_EVENT("qshutdown:%4x", cdev->private->schid.sch_no);
1116 1093
1117 mutex_lock(&irq_ptr->setup_mutex); 1094 mutex_lock(&irq_ptr->setup_mutex);
@@ -1124,6 +1101,12 @@ int qdio_shutdown(struct ccw_device *cdev, int how)
1124 return 0; 1101 return 0;
1125 } 1102 }
1126 1103
1104 /*
1105 * Indicate that the device is going down. Scheduling the queue
1106 * tasklets is forbidden from here on.
1107 */
1108 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
1109
1127 tiqdio_remove_input_queues(irq_ptr); 1110 tiqdio_remove_input_queues(irq_ptr);
1128 qdio_shutdown_queues(cdev); 1111 qdio_shutdown_queues(cdev);
1129 qdio_shutdown_debug_entries(irq_ptr, cdev); 1112 qdio_shutdown_debug_entries(irq_ptr, cdev);
@@ -1403,9 +1386,8 @@ int qdio_activate(struct ccw_device *cdev)
1403 switch (irq_ptr->state) { 1386 switch (irq_ptr->state) {
1404 case QDIO_IRQ_STATE_STOPPED: 1387 case QDIO_IRQ_STATE_STOPPED:
1405 case QDIO_IRQ_STATE_ERR: 1388 case QDIO_IRQ_STATE_ERR:
1406 mutex_unlock(&irq_ptr->setup_mutex); 1389 rc = -EIO;
1407 qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR); 1390 break;
1408 return -EIO;
1409 default: 1391 default:
1410 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ACTIVE); 1392 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ACTIVE);
1411 rc = 0; 1393 rc = 0;
@@ -1442,10 +1424,10 @@ static inline int buf_in_between(int bufnr, int start, int count)
1442 * @bufnr: first buffer to process 1424 * @bufnr: first buffer to process
1443 * @count: how many buffers are emptied 1425 * @count: how many buffers are emptied
1444 */ 1426 */
1445static void handle_inbound(struct qdio_q *q, unsigned int callflags, 1427static int handle_inbound(struct qdio_q *q, unsigned int callflags,
1446 int bufnr, int count) 1428 int bufnr, int count)
1447{ 1429{
1448 int used, cc, diff; 1430 int used, diff;
1449 1431
1450 if (!q->u.in.polling) 1432 if (!q->u.in.polling)
1451 goto set; 1433 goto set;
@@ -1456,19 +1438,18 @@ static void handle_inbound(struct qdio_q *q, unsigned int callflags,
1456 q->u.in.polling = 0; 1438 q->u.in.polling = 0;
1457 q->u.in.ack_count = 0; 1439 q->u.in.ack_count = 0;
1458 goto set; 1440 goto set;
1459 } else if (buf_in_between(q->last_move_ftc, bufnr, count)) { 1441 } else if (buf_in_between(q->u.in.ack_start, bufnr, count)) {
1460 if (is_qebsm(q)) { 1442 if (is_qebsm(q)) {
1461 /* partial overwrite, just update last_move_ftc */ 1443 /* partial overwrite, just update ack_start */
1462 diff = add_buf(bufnr, count); 1444 diff = add_buf(bufnr, count);
1463 diff = sub_buf(diff, q->last_move_ftc); 1445 diff = sub_buf(diff, q->u.in.ack_start);
1464 q->u.in.ack_count -= diff; 1446 q->u.in.ack_count -= diff;
1465 if (q->u.in.ack_count <= 0) { 1447 if (q->u.in.ack_count <= 0) {
1466 q->u.in.polling = 0; 1448 q->u.in.polling = 0;
1467 q->u.in.ack_count = 0; 1449 q->u.in.ack_count = 0;
1468 /* TODO: must we set last_move_ftc to something meaningful? */
1469 goto set; 1450 goto set;
1470 } 1451 }
1471 q->last_move_ftc = add_buf(q->last_move_ftc, diff); 1452 q->u.in.ack_start = add_buf(q->u.in.ack_start, diff);
1472 } 1453 }
1473 else 1454 else
1474 /* the only ACK will be deleted, so stop polling */ 1455 /* the only ACK will be deleted, so stop polling */
@@ -1483,13 +1464,11 @@ set:
1483 1464
1484 /* no need to signal as long as the adapter had free buffers */ 1465 /* no need to signal as long as the adapter had free buffers */
1485 if (used) 1466 if (used)
1486 return; 1467 return 0;
1487 1468
1488 if (need_siga_in(q)) { 1469 if (need_siga_in(q))
1489 cc = qdio_siga_input(q); 1470 return qdio_siga_input(q);
1490 if (cc) 1471 return 0;
1491 q->qdio_error = cc;
1492 }
1493} 1472}
1494 1473
1495/** 1474/**
@@ -1499,11 +1478,11 @@ set:
1499 * @bufnr: first buffer to process 1478 * @bufnr: first buffer to process
1500 * @count: how many buffers are filled 1479 * @count: how many buffers are filled
1501 */ 1480 */
1502static void handle_outbound(struct qdio_q *q, unsigned int callflags, 1481static int handle_outbound(struct qdio_q *q, unsigned int callflags,
1503 int bufnr, int count) 1482 int bufnr, int count)
1504{ 1483{
1505 unsigned char state; 1484 unsigned char state;
1506 int used; 1485 int used, rc = 0;
1507 1486
1508 qdio_perf_stat_inc(&perf_stats.outbound_handler); 1487 qdio_perf_stat_inc(&perf_stats.outbound_handler);
1509 1488
@@ -1518,27 +1497,26 @@ static void handle_outbound(struct qdio_q *q, unsigned int callflags,
1518 1497
1519 if (queue_type(q) == QDIO_IQDIO_QFMT) { 1498 if (queue_type(q) == QDIO_IQDIO_QFMT) {
1520 if (multicast_outbound(q)) 1499 if (multicast_outbound(q))
1521 qdio_kick_outbound_q(q); 1500 rc = qdio_kick_outbound_q(q);
1522 else 1501 else
1523 if ((q->irq_ptr->ssqd_desc.mmwc > 1) && 1502 if ((q->irq_ptr->ssqd_desc.mmwc > 1) &&
1524 (count > 1) && 1503 (count > 1) &&
1525 (count <= q->irq_ptr->ssqd_desc.mmwc)) { 1504 (count <= q->irq_ptr->ssqd_desc.mmwc)) {
1526 /* exploit enhanced SIGA */ 1505 /* exploit enhanced SIGA */
1527 q->u.out.use_enh_siga = 1; 1506 q->u.out.use_enh_siga = 1;
1528 qdio_kick_outbound_q(q); 1507 rc = qdio_kick_outbound_q(q);
1529 } else { 1508 } else {
1530 /* 1509 /*
1531 * One siga-w per buffer required for unicast 1510 * One siga-w per buffer required for unicast
1532 * HiperSockets. 1511 * HiperSockets.
1533 */ 1512 */
1534 q->u.out.use_enh_siga = 0; 1513 q->u.out.use_enh_siga = 0;
1535 while (count--) 1514 while (count--) {
1536 qdio_kick_outbound_q(q); 1515 rc = qdio_kick_outbound_q(q);
1516 if (rc)
1517 goto out;
1518 }
1537 } 1519 }
1538
1539 /* report CC=2 conditions synchronously */
1540 if (q->qdio_error)
1541 __qdio_outbound_processing(q);
1542 goto out; 1520 goto out;
1543 } 1521 }
1544 1522
@@ -1550,14 +1528,14 @@ static void handle_outbound(struct qdio_q *q, unsigned int callflags,
1550 /* try to fast requeue buffers */ 1528 /* try to fast requeue buffers */
1551 get_buf_state(q, prev_buf(bufnr), &state, 0); 1529 get_buf_state(q, prev_buf(bufnr), &state, 0);
1552 if (state != SLSB_CU_OUTPUT_PRIMED) 1530 if (state != SLSB_CU_OUTPUT_PRIMED)
1553 qdio_kick_outbound_q(q); 1531 rc = qdio_kick_outbound_q(q);
1554 else { 1532 else {
1555 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "fast-req"); 1533 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "fast-req");
1556 qdio_perf_stat_inc(&perf_stats.fast_requeue); 1534 qdio_perf_stat_inc(&perf_stats.fast_requeue);
1557 } 1535 }
1558out: 1536out:
1559 /* Fixme: could wait forever if called from process context */
1560 tasklet_schedule(&q->tasklet); 1537 tasklet_schedule(&q->tasklet);
1538 return rc;
1561} 1539}
1562 1540
1563/** 1541/**
@@ -1596,14 +1574,12 @@ int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
1596 return -EBUSY; 1574 return -EBUSY;
1597 1575
1598 if (callflags & QDIO_FLAG_SYNC_INPUT) 1576 if (callflags & QDIO_FLAG_SYNC_INPUT)
1599 handle_inbound(irq_ptr->input_qs[q_nr], callflags, bufnr, 1577 return handle_inbound(irq_ptr->input_qs[q_nr],
1600 count); 1578 callflags, bufnr, count);
1601 else if (callflags & QDIO_FLAG_SYNC_OUTPUT) 1579 else if (callflags & QDIO_FLAG_SYNC_OUTPUT)
1602 handle_outbound(irq_ptr->output_qs[q_nr], callflags, bufnr, 1580 return handle_outbound(irq_ptr->output_qs[q_nr],
1603 count); 1581 callflags, bufnr, count);
1604 else 1582 return -EINVAL;
1605 return -EINVAL;
1606 return 0;
1607} 1583}
1608EXPORT_SYMBOL_GPL(do_QDIO); 1584EXPORT_SYMBOL_GPL(do_QDIO);
1609 1585
diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c
index c08356b95bf5..18d54fc21ce9 100644
--- a/drivers/s390/cio/qdio_setup.c
+++ b/drivers/s390/cio/qdio_setup.c
@@ -117,7 +117,6 @@ static void setup_queues_misc(struct qdio_q *q, struct qdio_irq *irq_ptr,
117 q->mask = 1 << (31 - i); 117 q->mask = 1 << (31 - i);
118 q->nr = i; 118 q->nr = i;
119 q->handler = handler; 119 q->handler = handler;
120 spin_lock_init(&q->lock);
121} 120}
122 121
123static void setup_storage_lists(struct qdio_q *q, struct qdio_irq *irq_ptr, 122static void setup_storage_lists(struct qdio_q *q, struct qdio_irq *irq_ptr,
diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c
index 8e90e147b746..c655d011a78d 100644
--- a/drivers/s390/cio/qdio_thinint.c
+++ b/drivers/s390/cio/qdio_thinint.c
@@ -31,6 +31,7 @@
31 31
32/* list of thin interrupt input queues */ 32/* list of thin interrupt input queues */
33static LIST_HEAD(tiq_list); 33static LIST_HEAD(tiq_list);
34DEFINE_MUTEX(tiq_list_lock);
34 35
35/* adapter local summary indicator */ 36/* adapter local summary indicator */
36static unsigned char *tiqdio_alsi; 37static unsigned char *tiqdio_alsi;
@@ -95,12 +96,11 @@ void tiqdio_add_input_queues(struct qdio_irq *irq_ptr)
95 if (!css_qdio_omit_svs && irq_ptr->siga_flag.sync) 96 if (!css_qdio_omit_svs && irq_ptr->siga_flag.sync)
96 css_qdio_omit_svs = 1; 97 css_qdio_omit_svs = 1;
97 98
98 for_each_input_queue(irq_ptr, q, i) { 99 mutex_lock(&tiq_list_lock);
100 for_each_input_queue(irq_ptr, q, i)
99 list_add_rcu(&q->entry, &tiq_list); 101 list_add_rcu(&q->entry, &tiq_list);
100 synchronize_rcu(); 102 mutex_unlock(&tiq_list_lock);
101 }
102 xchg(irq_ptr->dsci, 1); 103 xchg(irq_ptr->dsci, 1);
103 tasklet_schedule(&tiqdio_tasklet);
104} 104}
105 105
106/* 106/*
@@ -118,7 +118,10 @@ void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr)
118 /* if establish triggered an error */ 118 /* if establish triggered an error */
119 if (!q || !q->entry.prev || !q->entry.next) 119 if (!q || !q->entry.prev || !q->entry.next)
120 continue; 120 continue;
121
122 mutex_lock(&tiq_list_lock);
121 list_del_rcu(&q->entry); 123 list_del_rcu(&q->entry);
124 mutex_unlock(&tiq_list_lock);
122 synchronize_rcu(); 125 synchronize_rcu();
123 } 126 }
124} 127}
@@ -155,15 +158,15 @@ static void __tiqdio_inbound_processing(struct qdio_q *q)
155 */ 158 */
156 qdio_check_outbound_after_thinint(q); 159 qdio_check_outbound_after_thinint(q);
157 160
158again:
159 if (!qdio_inbound_q_moved(q)) 161 if (!qdio_inbound_q_moved(q))
160 return; 162 return;
161 163
162 qdio_kick_inbound_handler(q); 164 qdio_kick_handler(q);
163 165
164 if (!tiqdio_inbound_q_done(q)) { 166 if (!tiqdio_inbound_q_done(q)) {
165 qdio_perf_stat_inc(&perf_stats.thinint_inbound_loop); 167 qdio_perf_stat_inc(&perf_stats.thinint_inbound_loop);
166 goto again; 168 if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
169 tasklet_schedule(&q->tasklet);
167 } 170 }
168 171
169 qdio_stop_polling(q); 172 qdio_stop_polling(q);
@@ -173,7 +176,8 @@ again:
173 */ 176 */
174 if (!tiqdio_inbound_q_done(q)) { 177 if (!tiqdio_inbound_q_done(q)) {
175 qdio_perf_stat_inc(&perf_stats.thinint_inbound_loop2); 178 qdio_perf_stat_inc(&perf_stats.thinint_inbound_loop2);
176 goto again; 179 if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
180 tasklet_schedule(&q->tasklet);
177 } 181 }
178} 182}
179 183
@@ -366,10 +370,11 @@ void qdio_shutdown_thinint(struct qdio_irq *irq_ptr)
366 370
367void __exit tiqdio_unregister_thinints(void) 371void __exit tiqdio_unregister_thinints(void)
368{ 372{
369 tasklet_disable(&tiqdio_tasklet); 373 WARN_ON(!list_empty(&tiq_list));
370 374
371 if (tiqdio_alsi) { 375 if (tiqdio_alsi) {
372 s390_unregister_adapter_interrupt(tiqdio_alsi, QDIO_AIRQ_ISC); 376 s390_unregister_adapter_interrupt(tiqdio_alsi, QDIO_AIRQ_ISC);
373 isc_unregister(QDIO_AIRQ_ISC); 377 isc_unregister(QDIO_AIRQ_ISC);
374 } 378 }
379 tasklet_kill(&tiqdio_tasklet);
375} 380}