author     Martin Schwidefsky <schwidefsky@de.ibm.com>   2007-04-29 02:03:59 -0400
committer  David S. Miller <davem@davemloft.net>         2007-04-29 02:03:59 -0400
commit     04b090d50c88ac8e5ec9c2e985bb65bd153893aa
tree       065b815f238796bcd5523c8620d9166ad9b573f2
parent     d0772b70faaf8e9f2013b6c4273d94d5eac8047a
[AF_IUCV/IUCV]: smp_call_function deadlock
Calling smp_call_function can lead to a deadlock if it is called
from tasklet context.
Fixing this deadlock requires moving the smp_call_function call from
tasklet context to a work queue. To do that, queue the path pending
interrupts on a separate list and move the path cleanup out of
iucv_path_sever into iucv_path_connect and iucv_path_pending.
This creates a new requirement for iucv_path_connect: it may not be
called from tasklet context anymore.
Also fix a compile problem for CONFIG_HOTPLUG_CPU=n and another
problem when walking the cpu_online mask: cpu hotplug must be
disabled while doing so.
Signed-off-by: Frank Pavlic <fpavlic@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--   include/net/iucv/iucv.h |   2
-rw-r--r--   net/iucv/iucv.c         | 205
2 files changed, 133 insertions, 74 deletions
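The diff below implements that split. As a rough, illustrative sketch of the
general pattern only (the demo_* names are invented here and appear nowhere in
the patch or the kernel): the external interrupt handler sorts events onto one
of two lists; a tasklet drains the list whose handlers must stay atomic, and a
work item drains the list whose handlers may need smp_call_function, which is
only safe in process context.

/*
 * Illustrative sketch only -- the demo_* identifiers are made up and are
 * not part of this patch or of the kernel.
 */
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct demo_event {
	struct list_head list;
	int type;				/* 0x01 == "path pending" */
};

static LIST_HEAD(demo_task_queue);		/* drained by the tasklet */
static LIST_HEAD(demo_work_queue);		/* drained by the work item */
static DEFINE_SPINLOCK(demo_queue_lock);	/* protects both queues */

/* Fast path: softirq context, must not call smp_call_function. */
static void demo_tasklet_fn(unsigned long ignored)
{
	struct list_head q = LIST_HEAD_INIT(q);
	struct demo_event *p, *n;

	spin_lock_irq(&demo_queue_lock);
	list_splice_init(&demo_task_queue, &q);
	spin_unlock_irq(&demo_queue_lock);

	list_for_each_entry_safe(p, n, &q, list) {
		list_del_init(&p->list);
		/* handle the event here without sleeping */
		kfree(p);
	}
}
static DECLARE_TASKLET(demo_tasklet, demo_tasklet_fn, 0);

/* Slow path: process context, smp_call_function is allowed here. */
static void demo_work_fn(struct work_struct *work)
{
	struct list_head q = LIST_HEAD_INIT(q);
	struct demo_event *p, *n;

	spin_lock_irq(&demo_queue_lock);
	list_splice_init(&demo_work_queue, &q);
	spin_unlock_irq(&demo_queue_lock);

	list_for_each_entry_safe(p, n, &q, list) {
		list_del_init(&p->list);
		/* handle the event here, e.g. connect or cleanup work */
		kfree(p);
	}
}
static DECLARE_WORK(demo_work, demo_work_fn);

/* Called from the external interrupt handler (atomic context). */
static void demo_queue_event(int type)
{
	struct demo_event *ev = kmalloc(sizeof(*ev), GFP_ATOMIC);

	if (!ev)
		return;
	ev->type = type;
	spin_lock(&demo_queue_lock);
	if (type == 0x01) {
		/* May need smp_call_function later: defer to a work queue. */
		list_add_tail(&ev->list, &demo_work_queue);
		schedule_work(&demo_work);
	} else {
		/* Everything else stays on the fast tasklet path. */
		list_add_tail(&ev->list, &demo_task_queue);
		tasklet_schedule(&demo_tasklet);
	}
	spin_unlock(&demo_queue_lock);
}

In the patch itself this corresponds to iucv_task_queue/iucv_tasklet_fn on the
fast path and iucv_work_queue/iucv_work_fn on the slow path, both protected by
iucv_queue_lock.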
diff --git a/include/net/iucv/iucv.h b/include/net/iucv/iucv.h
index 746e7416261e..fd70adbb3566 100644
--- a/include/net/iucv/iucv.h
+++ b/include/net/iucv/iucv.h
@@ -16,7 +16,7 @@
  * completed a register, it can exploit the other functions.
  * For furthur reference on all IUCV functionality, refer to the
  * CP Programming Services book, also available on the web thru
- * www.ibm.com/s390/vm/pubs, manual # SC24-5760
+ * www.vm.ibm.com/pubs, manual # SC24-6084
  *
  * Definition of Return Codes
  * - All positive return codes including zero are reflected back
diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
index 60f293842a39..903bdb6eaaa1 100644
--- a/net/iucv/iucv.c
+++ b/net/iucv/iucv.c
@@ -90,20 +90,43 @@ struct iucv_irq_data {
 	u32 res2[8];
 };
 
-struct iucv_work {
+struct iucv_irq_list {
 	struct list_head list;
 	struct iucv_irq_data data;
 };
 
-static LIST_HEAD(iucv_work_queue);
-static DEFINE_SPINLOCK(iucv_work_lock);
-
 static struct iucv_irq_data *iucv_irq_data;
 static cpumask_t iucv_buffer_cpumask = CPU_MASK_NONE;
 static cpumask_t iucv_irq_cpumask = CPU_MASK_NONE;
 
-static void iucv_tasklet_handler(unsigned long);
-static DECLARE_TASKLET(iucv_tasklet, iucv_tasklet_handler,0);
+/*
+ * Queue of interrupt buffers lock for delivery via the tasklet
+ * (fast but can't call smp_call_function).
+ */
+static LIST_HEAD(iucv_task_queue);
+
+/*
+ * The tasklet for fast delivery of iucv interrupts.
+ */
+static void iucv_tasklet_fn(unsigned long);
+static DECLARE_TASKLET(iucv_tasklet, iucv_tasklet_fn,0);
+
+/*
+ * Queue of interrupt buffers for delivery via a work queue
+ * (slower but can call smp_call_function).
+ */
+static LIST_HEAD(iucv_work_queue);
+
+/*
+ * The work element to deliver path pending interrupts.
+ */
+static void iucv_work_fn(struct work_struct *work);
+static DECLARE_WORK(iucv_work, iucv_work_fn);
+
+/*
+ * Spinlock protecting task and work queue.
+ */
+static DEFINE_SPINLOCK(iucv_queue_lock);
 
 enum iucv_command_codes {
 	IUCV_QUERY = 0,
@@ -147,10 +170,10 @@ static unsigned long iucv_max_pathid;
 static DEFINE_SPINLOCK(iucv_table_lock);
 
 /*
- * iucv_tasklet_cpu: contains the number of the cpu executing the tasklet.
- * Needed for iucv_path_sever called from tasklet.
+ * iucv_active_cpu: contains the number of the cpu executing the tasklet
+ * or the work handler. Needed for iucv_path_sever called from tasklet.
  */
-static int iucv_tasklet_cpu = -1;
+static int iucv_active_cpu = -1;
 
 /*
  * Mutex and wait queue for iucv_register/iucv_unregister.
@@ -449,17 +472,19 @@ static void iucv_setmask_mp(void)
 {
 	int cpu;
 
+	preempt_disable();
 	for_each_online_cpu(cpu)
 		/* Enable all cpus with a declared buffer. */
 		if (cpu_isset(cpu, iucv_buffer_cpumask) &&
 		    !cpu_isset(cpu, iucv_irq_cpumask))
 			smp_call_function_on(iucv_allow_cpu, NULL, 0, 1, cpu);
+	preempt_enable();
 }
 
 /**
  * iucv_setmask_up
  *
- * Allow iucv interrupts on a single cpus.
+ * Allow iucv interrupts on a single cpu.
  */
 static void iucv_setmask_up(void)
 {
@@ -493,8 +518,10 @@ static int iucv_enable(void)
 		goto out;
 	/* Declare per cpu buffers. */
 	rc = -EIO;
+	preempt_disable();
 	for_each_online_cpu(cpu)
 		smp_call_function_on(iucv_declare_cpu, NULL, 0, 1, cpu);
+	preempt_enable();
 	if (cpus_empty(iucv_buffer_cpumask))
 		/* No cpu could declare an iucv buffer. */
 		goto out_path;
@@ -584,48 +611,49 @@ static int iucv_sever_pathid(u16 pathid, u8 userdata[16])
 	return iucv_call_b2f0(IUCV_SEVER, parm);
 }
 
+#ifdef CONFIG_SMP
 /**
- * __iucv_cleanup_pathid
+ * __iucv_cleanup_queue
  * @dummy: unused dummy argument
  *
  * Nop function called via smp_call_function to force work items from
  * pending external iucv interrupts to the work queue.
  */
-static void __iucv_cleanup_pathid(void *dummy)
+static void __iucv_cleanup_queue(void *dummy)
 {
 }
+#endif
 
 /**
- * iucv_cleanup_pathid
- * @pathid: 16 bit pathid
+ * iucv_cleanup_queue
  *
 * Function called after a path has been severed to find all remaining
 * work items for the now stale pathid. The caller needs to hold the
 * iucv_table_lock.
 */
-static void iucv_cleanup_pathid(u16 pathid)
+static void iucv_cleanup_queue(void)
 {
-	struct iucv_work *p, *n;
+	struct iucv_irq_list *p, *n;
 
 	/*
-	 * Path is severed, the pathid can be reused immediatly on
-	 * a iucv connect or a connection pending interrupt.
-	 * iucv_path_connect and connection pending interrupt will
-	 * wait until the iucv_table_lock is released before the
-	 * recycled pathid enters the system.
-	 * Force remaining interrupts to the work queue, then
-	 * scan the work queue for items of this path.
+	 * When a path is severed, the pathid can be reused immediatly
+	 * on a iucv connect or a connection pending interrupt. Remove
+	 * all entries from the task queue that refer to a stale pathid
+	 * (iucv_path_table[ix] == NULL). Only then do the iucv connect
+	 * or deliver the connection pending interrupt. To get all the
+	 * pending interrupts force them to the work queue by calling
+	 * an empty function on all cpus.
 	 */
-	smp_call_function(__iucv_cleanup_pathid, NULL, 0, 1);
-	spin_lock_irq(&iucv_work_lock);
-	list_for_each_entry_safe(p, n, &iucv_work_queue, list) {
-		/* Remove work items for pathid except connection pending */
-		if (p->data.ippathid == pathid && p->data.iptype != 0x01) {
+	smp_call_function(__iucv_cleanup_queue, NULL, 0, 1);
+	spin_lock_irq(&iucv_queue_lock);
+	list_for_each_entry_safe(p, n, &iucv_task_queue, list) {
+		/* Remove stale work items from the task queue. */
+		if (iucv_path_table[p->data.ippathid] == NULL) {
 			list_del(&p->list);
 			kfree(p);
 		}
 	}
-	spin_unlock_irq(&iucv_work_lock);
+	spin_unlock_irq(&iucv_queue_lock);
 }
 
 /**
@@ -684,7 +712,6 @@ void iucv_unregister(struct iucv_handler *handler, int smp)
 		iucv_sever_pathid(p->pathid, NULL);
 		iucv_path_table[p->pathid] = NULL;
 		list_del(&p->list);
-		iucv_cleanup_pathid(p->pathid);
 		iucv_path_free(p);
 	}
 	spin_unlock_bh(&iucv_table_lock);
@@ -757,9 +784,9 @@ int iucv_path_connect(struct iucv_path *path, struct iucv_handler *handler,
 	union iucv_param *parm;
 	int rc;
 
-	preempt_disable();
-	if (iucv_tasklet_cpu != smp_processor_id())
-		spin_lock_bh(&iucv_table_lock);
+	BUG_ON(in_atomic());
+	spin_lock_bh(&iucv_table_lock);
+	iucv_cleanup_queue();
 	parm = percpu_ptr(iucv_param, smp_processor_id());
 	memset(parm, 0, sizeof(union iucv_param));
 	parm->ctrl.ipmsglim = path->msglim;
@@ -794,9 +821,7 @@ int iucv_path_connect(struct iucv_path *path, struct iucv_handler *handler,
 			rc = -EIO;
 		}
 	}
-	if (iucv_tasklet_cpu != smp_processor_id())
-		spin_unlock_bh(&iucv_table_lock);
-	preempt_enable();
+	spin_unlock_bh(&iucv_table_lock);
 	return rc;
 }
 
@@ -867,15 +892,14 @@ int iucv_path_sever(struct iucv_path *path, u8 userdata[16])
 
 
 	preempt_disable();
-	if (iucv_tasklet_cpu != smp_processor_id())
+	if (iucv_active_cpu != smp_processor_id())
 		spin_lock_bh(&iucv_table_lock);
 	rc = iucv_sever_pathid(path->pathid, userdata);
 	if (!rc) {
 		iucv_path_table[path->pathid] = NULL;
 		list_del_init(&path->list);
-		iucv_cleanup_pathid(path->pathid);
 	}
-	if (iucv_tasklet_cpu != smp_processor_id())
+	if (iucv_active_cpu != smp_processor_id())
 		spin_unlock_bh(&iucv_table_lock);
 	preempt_enable();
 	return rc;
@@ -1244,8 +1268,7 @@ static void iucv_path_complete(struct iucv_irq_data *data)
 	struct iucv_path_complete *ipc = (void *) data;
 	struct iucv_path *path = iucv_path_table[ipc->ippathid];
 
-	BUG_ON(!path || !path->handler);
-	if (path->handler->path_complete)
+	if (path && path->handler && path->handler->path_complete)
 		path->handler->path_complete(path, ipc->ipuser);
 }
 
@@ -1273,14 +1296,14 @@ static void iucv_path_severed(struct iucv_irq_data *data)
 	struct iucv_path_severed *ips = (void *) data;
 	struct iucv_path *path = iucv_path_table[ips->ippathid];
 
-	BUG_ON(!path || !path->handler);
+	if (!path || !path->handler)	/* Already severed */
+		return;
 	if (path->handler->path_severed)
 		path->handler->path_severed(path, ips->ipuser);
 	else {
 		iucv_sever_pathid(path->pathid, NULL);
 		iucv_path_table[path->pathid] = NULL;
 		list_del_init(&path->list);
-		iucv_cleanup_pathid(path->pathid);
 		iucv_path_free(path);
 	}
 }
@@ -1309,8 +1332,7 @@ static void iucv_path_quiesced(struct iucv_irq_data *data)
 	struct iucv_path_quiesced *ipq = (void *) data;
 	struct iucv_path *path = iucv_path_table[ipq->ippathid];
 
-	BUG_ON(!path || !path->handler);
-	if (path->handler->path_quiesced)
+	if (path && path->handler && path->handler->path_quiesced)
 		path->handler->path_quiesced(path, ipq->ipuser);
 }
 
@@ -1338,8 +1360,7 @@ static void iucv_path_resumed(struct iucv_irq_data *data)
 	struct iucv_path_resumed *ipr = (void *) data;
 	struct iucv_path *path = iucv_path_table[ipr->ippathid];
 
-	BUG_ON(!path || !path->handler);
-	if (path->handler->path_resumed)
+	if (path && path->handler && path->handler->path_resumed)
 		path->handler->path_resumed(path, ipr->ipuser);
 }
 
@@ -1371,8 +1392,7 @@ static void iucv_message_complete(struct iucv_irq_data *data)
 	struct iucv_path *path = iucv_path_table[imc->ippathid];
 	struct iucv_message msg;
 
-	BUG_ON(!path || !path->handler);
-	if (path->handler->message_complete) {
+	if (path && path->handler && path->handler->message_complete) {
 		msg.flags = imc->ipflags1;
 		msg.id = imc->ipmsgid;
 		msg.audit = imc->ipaudit;
@@ -1417,8 +1437,7 @@ static void iucv_message_pending(struct iucv_irq_data *data)
 	struct iucv_path *path = iucv_path_table[imp->ippathid];
 	struct iucv_message msg;
 
-	BUG_ON(!path || !path->handler);
-	if (path->handler->message_pending) {
+	if (path && path->handler && path->handler->message_pending) {
 		msg.flags = imp->ipflags1;
 		msg.id = imp->ipmsgid;
 		msg.class = imp->iptrgcls;
@@ -1433,17 +1452,16 @@ static void iucv_message_pending(struct iucv_irq_data *data)
 }
 
 /**
- * iucv_tasklet_handler:
+ * iucv_tasklet_fn:
  *
  * This tasklet loops over the queue of irq buffers created by
  * iucv_external_interrupt, calls the appropriate action handler
  * and then frees the buffer.
  */
-static void iucv_tasklet_handler(unsigned long ignored)
+static void iucv_tasklet_fn(unsigned long ignored)
 {
 	typedef void iucv_irq_fn(struct iucv_irq_data *);
 	static iucv_irq_fn *irq_fn[] = {
-		[0x01] = iucv_path_pending,
 		[0x02] = iucv_path_complete,
 		[0x03] = iucv_path_severed,
 		[0x04] = iucv_path_quiesced,
@@ -1453,38 +1471,70 @@ static void iucv_tasklet_handler(unsigned long ignored)
 		[0x08] = iucv_message_pending,
 		[0x09] = iucv_message_pending,
 	};
-	struct iucv_work *p;
+	struct list_head task_queue = LIST_HEAD_INIT(task_queue);
+	struct iucv_irq_list *p, *n;
 
 	/* Serialize tasklet, iucv_path_sever and iucv_path_connect. */
 	spin_lock(&iucv_table_lock);
-	iucv_tasklet_cpu = smp_processor_id();
+	iucv_active_cpu = smp_processor_id();
 
-	spin_lock_irq(&iucv_work_lock);
-	while (!list_empty(&iucv_work_queue)) {
-		p = list_entry(iucv_work_queue.next, struct iucv_work, list);
+	spin_lock_irq(&iucv_queue_lock);
+	list_splice_init(&iucv_task_queue, &task_queue);
+	spin_unlock_irq(&iucv_queue_lock);
+
+	list_for_each_entry_safe(p, n, &task_queue, list) {
 		list_del_init(&p->list);
-		spin_unlock_irq(&iucv_work_lock);
 		irq_fn[p->data.iptype](&p->data);
 		kfree(p);
-		spin_lock_irq(&iucv_work_lock);
 	}
-	spin_unlock_irq(&iucv_work_lock);
 
-	iucv_tasklet_cpu = -1;
+	iucv_active_cpu = -1;
 	spin_unlock(&iucv_table_lock);
 }
 
 /**
+ * iucv_work_fn:
+ *
+ * This work function loops over the queue of path pending irq blocks
+ * created by iucv_external_interrupt, calls the appropriate action
+ * handler and then frees the buffer.
+ */
+static void iucv_work_fn(struct work_struct *work)
+{
+	typedef void iucv_irq_fn(struct iucv_irq_data *);
+	struct list_head work_queue = LIST_HEAD_INIT(work_queue);
+	struct iucv_irq_list *p, *n;
+
+	/* Serialize tasklet, iucv_path_sever and iucv_path_connect. */
+	spin_lock_bh(&iucv_table_lock);
+	iucv_active_cpu = smp_processor_id();
+
+	spin_lock_irq(&iucv_queue_lock);
+	list_splice_init(&iucv_work_queue, &work_queue);
+	spin_unlock_irq(&iucv_queue_lock);
+
+	iucv_cleanup_queue();
+	list_for_each_entry_safe(p, n, &work_queue, list) {
+		list_del_init(&p->list);
+		iucv_path_pending(&p->data);
+		kfree(p);
+	}
+
+	iucv_active_cpu = -1;
+	spin_unlock_bh(&iucv_table_lock);
+}
+
+/**
  * iucv_external_interrupt
  * @code: irq code
  *
 * Handles external interrupts coming in from CP.
- * Places the interrupt buffer on a queue and schedules iucv_tasklet_handler().
+ * Places the interrupt buffer on a queue and schedules iucv_tasklet_fn().
 */
 static void iucv_external_interrupt(u16 code)
 {
 	struct iucv_irq_data *p;
-	struct iucv_work *work;
+	struct iucv_irq_list *work;
 
 	p = percpu_ptr(iucv_irq_data, smp_processor_id());
 	if (p->ippathid >= iucv_max_pathid) {
@@ -1498,16 +1548,23 @@ static void iucv_external_interrupt(u16 code)
 		printk(KERN_ERR "iucv_do_int: unknown iucv interrupt\n");
 		return;
 	}
-	work = kmalloc(sizeof(struct iucv_work), GFP_ATOMIC);
+	work = kmalloc(sizeof(struct iucv_irq_list), GFP_ATOMIC);
 	if (!work) {
 		printk(KERN_WARNING "iucv_external_interrupt: out of memory\n");
 		return;
 	}
 	memcpy(&work->data, p, sizeof(work->data));
-	spin_lock(&iucv_work_lock);
-	list_add_tail(&work->list, &iucv_work_queue);
-	spin_unlock(&iucv_work_lock);
-	tasklet_schedule(&iucv_tasklet);
+	spin_lock(&iucv_queue_lock);
+	if (p->iptype == 0x01) {
+		/* Path pending interrupt. */
+		list_add_tail(&work->list, &iucv_work_queue);
+		schedule_work(&iucv_work);
+	} else {
+		/* The other interrupts. */
+		list_add_tail(&work->list, &iucv_task_queue);
+		tasklet_schedule(&iucv_tasklet);
+	}
+	spin_unlock(&iucv_queue_lock);
 }
 
 /**
@@ -1577,12 +1634,14 @@ out:
  */
 static void iucv_exit(void)
 {
-	struct iucv_work *p, *n;
+	struct iucv_irq_list *p, *n;
 
-	spin_lock_irq(&iucv_work_lock);
+	spin_lock_irq(&iucv_queue_lock);
+	list_for_each_entry_safe(p, n, &iucv_task_queue, list)
+		kfree(p);
 	list_for_each_entry_safe(p, n, &iucv_work_queue, list)
 		kfree(p);
-	spin_unlock_irq(&iucv_work_lock);
+	spin_unlock_irq(&iucv_queue_lock);
 	unregister_hotcpu_notifier(&iucv_cpu_notifier);
 	percpu_free(iucv_param);
 	percpu_free(iucv_irq_data);