path: root/net/iucv
author    Ursula Braun <ursula.braun@de.ibm.com>    2009-04-21 19:26:20 -0400
committer David S. Miller <davem@davemloft.net>     2009-04-23 07:04:28 -0400
commit    42e1b4c2c6c823ae26e64c557addf5329a7735b7 (patch)
tree      4d075b3cac161fc888372b9db656a8802e9bf0ed /net/iucv
parent    362b76edb78923face033e18e4ffc85df8db0f28 (diff)
iucv: provide second per-cpu IUCV command parameter block
Some IUCV commands can be invoked in interrupt context. Those commands need a separate per-cpu IUCV command parameter block; otherwise they might overwrite the parameter block of an IUCV command invocation that has not yet finished in process context.

Signed-off-by: Ursula Braun <ursula.braun@de.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
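The race described above can be pictured with a minimal userspace analogue (this is not kernel code; every name in the sketch is invented for illustration): a signal handler stands in for interrupt context, and giving it its own scratch block, the same idea as the new iucv_param_irq[] array, keeps it from overwriting a request that the main loop is still assembling in its process-context block.

#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

struct parm { char text[32]; };   /* stand-in for union iucv_param */

static struct parm parm_proc;     /* "process context" scratch block   */
static struct parm parm_irq;      /* "interrupt context" scratch block */

/* Plays the role of an IUCV command issued from interrupt context. */
static void irq_like_handler(int sig)
{
        (void)sig;
        /* Writing to parm_proc here instead would clobber whatever
         * request the main loop is still assembling below. */
        memset(&parm_irq, 0, sizeof(parm_irq));
        parm_irq.text[0] = 'S';   /* e.g. a path-sever request */
}

int main(void)
{
        signal(SIGALRM, irq_like_handler);
        alarm(1);

        for (int i = 0; i < 3000; i++) {
                /* Build a request in the process-context block ... */
                memset(&parm_proc, 0, sizeof(parm_proc));
                strcpy(parm_proc.text, "send");
                usleep(1000);     /* window in which the "interrupt" may hit */
                /* ... and check that nothing overwrote it before submission. */
                if (strcmp(parm_proc.text, "send") != 0) {
                        puts("request clobbered");
                        return 1;
                }
        }
        puts("no clobbering: the handler used its own scratch block");
        return 0;
}

In the actual patch the split is static rather than chosen at run time: the routines that may run in interrupt context (iucv_allow_cpu, iucv_block_cpu, iucv_declare_cpu, iucv_retrieve_cpu and iucv_sever_pathid) switch to iucv_param_irq[], while the remaining callers keep using iucv_param[].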
Diffstat (limited to 'net/iucv')
-rw-r--r--    net/iucv/iucv.c    41
1 file changed, 31 insertions, 10 deletions
diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
index a35240f61ec3..fcf404065f12 100644
--- a/net/iucv/iucv.c
+++ b/net/iucv/iucv.c
@@ -280,6 +280,7 @@ union iucv_param {
  * Anchor for per-cpu IUCV command parameter block.
  */
 static union iucv_param *iucv_param[NR_CPUS];
+static union iucv_param *iucv_param_irq[NR_CPUS];
 
 /**
  * iucv_call_b2f0
@@ -358,7 +359,7 @@ static void iucv_allow_cpu(void *data)
  *      0x10 - Flag to allow priority message completion interrupts
  *      0x08 - Flag to allow IUCV control interrupts
  */
-        parm = iucv_param[cpu];
+        parm = iucv_param_irq[cpu];
         memset(parm, 0, sizeof(union iucv_param));
         parm->set_mask.ipmask = 0xf8;
         iucv_call_b2f0(IUCV_SETMASK, parm);
@@ -379,7 +380,7 @@ static void iucv_block_cpu(void *data)
         union iucv_param *parm;
 
         /* Disable all iucv interrupts. */
-        parm = iucv_param[cpu];
+        parm = iucv_param_irq[cpu];
         memset(parm, 0, sizeof(union iucv_param));
         iucv_call_b2f0(IUCV_SETMASK, parm);
 
@@ -403,7 +404,7 @@ static void iucv_declare_cpu(void *data)
                 return;
 
         /* Declare interrupt buffer. */
-        parm = iucv_param[cpu];
+        parm = iucv_param_irq[cpu];
         memset(parm, 0, sizeof(union iucv_param));
         parm->db.ipbfadr1 = virt_to_phys(iucv_irq_data[cpu]);
         rc = iucv_call_b2f0(IUCV_DECLARE_BUFFER, parm);
@@ -460,7 +461,7 @@ static void iucv_retrieve_cpu(void *data)
         iucv_block_cpu(NULL);
 
         /* Retrieve interrupt buffer. */
-        parm = iucv_param[cpu];
+        parm = iucv_param_irq[cpu];
         iucv_call_b2f0(IUCV_RETRIEVE_BUFFER, parm);
 
         /* Clear indication that an iucv buffer exists for this cpu. */
@@ -574,11 +575,22 @@ static int __cpuinit iucv_cpu_notify(struct notifier_block *self,
                         iucv_irq_data[cpu] = NULL;
                         return NOTIFY_BAD;
                 }
+                iucv_param_irq[cpu] = kmalloc_node(sizeof(union iucv_param),
+                                        GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
+                if (!iucv_param_irq[cpu]) {
+                        kfree(iucv_param[cpu]);
+                        iucv_param[cpu] = NULL;
+                        kfree(iucv_irq_data[cpu]);
+                        iucv_irq_data[cpu] = NULL;
+                        return NOTIFY_BAD;
+                }
                 break;
         case CPU_UP_CANCELED:
         case CPU_UP_CANCELED_FROZEN:
         case CPU_DEAD:
         case CPU_DEAD_FROZEN:
+                kfree(iucv_param_irq[cpu]);
+                iucv_param_irq[cpu] = NULL;
                 kfree(iucv_param[cpu]);
                 iucv_param[cpu] = NULL;
                 kfree(iucv_irq_data[cpu]);
@@ -625,7 +637,7 @@ static int iucv_sever_pathid(u16 pathid, u8 userdata[16])
 {
         union iucv_param *parm;
 
-        parm = iucv_param[smp_processor_id()];
+        parm = iucv_param_irq[smp_processor_id()];
         memset(parm, 0, sizeof(union iucv_param));
         if (userdata)
                 memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser));
@@ -918,10 +930,8 @@ int iucv_path_sever(struct iucv_path *path, u8 userdata[16])
         if (iucv_active_cpu != smp_processor_id())
                 spin_lock_bh(&iucv_table_lock);
         rc = iucv_sever_pathid(path->pathid, userdata);
-        if (!rc) {
-                iucv_path_table[path->pathid] = NULL;
-                list_del_init(&path->list);
-        }
+        iucv_path_table[path->pathid] = NULL;
+        list_del_init(&path->list);
         if (iucv_active_cpu != smp_processor_id())
                 spin_unlock_bh(&iucv_table_lock);
         preempt_enable();
@@ -1413,7 +1423,7 @@ static void iucv_path_severed(struct iucv_irq_data *data)
         else {
                 iucv_sever_pathid(path->pathid, NULL);
                 iucv_path_table[path->pathid] = NULL;
-                list_del_init(&path->list);
+                list_del(&path->list);
                 iucv_path_free(path);
         }
 }
@@ -1717,6 +1727,13 @@ static int __init iucv_init(void)
                         rc = -ENOMEM;
                         goto out_free;
                 }
+                iucv_param_irq[cpu] = kmalloc_node(sizeof(union iucv_param),
+                                  GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
+                if (!iucv_param_irq[cpu]) {
+                        rc = -ENOMEM;
+                        goto out_free;
+                }
+
         }
         rc = register_hotcpu_notifier(&iucv_cpu_notifier);
         if (rc)
1721 rc = register_hotcpu_notifier(&iucv_cpu_notifier); 1738 rc = register_hotcpu_notifier(&iucv_cpu_notifier);
1722 if (rc) 1739 if (rc)
@@ -1734,6 +1751,8 @@ out_cpu:
         unregister_hotcpu_notifier(&iucv_cpu_notifier);
 out_free:
         for_each_possible_cpu(cpu) {
+                kfree(iucv_param_irq[cpu]);
+                iucv_param_irq[cpu] = NULL;
                 kfree(iucv_param[cpu]);
                 iucv_param[cpu] = NULL;
                 kfree(iucv_irq_data[cpu]);
@@ -1764,6 +1783,8 @@ static void __exit iucv_exit(void)
         spin_unlock_irq(&iucv_queue_lock);
         unregister_hotcpu_notifier(&iucv_cpu_notifier);
         for_each_possible_cpu(cpu) {
+                kfree(iucv_param_irq[cpu]);
+                iucv_param_irq[cpu] = NULL;
                 kfree(iucv_param[cpu]);
                 iucv_param[cpu] = NULL;
                 kfree(iucv_irq_data[cpu]);