Diffstat (limited to 'net/iucv')
-rw-r--r--   net/iucv/af_iucv.c    8
-rw-r--r--   net/iucv/iucv.c      25
2 files changed, 12 insertions, 21 deletions
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index 7b0038f45b16..bda71015885c 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -1135,8 +1135,7 @@ static void iucv_callback_txdone(struct iucv_path *path,
 		if (this)
 			kfree_skb(this);
 	}
-	if (!this)
-		printk(KERN_ERR "AF_IUCV msg tag %u not found\n", msg->tag);
+	BUG_ON(!this);
 
 	if (sk->sk_state == IUCV_CLOSING) {
 		if (skb_queue_empty(&iucv_sk(sk)->send_skb_q)) {
@@ -1196,7 +1195,7 @@ static int __init afiucv_init(void)
 	}
 	cpcmd("QUERY USERID", iucv_userid, sizeof(iucv_userid), &err);
 	if (unlikely(err)) {
-		printk(KERN_ERR "AF_IUCV needs the VM userid\n");
+		WARN_ON(err);
 		err = -EPROTONOSUPPORT;
 		goto out;
 	}
@@ -1210,7 +1209,6 @@ static int __init afiucv_init(void)
 	err = sock_register(&iucv_sock_family_ops);
 	if (err)
 		goto out_proto;
-	printk(KERN_INFO "AF_IUCV lowlevel driver initialized\n");
 	return 0;
 
 out_proto:
@@ -1226,8 +1224,6 @@ static void __exit afiucv_exit(void)
 	sock_unregister(PF_IUCV);
 	proto_unregister(&iucv_proto);
 	iucv_unregister(&af_iucv_handler, 0);
-
-	printk(KERN_INFO "AF_IUCV lowlevel driver unloaded\n");
 }
 
 module_init(afiucv_init);
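The af_iucv.c hunks above all follow one pattern: ad-hoc printk() error messages are dropped, and the two conditions that still need checking are expressed with the kernel's assertion macros instead. A minimal sketch of how those macros behave (the helper and its arguments are hypothetical, not part of this patch):

#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/types.h>

/* Hypothetical helper, not from this patch: same assertion style as above. */
static void example_check_entry(void *entry, u32 tag)
{
	/*
	 * WARN_ON() evaluates its condition, prints a warning with a
	 * backtrace when it is true, and lets execution continue -
	 * a drop-in for printk-and-carry-on error reporting.
	 */
	if (WARN_ON(!entry))
		return;

	/*
	 * BUG_ON() stops the current context; it only replaces a printk
	 * where continuing would be unsafe anyway.
	 */
	BUG_ON(tag == 0);
}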
diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
index 8de511070593..411b339a0c8a 100644
--- a/net/iucv/iucv.c
+++ b/net/iucv/iucv.c
@@ -480,7 +480,7 @@ static void iucv_setmask_mp(void)
 		if (cpu_isset(cpu, iucv_buffer_cpumask) &&
 		    !cpu_isset(cpu, iucv_irq_cpumask))
 			smp_call_function_single(cpu, iucv_allow_cpu,
-						 NULL, 0, 1);
+						 NULL, 1);
 	preempt_enable();
 }
 
@@ -498,7 +498,7 @@ static void iucv_setmask_up(void)
 	cpumask = iucv_irq_cpumask;
 	cpu_clear(first_cpu(iucv_irq_cpumask), cpumask);
 	for_each_cpu_mask_nr(cpu, cpumask)
-		smp_call_function_single(cpu, iucv_block_cpu, NULL, 0, 1);
+		smp_call_function_single(cpu, iucv_block_cpu, NULL, 1);
 }
 
 /**
@@ -523,7 +523,7 @@ static int iucv_enable(void)
 	rc = -EIO;
 	preempt_disable();
 	for_each_online_cpu(cpu)
-		smp_call_function_single(cpu, iucv_declare_cpu, NULL, 0, 1);
+		smp_call_function_single(cpu, iucv_declare_cpu, NULL, 1);
 	preempt_enable();
 	if (cpus_empty(iucv_buffer_cpumask))
 		/* No cpu could declare an iucv buffer. */
@@ -545,7 +545,7 @@ out:
  */
 static void iucv_disable(void)
 {
-	on_each_cpu(iucv_retrieve_cpu, NULL, 0, 1);
+	on_each_cpu(iucv_retrieve_cpu, NULL, 1);
 	kfree(iucv_path_table);
 }
 
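The iucv.c hunks up to this point (and the ones that follow) are a single mechanical conversion: smp_call_function_single(), smp_call_function() and on_each_cpu() lost their unused retry/nonatomic argument, leaving only the wait flag after the info pointer. A hedged sketch of the new calling convention, with made-up function names:

#include <linux/smp.h>

/* Hypothetical callback; runs on the target cpu with interrupts disabled. */
static void example_cpu_func(void *info)
{
	/* per-cpu work would go here */
}

static void example_cross_cpu_calls(int cpu)
{
	/* Run on one cpu and wait for it to finish (wait == 1). */
	smp_call_function_single(cpu, example_cpu_func, NULL, 1);

	/* Run on every online cpu, including the caller, and wait. */
	on_each_cpu(example_cpu_func, NULL, 1);
}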
@@ -580,7 +580,7 @@ static int __cpuinit iucv_cpu_notify(struct notifier_block *self,
 	case CPU_ONLINE_FROZEN:
 	case CPU_DOWN_FAILED:
 	case CPU_DOWN_FAILED_FROZEN:
-		smp_call_function_single(cpu, iucv_declare_cpu, NULL, 0, 1);
+		smp_call_function_single(cpu, iucv_declare_cpu, NULL, 1);
 		break;
 	case CPU_DOWN_PREPARE:
 	case CPU_DOWN_PREPARE_FROZEN:
@@ -589,10 +589,10 @@ static int __cpuinit iucv_cpu_notify(struct notifier_block *self,
 		if (cpus_empty(cpumask))
 			/* Can't offline last IUCV enabled cpu. */
 			return NOTIFY_BAD;
-		smp_call_function_single(cpu, iucv_retrieve_cpu, NULL, 0, 1);
+		smp_call_function_single(cpu, iucv_retrieve_cpu, NULL, 1);
 		if (cpus_empty(iucv_irq_cpumask))
 			smp_call_function_single(first_cpu(iucv_buffer_cpumask),
-						 iucv_allow_cpu, NULL, 0, 1);
+						 iucv_allow_cpu, NULL, 1);
 		break;
 	}
 	return NOTIFY_OK;
@@ -652,7 +652,7 @@ static void iucv_cleanup_queue(void)
 	 * pending interrupts force them to the work queue by calling
 	 * an empty function on all cpus.
 	 */
-	smp_call_function(__iucv_cleanup_queue, NULL, 0, 1);
+	smp_call_function(__iucv_cleanup_queue, NULL, 1);
 	spin_lock_irq(&iucv_queue_lock);
 	list_for_each_entry_safe(p, n, &iucv_task_queue, list) {
 		/* Remove stale work items from the task queue. */
@@ -1559,16 +1559,11 @@ static void iucv_external_interrupt(u16 code)
 
 	p = iucv_irq_data[smp_processor_id()];
 	if (p->ippathid >= iucv_max_pathid) {
-		printk(KERN_WARNING "iucv_do_int: Got interrupt with "
-		       "pathid %d > max_connections (%ld)\n",
-		       p->ippathid, iucv_max_pathid - 1);
+		WARN_ON(p->ippathid >= iucv_max_pathid);
 		iucv_sever_pathid(p->ippathid, iucv_error_no_listener);
 		return;
 	}
-	if (p->iptype < 0x01 || p->iptype > 0x09) {
-		printk(KERN_ERR "iucv_do_int: unknown iucv interrupt\n");
-		return;
-	}
+	BUG_ON(p->iptype < 0x01 || p->iptype > 0x09);
 	work = kmalloc(sizeof(struct iucv_irq_list), GFP_ATOMIC);
 	if (!work) {
 		printk(KERN_WARNING "iucv_external_interrupt: out of memory\n");
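The unchanged tail of this last hunk shows why the handler defers its real work: it runs in interrupt context, so it may only allocate with GFP_ATOMIC and has to give up cleanly when that fails. A rough sketch of that deferral pattern, using hypothetical names rather than the iucv ones:

#include <linux/gfp.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

/* Hypothetical work item, mirroring the role of struct iucv_irq_list. */
struct example_irq_item {
	struct list_head list;
	/* a copy of the interrupt data would live here */
};

static LIST_HEAD(example_queue);
static DEFINE_SPINLOCK(example_queue_lock);

/* Called in interrupt context: only atomic allocations are allowed, and a
 * failed allocation can only be dropped (or reported), never retried. */
static void example_defer_irq(void)
{
	struct example_irq_item *item;

	item = kmalloc(sizeof(*item), GFP_ATOMIC);
	if (!item)
		return;
	spin_lock(&example_queue_lock);
	list_add_tail(&item->list, &example_queue);
	spin_unlock(&example_queue_lock);
}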