author:    Harvey Harrison <harvey.harrison@gmail.com>  2008-03-04 18:15:00 -0500
committer: Tony Luck <tony.luck@intel.com>  2008-03-06 12:19:27 -0500
commit:    d4ed80841ad4a1d59decccfbe2d010558568c5fb (patch)
tree:      81ebf4a87688f4b0cc46f74266b5b0cac76932b0 /arch/ia64/kernel
parent:    2d9b06c72a9f2e6042d72df7d9000a48bcba34f0 (diff)
[IA64] remove remaining __FUNCTION__ occurrences
__FUNCTION__ is gcc-specific, use __func__. Long lines have been kept where
they exist; some small spacing changes have been done.

Signed-off-by: Harvey Harrison <harvey.harrison@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Tony Luck <tony.luck@intel.com>
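As background (not part of the patch), a minimal sketch of what the substitution buys: __func__ is defined by C99 and expands to the enclosing function's name on any conforming compiler, whereas __FUNCTION__ is a gcc extension. The function and message below are hypothetical, chosen only to illustrate the mechanical nature of the change.

    #include <stdio.h>

    /* Hypothetical example: __func__ behaves like gcc's __FUNCTION__
     * but is standard C99, so the code no longer depends on a gcc extension.
     */
    static void report_status(void)
    {
            /* before the patch this pattern would have read:
             *   printk(KERN_INFO "%s: ready\n", __FUNCTION__);
             */
            printf("%s: ready\n", __func__);  /* prints "report_status: ready" */
    }

    int main(void)
    {
            report_status();
            return 0;
    }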
Diffstat (limited to 'arch/ia64/kernel')
-rw-r--r--  arch/ia64/kernel/crash.c                  |   2
-rw-r--r--  arch/ia64/kernel/efi.c                    |   6
-rw-r--r--  arch/ia64/kernel/iosapic.c                |  24
-rw-r--r--  arch/ia64/kernel/irq_ia64.c               |   4
-rw-r--r--  arch/ia64/kernel/mca.c                    |  73
-rw-r--r--  arch/ia64/kernel/module.c                 |  22
-rw-r--r--  arch/ia64/kernel/perfmon.c                |   4
-rw-r--r--  arch/ia64/kernel/perfmon_default_smpl.c   |   4
-rw-r--r--  arch/ia64/kernel/ptrace.c                 |   4
-rw-r--r--  arch/ia64/kernel/setup.c                  |   8
-rw-r--r--  arch/ia64/kernel/unaligned.c              |   6
-rw-r--r--  arch/ia64/kernel/unwind.c                 | 102
12 files changed, 128 insertions, 131 deletions
diff --git a/arch/ia64/kernel/crash.c b/arch/ia64/kernel/crash.c
index f1cf2df97a2d..fbe742ad2fde 100644
--- a/arch/ia64/kernel/crash.c
+++ b/arch/ia64/kernel/crash.c
@@ -155,7 +155,7 @@ kdump_init_notifier(struct notifier_block *self, unsigned long val, void *data)
 		if (val == DIE_INIT_MONARCH_LEAVE)
 			ia64_mca_printk(KERN_NOTICE
 					"%s: kdump not configured\n",
-					__FUNCTION__);
+					__func__);
 		return NOTIFY_DONE;
 	}
 
diff --git a/arch/ia64/kernel/efi.c b/arch/ia64/kernel/efi.c
index 919070a9aed7..78f50e81cd95 100644
--- a/arch/ia64/kernel/efi.c
+++ b/arch/ia64/kernel/efi.c
@@ -379,8 +379,8 @@ efi_get_pal_addr (void)
 		 * a dedicated ITR for the PAL code.
 		 */
 		if ((vaddr & mask) == (KERNEL_START & mask)) {
-			printk(KERN_INFO "%s: no need to install ITR for "
-			       "PAL code\n", __FUNCTION__);
+			printk(KERN_INFO "%s: no need to install ITR for PAL code\n",
+			       __func__);
 			continue;
 		}
 
@@ -399,7 +399,7 @@ efi_get_pal_addr (void)
 		return __va(md->phys_addr);
 	}
 	printk(KERN_WARNING "%s: no PAL-code memory-descriptor found\n",
-	       __FUNCTION__);
+	       __func__);
 	return NULL;
 }
 
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c
index 7b3292282dea..082c31dcfd99 100644
--- a/arch/ia64/kernel/iosapic.c
+++ b/arch/ia64/kernel/iosapic.c
@@ -534,7 +534,7 @@ iosapic_reassign_vector (int irq)
 	if (iosapic_intr_info[irq].count) {
 		new_irq = create_irq();
 		if (new_irq < 0)
-			panic("%s: out of interrupt vectors!\n", __FUNCTION__);
+			panic("%s: out of interrupt vectors!\n", __func__);
 		printk(KERN_INFO "Reassigning vector %d to %d\n",
 		       irq_to_vector(irq), irq_to_vector(new_irq));
 		memcpy(&iosapic_intr_info[new_irq], &iosapic_intr_info[irq],
@@ -599,7 +599,7 @@ register_intr (unsigned int gsi, int irq, unsigned char delivery,
 	index = find_iosapic(gsi);
 	if (index < 0) {
 		printk(KERN_WARNING "%s: No IOSAPIC for GSI %u\n",
-		       __FUNCTION__, gsi);
+		       __func__, gsi);
 		return -ENODEV;
 	}
 
@@ -608,7 +608,7 @@ register_intr (unsigned int gsi, int irq, unsigned char delivery,
 		rte = iosapic_alloc_rte();
 		if (!rte) {
 			printk(KERN_WARNING "%s: cannot allocate memory\n",
-			       __FUNCTION__);
+			       __func__);
 			return -ENOMEM;
 		}
 
@@ -625,7 +625,7 @@ register_intr (unsigned int gsi, int irq, unsigned char delivery,
 		    (info->trigger != trigger || info->polarity != polarity)){
 			printk (KERN_WARNING
 				"%s: cannot override the interrupt\n",
-				__FUNCTION__);
+				__func__);
 			return -EINVAL;
 		}
 		rte->refcnt++;
@@ -647,7 +647,7 @@ register_intr (unsigned int gsi, int irq, unsigned char delivery,
 		if (idesc->chip != &no_irq_type)
 			printk(KERN_WARNING
 			       "%s: changing vector %d from %s to %s\n",
-			       __FUNCTION__, irq_to_vector(irq),
+			       __func__, irq_to_vector(irq),
 			       idesc->chip->name, irq_type->name);
 		idesc->chip = irq_type;
 	}
@@ -920,7 +920,7 @@ iosapic_register_platform_intr (u32 int_type, unsigned int gsi,
 	case ACPI_INTERRUPT_INIT:
 		irq = create_irq();
 		if (irq < 0)
-			panic("%s: out of interrupt vectors!\n", __FUNCTION__);
+			panic("%s: out of interrupt vectors!\n", __func__);
 		vector = irq_to_vector(irq);
 		delivery = IOSAPIC_INIT;
 		break;
@@ -931,7 +931,7 @@ iosapic_register_platform_intr (u32 int_type, unsigned int gsi,
 		mask = 1;
 		break;
 	default:
-		printk(KERN_ERR "%s: invalid int type 0x%x\n", __FUNCTION__,
+		printk(KERN_ERR "%s: invalid int type 0x%x\n", __func__,
 		       int_type);
 		return -1;
 	}
@@ -996,7 +996,7 @@ iosapic_system_init (int system_pcat_compat)
 		 */
 		printk(KERN_INFO
 		       "%s: Disabling PC-AT compatible 8259 interrupts\n",
-		       __FUNCTION__);
+		       __func__);
 		outb(0xff, 0xA1);
 		outb(0xff, 0x21);
 	}
@@ -1011,7 +1011,7 @@ iosapic_alloc (void)
 		if (!iosapic_lists[index].addr)
 			return index;
 
-	printk(KERN_WARNING "%s: failed to allocate iosapic\n", __FUNCTION__);
+	printk(KERN_WARNING "%s: failed to allocate iosapic\n", __func__);
 	return -1;
 }
 
@@ -1109,14 +1109,14 @@ iosapic_remove (unsigned int gsi_base)
 	index = find_iosapic(gsi_base);
 	if (index < 0) {
 		printk(KERN_WARNING "%s: No IOSAPIC for GSI base %u\n",
-		       __FUNCTION__, gsi_base);
+		       __func__, gsi_base);
 		goto out;
 	}
 
 	if (iosapic_lists[index].rtes_inuse) {
 		err = -EBUSY;
 		printk(KERN_WARNING "%s: IOSAPIC for GSI base %u is busy\n",
-		       __FUNCTION__, gsi_base);
+		       __func__, gsi_base);
 		goto out;
 	}
 
@@ -1137,7 +1137,7 @@ map_iosapic_to_node(unsigned int gsi_base, int node)
 	index = find_iosapic(gsi_base);
 	if (index < 0) {
 		printk(KERN_WARNING "%s: No IOSAPIC for GSI %u\n",
-		       __FUNCTION__, gsi_base);
+		       __func__, gsi_base);
 		return;
 	}
 	iosapic_lists[index].node = node;
diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c
index 16472821d7a9..d8be23fbe6bc 100644
--- a/arch/ia64/kernel/irq_ia64.c
+++ b/arch/ia64/kernel/irq_ia64.c
@@ -507,7 +507,7 @@ ia64_handle_irq (ia64_vector vector, struct pt_regs *regs)
 			if (unlikely(irq < 0)) {
 				printk(KERN_ERR "%s: Unexpected interrupt "
 				       "vector %d on CPU %d is not mapped "
-				       "to any IRQ!\n", __FUNCTION__, vector,
+				       "to any IRQ!\n", __func__, vector,
 				       smp_processor_id());
 			} else
 				generic_handle_irq(irq);
@@ -572,7 +572,7 @@ void ia64_process_pending_intr(void)
 			if (unlikely(irq < 0)) {
 				printk(KERN_ERR "%s: Unexpected interrupt "
 				       "vector %d on CPU %d not being mapped "
-				       "to any IRQ!!\n", __FUNCTION__, vector,
+				       "to any IRQ!!\n", __func__, vector,
 				       smp_processor_id());
 			} else {
 				vectors_in_migration[irq]=0;
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 6e17aed53135..6c18221dba36 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -413,8 +413,8 @@ ia64_log_get(int sal_info_type, u8 **buffer, int irq_safe)
 		IA64_LOG_INDEX_INC(sal_info_type);
 		IA64_LOG_UNLOCK(sal_info_type);
 		if (irq_safe) {
-			IA64_MCA_DEBUG("%s: SAL error record type %d retrieved. "
-			"Record length = %ld\n", __FUNCTION__, sal_info_type, total_len);
+			IA64_MCA_DEBUG("%s: SAL error record type %d retrieved. Record length = %ld\n",
+			__func__, sal_info_type, total_len);
 		}
 		*buffer = (u8 *) log_buffer;
 		return total_len;
@@ -518,7 +518,7 @@ ia64_mca_cpe_int_handler (int cpe_irq, void *arg)
 	static DEFINE_SPINLOCK(cpe_history_lock);
 
 	IA64_MCA_DEBUG("%s: received interrupt vector = %#x on CPU %d\n",
-		       __FUNCTION__, cpe_irq, smp_processor_id());
+		       __func__, cpe_irq, smp_processor_id());
 
 	/* SAL spec states this should run w/ interrupts enabled */
 	local_irq_enable();
@@ -594,7 +594,7 @@ ia64_mca_register_cpev (int cpev)
 	}
 
 	IA64_MCA_DEBUG("%s: corrected platform error "
-		       "vector %#x registered\n", __FUNCTION__, cpev);
+		       "vector %#x registered\n", __func__, cpev);
 }
 #endif /* CONFIG_ACPI */
 
@@ -621,12 +621,11 @@ ia64_mca_cmc_vector_setup (void)
 	cmcv.cmcv_vector = IA64_CMC_VECTOR;
 	ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval);
 
-	IA64_MCA_DEBUG("%s: CPU %d corrected "
-		       "machine check vector %#x registered.\n",
-		       __FUNCTION__, smp_processor_id(), IA64_CMC_VECTOR);
+	IA64_MCA_DEBUG("%s: CPU %d corrected machine check vector %#x registered.\n",
+		       __func__, smp_processor_id(), IA64_CMC_VECTOR);
 
 	IA64_MCA_DEBUG("%s: CPU %d CMCV = %#016lx\n",
-		       __FUNCTION__, smp_processor_id(), ia64_getreg(_IA64_REG_CR_CMCV));
+		       __func__, smp_processor_id(), ia64_getreg(_IA64_REG_CR_CMCV));
 }
 
 /*
@@ -651,9 +650,8 @@ ia64_mca_cmc_vector_disable (void *dummy)
 	cmcv.cmcv_mask = 1; /* Mask/disable interrupt */
 	ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval);
 
-	IA64_MCA_DEBUG("%s: CPU %d corrected "
-		       "machine check vector %#x disabled.\n",
-		       __FUNCTION__, smp_processor_id(), cmcv.cmcv_vector);
+	IA64_MCA_DEBUG("%s: CPU %d corrected machine check vector %#x disabled.\n",
+		       __func__, smp_processor_id(), cmcv.cmcv_vector);
 }
 
 /*
@@ -678,9 +676,8 @@ ia64_mca_cmc_vector_enable (void *dummy)
 	cmcv.cmcv_mask = 0; /* Unmask/enable interrupt */
 	ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval);
 
-	IA64_MCA_DEBUG("%s: CPU %d corrected "
-		       "machine check vector %#x enabled.\n",
-		       __FUNCTION__, smp_processor_id(), cmcv.cmcv_vector);
+	IA64_MCA_DEBUG("%s: CPU %d corrected machine check vector %#x enabled.\n",
+		       __func__, smp_processor_id(), cmcv.cmcv_vector);
 }
 
 /*
@@ -767,7 +764,7 @@ ia64_mca_rendez_int_handler(int rendez_irq, void *arg)
 	local_irq_save(flags);
 	if (notify_die(DIE_MCA_RENDZVOUS_ENTER, "MCA", get_irq_regs(),
 		       (long)&nd, 0, 0) == NOTIFY_STOP)
-		ia64_mca_spin(__FUNCTION__);
+		ia64_mca_spin(__func__);
 
 	ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_DONE;
 	/* Register with the SAL monarch that the slave has
@@ -777,7 +774,7 @@ ia64_mca_rendez_int_handler(int rendez_irq, void *arg)
 
 	if (notify_die(DIE_MCA_RENDZVOUS_PROCESS, "MCA", get_irq_regs(),
 		       (long)&nd, 0, 0) == NOTIFY_STOP)
-		ia64_mca_spin(__FUNCTION__);
+		ia64_mca_spin(__func__);
 
 	/* Wait for the monarch cpu to exit. */
 	while (monarch_cpu != -1)
@@ -785,7 +782,7 @@ ia64_mca_rendez_int_handler(int rendez_irq, void *arg)
 
 	if (notify_die(DIE_MCA_RENDZVOUS_LEAVE, "MCA", get_irq_regs(),
 		       (long)&nd, 0, 0) == NOTIFY_STOP)
-		ia64_mca_spin(__FUNCTION__);
+		ia64_mca_spin(__func__);
 
 	ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
 	/* Enable all interrupts */
@@ -1230,7 +1227,7 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
 
 	if (notify_die(DIE_MCA_MONARCH_ENTER, "MCA", regs, (long)&nd, 0, 0)
 			== NOTIFY_STOP)
-		ia64_mca_spin(__FUNCTION__);
+		ia64_mca_spin(__func__);
 
 	ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_CONCURRENT_MCA;
 	if (sos->monarch) {
@@ -1246,7 +1243,7 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
 		ia64_mca_wakeup_all();
 		if (notify_die(DIE_MCA_MONARCH_PROCESS, "MCA", regs, (long)&nd, 0, 0)
 				== NOTIFY_STOP)
-			ia64_mca_spin(__FUNCTION__);
+			ia64_mca_spin(__func__);
 	} else {
 		while (cpu_isset(cpu, mca_cpu))
 			cpu_relax();	/* spin until monarch wakes us */
@@ -1276,7 +1273,7 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
 	}
 	if (notify_die(DIE_MCA_MONARCH_LEAVE, "MCA", regs, (long)&nd, 0, recover)
 			== NOTIFY_STOP)
-		ia64_mca_spin(__FUNCTION__);
+		ia64_mca_spin(__func__);
 
 
 	if (atomic_dec_return(&mca_count) > 0) {
@@ -1328,7 +1325,7 @@ ia64_mca_cmc_int_handler(int cmc_irq, void *arg)
 	static DEFINE_SPINLOCK(cmc_history_lock);
 
 	IA64_MCA_DEBUG("%s: received interrupt vector = %#x on CPU %d\n",
-		       __FUNCTION__, cmc_irq, smp_processor_id());
+		       __func__, cmc_irq, smp_processor_id());
 
 	/* SAL spec states this should run w/ interrupts enabled */
 	local_irq_enable();
@@ -1614,7 +1611,7 @@ ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw,
 	 */
 	if (!sos->monarch && atomic_add_return(1, &slaves) == num_online_cpus()) {
 		mprintk(KERN_WARNING "%s: Promoting cpu %d to monarch.\n",
-			__FUNCTION__, cpu);
+			__func__, cpu);
 		atomic_dec(&slaves);
 		sos->monarch = 1;
 	}
@@ -1626,7 +1623,7 @@ ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw,
 	 */
 	if (sos->monarch && atomic_add_return(1, &monarchs) > 1) {
 		mprintk(KERN_WARNING "%s: Demoting cpu %d to slave.\n",
-			__FUNCTION__, cpu);
+			__func__, cpu);
 		atomic_dec(&monarchs);
 		sos->monarch = 0;
 	}
@@ -1637,15 +1634,15 @@ ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw,
 			cpu_relax();	/* spin until monarch enters */
 		if (notify_die(DIE_INIT_SLAVE_ENTER, "INIT", regs, (long)&nd, 0, 0)
 				== NOTIFY_STOP)
-			ia64_mca_spin(__FUNCTION__);
+			ia64_mca_spin(__func__);
 		if (notify_die(DIE_INIT_SLAVE_PROCESS, "INIT", regs, (long)&nd, 0, 0)
 				== NOTIFY_STOP)
-			ia64_mca_spin(__FUNCTION__);
+			ia64_mca_spin(__func__);
 		while (monarch_cpu != -1)
 			cpu_relax();	/* spin until monarch leaves */
 		if (notify_die(DIE_INIT_SLAVE_LEAVE, "INIT", regs, (long)&nd, 0, 0)
 				== NOTIFY_STOP)
-			ia64_mca_spin(__FUNCTION__);
+			ia64_mca_spin(__func__);
 		mprintk("Slave on cpu %d returning to normal service.\n", cpu);
 		set_curr_task(cpu, previous_current);
 		ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
@@ -1656,7 +1653,7 @@ ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw,
 	monarch_cpu = cpu;
 	if (notify_die(DIE_INIT_MONARCH_ENTER, "INIT", regs, (long)&nd, 0, 0)
 			== NOTIFY_STOP)
-		ia64_mca_spin(__FUNCTION__);
+		ia64_mca_spin(__func__);
 
 	/*
 	 * Wait for a bit.  On some machines (e.g., HP's zx2000 and zx6000, INIT can be
@@ -1673,10 +1670,10 @@ ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw,
 	 */
 	if (notify_die(DIE_INIT_MONARCH_PROCESS, "INIT", regs, (long)&nd, 0, 0)
 			== NOTIFY_STOP)
-		ia64_mca_spin(__FUNCTION__);
+		ia64_mca_spin(__func__);
 	if (notify_die(DIE_INIT_MONARCH_LEAVE, "INIT", regs, (long)&nd, 0, 0)
 			== NOTIFY_STOP)
-		ia64_mca_spin(__FUNCTION__);
+		ia64_mca_spin(__func__);
 	mprintk("\nINIT dump complete.  Monarch on cpu %d returning to normal service.\n", cpu);
 	atomic_dec(&monarchs);
 	set_curr_task(cpu, previous_current);
@@ -1884,7 +1881,7 @@ ia64_mca_init(void)
 		.priority = 0/* we need to notified last */
 	};
 
-	IA64_MCA_DEBUG("%s: begin\n", __FUNCTION__);
+	IA64_MCA_DEBUG("%s: begin\n", __func__);
 
 	/* Clear the Rendez checkin flag for all cpus */
 	for(i = 0 ; i < NR_CPUS; i++)
@@ -1928,7 +1925,7 @@ ia64_mca_init(void)
 		return;
 	}
 
-	IA64_MCA_DEBUG("%s: registered MCA rendezvous spinloop and wakeup mech.\n", __FUNCTION__);
+	IA64_MCA_DEBUG("%s: registered MCA rendezvous spinloop and wakeup mech.\n", __func__);
 
 	ia64_mc_info.imi_mca_handler        = ia64_tpa(mca_hldlr_ptr->fp);
 	/*
@@ -1949,7 +1946,7 @@ ia64_mca_init(void)
 		return;
 	}
 
-	IA64_MCA_DEBUG("%s: registered OS MCA handler with SAL at 0x%lx, gp = 0x%lx\n", __FUNCTION__,
+	IA64_MCA_DEBUG("%s: registered OS MCA handler with SAL at 0x%lx, gp = 0x%lx\n", __func__,
 		       ia64_mc_info.imi_mca_handler, ia64_tpa(mca_hldlr_ptr->gp));
 
 	/*
@@ -1961,7 +1958,7 @@ ia64_mca_init(void)
 	ia64_mc_info.imi_slave_init_handler		= ia64_tpa(init_hldlr_ptr_slave->fp);
 	ia64_mc_info.imi_slave_init_handler_size	= 0;
 
-	IA64_MCA_DEBUG("%s: OS INIT handler at %lx\n", __FUNCTION__,
+	IA64_MCA_DEBUG("%s: OS INIT handler at %lx\n", __func__,
 		       ia64_mc_info.imi_monarch_init_handler);
 
 	/* Register the os init handler with SAL */
@@ -1982,7 +1979,7 @@ ia64_mca_init(void)
 		return;
 	}
 
-	IA64_MCA_DEBUG("%s: registered OS INIT handler with SAL\n", __FUNCTION__);
+	IA64_MCA_DEBUG("%s: registered OS INIT handler with SAL\n", __func__);
 
 	/*
	 *  Configure the CMCI/P vector and handler.  Interrupts for CMC are
@@ -2042,7 +2039,7 @@ ia64_mca_late_init(void)
 	cmc_polling_enabled = 0;
 	schedule_work(&cmc_enable_work);
 
-	IA64_MCA_DEBUG("%s: CMCI/P setup and enabled.\n", __FUNCTION__);
+	IA64_MCA_DEBUG("%s: CMCI/P setup and enabled.\n", __func__);
 
 #ifdef CONFIG_ACPI
 	/* Setup the CPEI/P vector and handler */
@@ -2065,17 +2062,17 @@ ia64_mca_late_init(void)
 				ia64_cpe_irq = irq;
 				ia64_mca_register_cpev(cpe_vector);
 				IA64_MCA_DEBUG("%s: CPEI/P setup and enabled.\n",
-					       __FUNCTION__);
+					       __func__);
 				return 0;
 			}
 			printk(KERN_ERR "%s: Failed to find irq for CPE "
 			       "interrupt handler, vector %d\n",
-			       __FUNCTION__, cpe_vector);
+			       __func__, cpe_vector);
 		}
 		/* If platform doesn't support CPEI, get the timer going. */
 		if (cpe_poll_enabled) {
 			ia64_mca_cpe_poll(0UL);
-			IA64_MCA_DEBUG("%s: CPEP setup and enabled.\n", __FUNCTION__);
+			IA64_MCA_DEBUG("%s: CPEP setup and enabled.\n", __func__);
 		}
 	}
 #endif
diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
index e58f4367cf11..e83e2ea3b3e0 100644
--- a/arch/ia64/kernel/module.c
+++ b/arch/ia64/kernel/module.c
@@ -493,7 +493,7 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
 	mod->arch.opd->sh_addralign = 8;
 	mod->arch.opd->sh_size = fdescs * sizeof(struct fdesc);
 	DEBUGP("%s: core.plt=%lx, init.plt=%lx, got=%lx, fdesc=%lx\n",
-	       __FUNCTION__, mod->arch.core_plt->sh_size, mod->arch.init_plt->sh_size,
+	       __func__, mod->arch.core_plt->sh_size, mod->arch.init_plt->sh_size,
 	       mod->arch.got->sh_size, mod->arch.opd->sh_size);
 	return 0;
 }
@@ -585,7 +585,7 @@ get_plt (struct module *mod, const struct insn *insn, uint64_t value, int *okp)
 #if ARCH_MODULE_DEBUG
 	if (plt_target(plt) != target_ip) {
 		printk("%s: mistargeted PLT: wanted %lx, got %lx\n",
-		       __FUNCTION__, target_ip, plt_target(plt));
+		       __func__, target_ip, plt_target(plt));
 		*okp = 0;
 		return 0;
 	}
@@ -703,7 +703,7 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
 		if (r_type == R_IA64_PCREL21BI) {
 			if (!is_internal(mod, val)) {
 				printk(KERN_ERR "%s: %s reloc against non-local symbol (%lx)\n",
-				       __FUNCTION__, reloc_name[r_type], val);
+				       __func__, reloc_name[r_type], val);
 				return -ENOEXEC;
 			}
 			format = RF_INSN21B;
@@ -737,7 +737,7 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
 	      case R_IA64_LDXMOV:
 		if (gp_addressable(mod, val)) {
 			/* turn "ld8" into "mov": */
			DEBUGP("%s: patching ld8 at %p to mov\n", __func__, location);
-			DEBUGP("%s: patching ld8 at %p to mov\n", __FUNCTION__, location);
+			DEBUGP("%s: patching ld8 at %p to mov\n", __func__, location);
 			ia64_patch((u64) location, 0x1fff80fe000UL, 0x10000000000UL);
 		}
 		return 0;
@@ -771,7 +771,7 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
 	if (!ok)
 		return -ENOEXEC;
 
-	DEBUGP("%s: [%p]<-%016lx = %s(%lx)\n", __FUNCTION__, location, val,
+	DEBUGP("%s: [%p]<-%016lx = %s(%lx)\n", __func__, location, val,
 	       reloc_name[r_type] ? reloc_name[r_type] : "?", sym->st_value + addend);
 
 	switch (format) {
@@ -807,7 +807,7 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
 	Elf64_Shdr *target_sec;
 	int ret;
 
-	DEBUGP("%s: applying section %u (%u relocs) to %u\n", __FUNCTION__,
+	DEBUGP("%s: applying section %u (%u relocs) to %u\n", __func__,
 	       relsec, n, sechdrs[relsec].sh_info);
 
 	target_sec = sechdrs + sechdrs[relsec].sh_info;
@@ -835,7 +835,7 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
 		gp = mod->core_size / 2;
 		gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
 		mod->arch.gp = gp;
-		DEBUGP("%s: placing gp at 0x%lx\n", __FUNCTION__, gp);
+		DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
 	}
 
 	for (i = 0; i < n; i++) {
@@ -903,7 +903,7 @@ register_unwind_table (struct module *mod)
 		init = start + num_core;
 	}
 
-	DEBUGP("%s: name=%s, gp=%lx, num_init=%lu, num_core=%lu\n", __FUNCTION__,
+	DEBUGP("%s: name=%s, gp=%lx, num_init=%lu, num_core=%lu\n", __func__,
 	       mod->name, mod->arch.gp, num_init, num_core);
 
 	/*
@@ -912,13 +912,13 @@ register_unwind_table (struct module *mod)
 	if (num_core > 0) {
 		mod->arch.core_unw_table = unw_add_unwind_table(mod->name, 0, mod->arch.gp,
 								core, core + num_core);
-		DEBUGP("%s: core: handle=%p [%p-%p)\n", __FUNCTION__,
+		DEBUGP("%s: core: handle=%p [%p-%p)\n", __func__,
 		       mod->arch.core_unw_table, core, core + num_core);
 	}
 	if (num_init > 0) {
 		mod->arch.init_unw_table = unw_add_unwind_table(mod->name, 0, mod->arch.gp,
 								init, init + num_init);
-		DEBUGP("%s: init: handle=%p [%p-%p)\n", __FUNCTION__,
+		DEBUGP("%s: init: handle=%p [%p-%p)\n", __func__,
 		       mod->arch.init_unw_table, init, init + num_init);
 	}
 }
@@ -926,7 +926,7 @@ register_unwind_table (struct module *mod)
 int
 module_finalize (const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, struct module *mod)
 {
-	DEBUGP("%s: init: entry=%p\n", __FUNCTION__, mod->init);
+	DEBUGP("%s: init: entry=%p\n", __func__, mod->init);
 	if (mod->arch.unwind)
 		register_unwind_table(mod);
 	return 0;
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index f6b99719f10f..a2aabfdc80d9 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -227,12 +227,12 @@
 #ifdef PFM_DEBUGGING
 #define DPRINT(a) \
 	do { \
-		if (unlikely(pfm_sysctl.debug >0)) { printk("%s.%d: CPU%d [%d] ", __FUNCTION__, __LINE__, smp_processor_id(), task_pid_nr(current)); printk a; } \
+		if (unlikely(pfm_sysctl.debug >0)) { printk("%s.%d: CPU%d [%d] ", __func__, __LINE__, smp_processor_id(), task_pid_nr(current)); printk a; } \
 	} while (0)
 
 #define DPRINT_ovfl(a) \
 	do { \
-		if (unlikely(pfm_sysctl.debug > 0 && pfm_sysctl.debug_ovfl >0)) { printk("%s.%d: CPU%d [%d] ", __FUNCTION__, __LINE__, smp_processor_id(), task_pid_nr(current)); printk a; } \
+		if (unlikely(pfm_sysctl.debug > 0 && pfm_sysctl.debug_ovfl >0)) { printk("%s.%d: CPU%d [%d] ", __func__, __LINE__, smp_processor_id(), task_pid_nr(current)); printk a; } \
 	} while (0)
 #endif
 
diff --git a/arch/ia64/kernel/perfmon_default_smpl.c b/arch/ia64/kernel/perfmon_default_smpl.c
index a7af1cb419f9..5f637bbfcccd 100644
--- a/arch/ia64/kernel/perfmon_default_smpl.c
+++ b/arch/ia64/kernel/perfmon_default_smpl.c
@@ -24,12 +24,12 @@ MODULE_LICENSE("GPL");
 #ifdef DEFAULT_DEBUG
 #define DPRINT(a) \
 	do { \
-		if (unlikely(pfm_sysctl.debug >0)) { printk("%s.%d: CPU%d ", __FUNCTION__, __LINE__, smp_processor_id()); printk a; } \
+		if (unlikely(pfm_sysctl.debug >0)) { printk("%s.%d: CPU%d ", __func__, __LINE__, smp_processor_id()); printk a; } \
 	} while (0)
 
 #define DPRINT_ovfl(a) \
 	do { \
-		if (unlikely(pfm_sysctl.debug > 0 && pfm_sysctl.debug_ovfl >0)) { printk("%s.%d: CPU%d ", __FUNCTION__, __LINE__, smp_processor_id()); printk a; } \
+		if (unlikely(pfm_sysctl.debug > 0 && pfm_sysctl.debug_ovfl >0)) { printk("%s.%d: CPU%d ", __func__, __LINE__, smp_processor_id()); printk a; } \
 	} while (0)
 
 #else
diff --git a/arch/ia64/kernel/ptrace.c b/arch/ia64/kernel/ptrace.c
index 7e0d7dcac1a9..ab784ec4319d 100644
--- a/arch/ia64/kernel/ptrace.c
+++ b/arch/ia64/kernel/ptrace.c
@@ -780,14 +780,14 @@ convert_to_non_syscall (struct task_struct *child, struct pt_regs *pt,
 	if ((long)((unsigned long)child + IA64_STK_OFFSET - sp)
 	    < IA64_PT_REGS_SIZE) {
 		dprintk("ptrace.%s: ran off the top of the kernel "
-			"stack\n", __FUNCTION__);
+			"stack\n", __func__);
 		return;
 	}
 	if (unw_get_pr (&prev_info, &pr) < 0) {
 		unw_get_rp(&prev_info, &ip);
 		dprintk("ptrace.%s: failed to read "
 			"predicate register (ip=0x%lx)\n",
-			__FUNCTION__, ip);
+			__func__, ip);
 		return;
 	}
 	if (unw_is_intr_frame(&info)
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index ebd1a09f3201..4aa9eaea76c3 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -690,7 +690,7 @@ get_model_name(__u8 family, __u8 model)
 	if (overflow++ == 0)
 		printk(KERN_ERR
 		       "%s: Table overflow. Some processor model information will be missing\n",
-		       __FUNCTION__);
+		       __func__);
 	return "Unknown";
 }
 
@@ -785,7 +785,7 @@ get_max_cacheline_size (void)
 	status = ia64_pal_cache_summary(&levels, &unique_caches);
 	if (status != 0) {
 		printk(KERN_ERR "%s: ia64_pal_cache_summary() failed (status=%ld)\n",
-		       __FUNCTION__, status);
+		       __func__, status);
 		max = SMP_CACHE_BYTES;
 		/* Safest setup for "flush_icache_range()" */
 		ia64_i_cache_stride_shift = I_CACHE_STRIDE_SHIFT;
@@ -798,7 +798,7 @@ get_max_cacheline_size (void)
 		if (status != 0) {
 			printk(KERN_ERR
 			       "%s: ia64_pal_cache_config_info(l=%lu, 2) failed (status=%ld)\n",
-			       __FUNCTION__, l, status);
+			       __func__, l, status);
 			max = SMP_CACHE_BYTES;
 			/* The safest setup for "flush_icache_range()" */
 			cci.pcci_stride = I_CACHE_STRIDE_SHIFT;
@@ -814,7 +814,7 @@ get_max_cacheline_size (void)
 			if (status != 0) {
 				printk(KERN_ERR
 				       "%s: ia64_pal_cache_config_info(l=%lu, 1) failed (status=%ld)\n",
-				       __FUNCTION__, l, status);
+				       __func__, l, status);
 				/* The safest setup for "flush_icache_range()" */
 				cci.pcci_stride = I_CACHE_STRIDE_SHIFT;
 			}
diff --git a/arch/ia64/kernel/unaligned.c b/arch/ia64/kernel/unaligned.c
index 52f70bbc192a..6903361d11a5 100644
--- a/arch/ia64/kernel/unaligned.c
+++ b/arch/ia64/kernel/unaligned.c
@@ -28,7 +28,7 @@ extern int die_if_kernel(char *str, struct pt_regs *regs, long err);
 #undef DEBUG_UNALIGNED_TRAP
 
 #ifdef DEBUG_UNALIGNED_TRAP
-# define DPRINT(a...)	do { printk("%s %u: ", __FUNCTION__, __LINE__); printk (a); } while (0)
+# define DPRINT(a...)	do { printk("%s %u: ", __func__, __LINE__); printk (a); } while (0)
 # define DDUMP(str,vp,len)	dump(str, vp, len)
 
 static void
@@ -674,7 +674,7 @@ emulate_load_updates (update_t type, load_store_t ld, struct pt_regs *regs, unsi
 	 * just in case.
 	 */
 	if (ld.x6_op == 1 || ld.x6_op == 3) {
-		printk(KERN_ERR "%s: register update on speculative load, error\n", __FUNCTION__);
+		printk(KERN_ERR "%s: register update on speculative load, error\n", __func__);
 		if (die_if_kernel("unaligned reference on speculative load with register update\n",
 				  regs, 30))
 			return;
@@ -1104,7 +1104,7 @@ emulate_load_floatpair (unsigned long ifa, load_store_t ld, struct pt_regs *regs
 		 */
 		if (ld.x6_op == 1 || ld.x6_op == 3)
 			printk(KERN_ERR "%s: register update on speculative load pair, error\n",
-			       __FUNCTION__);
+			       __func__);
 
 		setreg(ld.r3, ifa, 0, regs);
 	}
diff --git a/arch/ia64/kernel/unwind.c b/arch/ia64/kernel/unwind.c
index c1bdb5131814..67810b77d998 100644
--- a/arch/ia64/kernel/unwind.c
+++ b/arch/ia64/kernel/unwind.c
@@ -257,7 +257,7 @@ pt_regs_off (unsigned long reg)
 	off = unw.pt_regs_offsets[reg];
 
 	if (off < 0) {
-		UNW_DPRINT(0, "unwind.%s: bad scratch reg r%lu\n", __FUNCTION__, reg);
+		UNW_DPRINT(0, "unwind.%s: bad scratch reg r%lu\n", __func__, reg);
 		off = 0;
 	}
 	return (unsigned long) off;
@@ -268,13 +268,13 @@ get_scratch_regs (struct unw_frame_info *info)
 {
 	if (!info->pt) {
 		/* This should not happen with valid unwind info.  */
-		UNW_DPRINT(0, "unwind.%s: bad unwind info: resetting info->pt\n", __FUNCTION__);
+		UNW_DPRINT(0, "unwind.%s: bad unwind info: resetting info->pt\n", __func__);
 		if (info->flags & UNW_FLAG_INTERRUPT_FRAME)
 			info->pt = (unsigned long) ((struct pt_regs *) info->psp - 1);
 		else
 			info->pt = info->sp - 16;
 	}
-	UNW_DPRINT(3, "unwind.%s: sp 0x%lx pt 0x%lx\n", __FUNCTION__, info->sp, info->pt);
+	UNW_DPRINT(3, "unwind.%s: sp 0x%lx pt 0x%lx\n", __func__, info->sp, info->pt);
 	return (struct pt_regs *) info->pt;
 }
 
@@ -294,7 +294,7 @@ unw_access_gr (struct unw_frame_info *info, int regnum, unsigned long *val, char
 			return 0;
 		}
 		UNW_DPRINT(0, "unwind.%s: trying to access non-existent r%u\n",
-			   __FUNCTION__, regnum);
+			   __func__, regnum);
 		return -1;
 	}
 
@@ -341,7 +341,7 @@ unw_access_gr (struct unw_frame_info *info, int regnum, unsigned long *val, char
 			{
 				UNW_DPRINT(0, "unwind.%s: %p outside of regstk "
 					"[0x%lx-0x%lx)\n",
-					__FUNCTION__, (void *) addr,
+					__func__, (void *) addr,
 					info->regstk.limit,
 					info->regstk.top);
 				return -1;
@@ -374,7 +374,7 @@ unw_access_gr (struct unw_frame_info *info, int regnum, unsigned long *val, char
 		    || (unsigned long) addr >= info->regstk.top)
 		{
 			UNW_DPRINT(0, "unwind.%s: ignoring attempt to access register outside "
-				   "of rbs\n",  __FUNCTION__);
+				   "of rbs\n",  __func__);
 			return -1;
 		}
 		if ((unsigned long) nat_addr >= info->regstk.top)
@@ -385,7 +385,7 @@ unw_access_gr (struct unw_frame_info *info, int regnum, unsigned long *val, char
 	if (write) {
 		if (read_only(addr)) {
 			UNW_DPRINT(0, "unwind.%s: ignoring attempt to write read-only location\n",
-				__FUNCTION__);
+				__func__);
 		} else {
 			*addr = *val;
 			if (*nat)
@@ -427,13 +427,13 @@ unw_access_br (struct unw_frame_info *info, int regnum, unsigned long *val, int
 
 	      default:
 		UNW_DPRINT(0, "unwind.%s: trying to access non-existent b%u\n",
-			   __FUNCTION__, regnum);
+			   __func__, regnum);
 		return -1;
 	}
 	if (write)
 		if (read_only(addr)) {
 			UNW_DPRINT(0, "unwind.%s: ignoring attempt to write read-only location\n",
-				__FUNCTION__);
+				__func__);
 		} else
 			*addr = *val;
 	else
@@ -450,7 +450,7 @@ unw_access_fr (struct unw_frame_info *info, int regnum, struct ia64_fpreg *val,
 
 	if ((unsigned) (regnum - 2) >= 126) {
 		UNW_DPRINT(0, "unwind.%s: trying to access non-existent f%u\n",
-			   __FUNCTION__, regnum);
+			   __func__, regnum);
 		return -1;
 	}
 
@@ -482,7 +482,7 @@ unw_access_fr (struct unw_frame_info *info, int regnum, struct ia64_fpreg *val,
 	if (write)
 		if (read_only(addr)) {
 			UNW_DPRINT(0, "unwind.%s: ignoring attempt to write read-only location\n",
-				__FUNCTION__);
+				__func__);
 		} else
 			*addr = *val;
 	else
@@ -572,14 +572,14 @@ unw_access_ar (struct unw_frame_info *info, int regnum, unsigned long *val, int
 
 	      default:
 		UNW_DPRINT(0, "unwind.%s: trying to access non-existent ar%u\n",
-			   __FUNCTION__, regnum);
+			   __func__, regnum);
 		return -1;
 	}
 
 	if (write) {
 		if (read_only(addr)) {
 			UNW_DPRINT(0, "unwind.%s: ignoring attempt to write read-only location\n",
-				__FUNCTION__);
+				__func__);
 		} else
 			*addr = *val;
 	} else
@@ -600,7 +600,7 @@ unw_access_pr (struct unw_frame_info *info, unsigned long *val, int write)
 	if (write) {
 		if (read_only(addr)) {
 			UNW_DPRINT(0, "unwind.%s: ignoring attempt to write read-only location\n",
-				__FUNCTION__);
+				__func__);
 		} else
 			*addr = *val;
 	} else
@@ -699,7 +699,7 @@ decode_abreg (unsigned char abreg, int memory)
 	      default:
 		break;
 	}
-	UNW_DPRINT(0, "unwind.%s: bad abreg=0x%x\n", __FUNCTION__, abreg);
+	UNW_DPRINT(0, "unwind.%s: bad abreg=0x%x\n", __func__, abreg);
 	return UNW_REG_LC;
 }
 
@@ -739,7 +739,7 @@ spill_next_when (struct unw_reg_info **regp, struct unw_reg_info *lim, unw_word
 			return;
 		}
 	}
-	UNW_DPRINT(0, "unwind.%s: excess spill!\n",  __FUNCTION__);
+	UNW_DPRINT(0, "unwind.%s: excess spill!\n",  __func__);
 }
 
 static inline void
@@ -855,11 +855,11 @@ desc_abi (unsigned char abi, unsigned char context, struct unw_state_record *sr)
 {
 	if (abi == 3 && context == 'i') {
 		sr->flags |= UNW_FLAG_INTERRUPT_FRAME;
-		UNW_DPRINT(3, "unwind.%s: interrupt frame\n",  __FUNCTION__);
+		UNW_DPRINT(3, "unwind.%s: interrupt frame\n",  __func__);
 	}
 	else
 		UNW_DPRINT(0, "unwind%s: ignoring unwabi(abi=0x%x,context=0x%x)\n",
-				__FUNCTION__, abi, context);
+				__func__, abi, context);
 }
 
 static inline void
@@ -1347,7 +1347,7 @@ script_emit (struct unw_script *script, struct unw_insn insn)
 {
 	if (script->count >= UNW_MAX_SCRIPT_LEN) {
 		UNW_DPRINT(0, "unwind.%s: script exceeds maximum size of %u instructions!\n",
-			__FUNCTION__, UNW_MAX_SCRIPT_LEN);
+			__func__, UNW_MAX_SCRIPT_LEN);
 		return;
 	}
 	script->insn[script->count++] = insn;
@@ -1389,7 +1389,7 @@ emit_nat_info (struct unw_state_record *sr, int i, struct unw_script *script)
 
 	      default:
 		UNW_DPRINT(0, "unwind.%s: don't know how to emit nat info for where = %u\n",
-			   __FUNCTION__, r->where);
+			   __func__, r->where);
 		return;
 	}
 	insn.opc = opc;
@@ -1446,7 +1446,7 @@ compile_reg (struct unw_state_record *sr, int i, struct unw_script *script)
 				val = offsetof(struct pt_regs, f6) + 16*(rval - 6);
 			else
 				UNW_DPRINT(0, "unwind.%s: kernel may not touch f%lu\n",
-					   __FUNCTION__, rval);
+					   __func__, rval);
 		}
 		break;
 
@@ -1474,7 +1474,7 @@ compile_reg (struct unw_state_record *sr, int i, struct unw_script *script)
 
 	      default:
 		UNW_DPRINT(0, "unwind%s: register %u has unexpected `where' value of %u\n",
-			   __FUNCTION__, i, r->where);
+			   __func__, i, r->where);
 		break;
 	}
 	insn.opc = opc;
@@ -1547,10 +1547,10 @@ build_script (struct unw_frame_info *info)
 		r->when = UNW_WHEN_NEVER;
 	sr.pr_val = info->pr;
 
-	UNW_DPRINT(3, "unwind.%s: ip 0x%lx\n", __FUNCTION__, ip);
+	UNW_DPRINT(3, "unwind.%s: ip 0x%lx\n", __func__, ip);
 	script = script_new(ip);
 	if (!script) {
-		UNW_DPRINT(0, "unwind.%s: failed to create unwind script\n",  __FUNCTION__);
+		UNW_DPRINT(0, "unwind.%s: failed to create unwind script\n",  __func__);
 		STAT(unw.stat.script.build_time += ia64_get_itc() - start);
 		return NULL;
 	}
@@ -1569,7 +1569,7 @@ build_script (struct unw_frame_info *info)
 	if (!e) {
 		/* no info, return default unwinder (leaf proc, no mem stack, no saved regs)  */
 		UNW_DPRINT(1, "unwind.%s: no unwind info for ip=0x%lx (prev ip=0x%lx)\n",
-			__FUNCTION__, ip, unw.cache[info->prev_script].ip);
+			__func__, ip, unw.cache[info->prev_script].ip);
 		sr.curr.reg[UNW_REG_RP].where = UNW_WHERE_BR;
 		sr.curr.reg[UNW_REG_RP].when = -1;
 		sr.curr.reg[UNW_REG_RP].val = 0;
@@ -1618,13 +1618,13 @@ build_script (struct unw_frame_info *info)
 		sr.curr.reg[UNW_REG_RP].when = -1;
 		sr.curr.reg[UNW_REG_RP].val = sr.return_link_reg;
 		UNW_DPRINT(1, "unwind.%s: using default for rp at ip=0x%lx where=%d val=0x%lx\n",
-			   __FUNCTION__, ip, sr.curr.reg[UNW_REG_RP].where,
+			   __func__, ip, sr.curr.reg[UNW_REG_RP].where,
 			   sr.curr.reg[UNW_REG_RP].val);
 	}
 
 #ifdef UNW_DEBUG
 	UNW_DPRINT(1, "unwind.%s: state record for func 0x%lx, t=%u:\n",
-		__FUNCTION__, table->segment_base + e->start_offset, sr.when_target);
+		__func__, table->segment_base + e->start_offset, sr.when_target);
 	for (r = sr.curr.reg; r < sr.curr.reg + UNW_NUM_REGS; ++r) {
 		if (r->where != UNW_WHERE_NONE || r->when != UNW_WHEN_NEVER) {
 			UNW_DPRINT(1, " %s <- ", unw.preg_name[r - sr.curr.reg]);
@@ -1746,7 +1746,7 @@ run_script (struct unw_script *script, struct unw_frame_info *state)
 			} else {
 				s[dst] = 0;
 				UNW_DPRINT(0, "unwind.%s: no state->pt, dst=%ld, val=%ld\n",
-					   __FUNCTION__, dst, val);
+					   __func__, dst, val);
 			}
 			break;
 
@@ -1756,7 +1756,7 @@ run_script (struct unw_script *script, struct unw_frame_info *state)
 			else {
 				s[dst] = 0;
 				UNW_DPRINT(0, "unwind.%s: UNW_INSN_MOVE_CONST bad val=%ld\n",
-					   __FUNCTION__, val);
+					   __func__, val);
 			}
 			break;
 
@@ -1791,7 +1791,7 @@ run_script (struct unw_script *script, struct unw_frame_info *state)
 			    || s[val] < TASK_SIZE)
 			{
 				UNW_DPRINT(0, "unwind.%s: rejecting bad psp=0x%lx\n",
-					   __FUNCTION__, s[val]);
+					   __func__, s[val]);
 				break;
 			}
 #endif
@@ -1825,7 +1825,7 @@ find_save_locs (struct unw_frame_info *info)
 	if ((info->ip & (local_cpu_data->unimpl_va_mask | 0xf)) || info->ip < TASK_SIZE) {
 		/* don't let obviously bad addresses pollute the cache */
 		/* FIXME: should really be level 0 but it occurs too often. KAO */
-		UNW_DPRINT(1, "unwind.%s: rejecting bad ip=0x%lx\n", __FUNCTION__, info->ip);
+		UNW_DPRINT(1, "unwind.%s: rejecting bad ip=0x%lx\n", __func__, info->ip);
 		info->rp_loc = NULL;
 		return -1;
 	}
@@ -1838,7 +1838,7 @@ find_save_locs (struct unw_frame_info *info)
 			spin_unlock_irqrestore(&unw.lock, flags);
 			UNW_DPRINT(0,
 				   "unwind.%s: failed to locate/build unwind script for ip %lx\n",
-				   __FUNCTION__, info->ip);
+				   __func__, info->ip);
 			return -1;
 		}
 		have_write_lock = 1;
@@ -1882,21 +1882,21 @@ unw_unwind (struct unw_frame_info *info)
 		if (!unw_valid(info, info->rp_loc)) {
 			/* FIXME: should really be level 0 but it occurs too often. KAO */
 			UNW_DPRINT(1, "unwind.%s: failed to locate return link (ip=0x%lx)!\n",
-				   __FUNCTION__, info->ip);
+				   __func__, info->ip);
 			STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
 			return -1;
 		}
 		/* restore the ip */
 		ip = info->ip = *info->rp_loc;
 		if (ip < GATE_ADDR) {
-			UNW_DPRINT(2, "unwind.%s: reached user-space (ip=0x%lx)\n", __FUNCTION__, ip);
+			UNW_DPRINT(2, "unwind.%s: reached user-space (ip=0x%lx)\n", __func__, ip);
 			STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
 			return -1;
 		}
 
	/* validate the previous stack frame pointer */
 	if (!unw_valid(info, info->pfs_loc)) {
-		UNW_DPRINT(0, "unwind.%s: failed to locate ar.pfs!\n", __FUNCTION__);
+		UNW_DPRINT(0, "unwind.%s: failed to locate ar.pfs!\n", __func__);
 		STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
 		return -1;
 	}
@@ -1912,13 +1912,13 @@ unw_unwind (struct unw_frame_info *info)
 			num_regs = *info->cfm_loc & 0x7f;	/* size of frame */
 			info->pfs_loc =
 				(unsigned long *) (info->pt + offsetof(struct pt_regs, ar_pfs));
-			UNW_DPRINT(3, "unwind.%s: interrupt_frame pt 0x%lx\n", __FUNCTION__, info->pt);
+			UNW_DPRINT(3, "unwind.%s: interrupt_frame pt 0x%lx\n", __func__, info->pt);
 		} else
 			num_regs = (*info->cfm_loc >> 7) & 0x7f;	/* size of locals */
 	info->bsp = (unsigned long) ia64_rse_skip_regs((unsigned long *) info->bsp, -num_regs);
 	if (info->bsp < info->regstk.limit || info->bsp > info->regstk.top) {
 		UNW_DPRINT(0, "unwind.%s: bsp (0x%lx) out of range [0x%lx-0x%lx]\n",
-			__FUNCTION__, info->bsp, info->regstk.limit, info->regstk.top);
+			__func__, info->bsp, info->regstk.limit, info->regstk.top);
 		STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
 		return -1;
 	}
@@ -1927,14 +1927,14 @@ unw_unwind (struct unw_frame_info *info)
 	info->sp = info->psp;
 	if (info->sp < info->memstk.top || info->sp > info->memstk.limit) {
 		UNW_DPRINT(0, "unwind.%s: sp (0x%lx) out of range [0x%lx-0x%lx]\n",
-			__FUNCTION__, info->sp, info->memstk.top, info->memstk.limit);
+			__func__, info->sp, info->memstk.top, info->memstk.limit);
 		STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
 		return -1;
 	}
 
 	if (info->ip == prev_ip && info->sp == prev_sp && info->bsp == prev_bsp) {
 		UNW_DPRINT(0, "unwind.%s: ip, sp, bsp unchanged; stopping here (ip=0x%lx)\n",
-			   __FUNCTION__, ip);
+			   __func__, ip);
 		STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
 		return -1;
 	}
@@ -1961,7 +1961,7 @@ unw_unwind_to_user (struct unw_frame_info *info)
 		if ((long)((unsigned long)info->task + IA64_STK_OFFSET - sp)
 		    < IA64_PT_REGS_SIZE) {
 			UNW_DPRINT(0, "unwind.%s: ran off the top of the kernel stack\n",
-				   __FUNCTION__);
+				   __func__);
 			break;
 		}
 		if (unw_is_intr_frame(info) &&
@@ -1971,13 +1971,13 @@ unw_unwind_to_user (struct unw_frame_info *info)
 			unw_get_rp(info, &ip);
 			UNW_DPRINT(0, "unwind.%s: failed to read "
 				   "predicate register (ip=0x%lx)\n",
-				   __FUNCTION__, ip);
+				   __func__, ip);
 			return -1;
 		}
 	} while (unw_unwind(info) >= 0);
 	unw_get_ip(info, &ip);
 	UNW_DPRINT(0, "unwind.%s: failed to unwind to user-level (ip=0x%lx)\n",
-		   __FUNCTION__, ip);
+		   __func__, ip);
 	return -1;
 }
 EXPORT_SYMBOL(unw_unwind_to_user);
@@ -2028,7 +2028,7 @@ init_frame_info (struct unw_frame_info *info, struct task_struct *t,
 		   " pr 0x%lx\n"
 		   " sw 0x%lx\n"
 		   " sp 0x%lx\n",
-		   __FUNCTION__, (unsigned long) t, rbslimit, rbstop, stktop, stklimit,
+		   __func__, (unsigned long) t, rbslimit, rbstop, stktop, stklimit,
 		   info->pr, (unsigned long) info->sw, info->sp);
 	STAT(unw.stat.api.init_time += ia64_get_itc() - start; local_irq_restore(flags));
2034} 2034}
@@ -2047,7 +2047,7 @@ unw_init_frame_info (struct unw_frame_info *info, struct task_struct *t, struct
2047 " bsp 0x%lx\n" 2047 " bsp 0x%lx\n"
2048 " sol 0x%lx\n" 2048 " sol 0x%lx\n"
2049 " ip 0x%lx\n", 2049 " ip 0x%lx\n",
2050 __FUNCTION__, info->bsp, sol, info->ip); 2050 __func__, info->bsp, sol, info->ip);
2051 find_save_locs(info); 2051 find_save_locs(info);
2052} 2052}
2053 2053
@@ -2058,7 +2058,7 @@ unw_init_from_blocked_task (struct unw_frame_info *info, struct task_struct *t)
2058{ 2058{
2059 struct switch_stack *sw = (struct switch_stack *) (t->thread.ksp + 16); 2059 struct switch_stack *sw = (struct switch_stack *) (t->thread.ksp + 16);
2060 2060
2061 UNW_DPRINT(1, "unwind.%s\n", __FUNCTION__); 2061 UNW_DPRINT(1, "unwind.%s\n", __func__);
2062 unw_init_frame_info(info, t, sw); 2062 unw_init_frame_info(info, t, sw);
2063} 2063}
2064EXPORT_SYMBOL(unw_init_from_blocked_task); 2064EXPORT_SYMBOL(unw_init_from_blocked_task);
@@ -2088,7 +2088,7 @@ unw_add_unwind_table (const char *name, unsigned long segment_base, unsigned lon
2088 2088
2089 if (end - start <= 0) { 2089 if (end - start <= 0) {
2090 UNW_DPRINT(0, "unwind.%s: ignoring attempt to insert empty unwind table\n", 2090 UNW_DPRINT(0, "unwind.%s: ignoring attempt to insert empty unwind table\n",
2091 __FUNCTION__); 2091 __func__);
2092 return NULL; 2092 return NULL;
2093 } 2093 }
2094 2094
@@ -2119,14 +2119,14 @@ unw_remove_unwind_table (void *handle)
2119 2119
2120 if (!handle) { 2120 if (!handle) {
2121 UNW_DPRINT(0, "unwind.%s: ignoring attempt to remove non-existent unwind table\n", 2121 UNW_DPRINT(0, "unwind.%s: ignoring attempt to remove non-existent unwind table\n",
2122 __FUNCTION__); 2122 __func__);
2123 return; 2123 return;
2124 } 2124 }
2125 2125
2126 table = handle; 2126 table = handle;
2127 if (table == &unw.kernel_table) { 2127 if (table == &unw.kernel_table) {
2128 UNW_DPRINT(0, "unwind.%s: sorry, freeing the kernel's unwind table is a " 2128 UNW_DPRINT(0, "unwind.%s: sorry, freeing the kernel's unwind table is a "
2129 "no-can-do!\n", __FUNCTION__); 2129 "no-can-do!\n", __func__);
2130 return; 2130 return;
2131 } 2131 }
2132 2132
@@ -2139,7 +2139,7 @@ unw_remove_unwind_table (void *handle)
2139 break; 2139 break;
2140 if (!prev) { 2140 if (!prev) {
2141 UNW_DPRINT(0, "unwind.%s: failed to find unwind table %p\n", 2141 UNW_DPRINT(0, "unwind.%s: failed to find unwind table %p\n",
2142 __FUNCTION__, (void *) table); 2142 __func__, (void *) table);
2143 spin_unlock_irqrestore(&unw.lock, flags); 2143 spin_unlock_irqrestore(&unw.lock, flags);
2144 return; 2144 return;
2145 } 2145 }
@@ -2185,7 +2185,7 @@ create_gate_table (void)
2185 } 2185 }
2186 2186
2187 if (!punw) { 2187 if (!punw) {
2188 printk("%s: failed to find gate DSO's unwind table!\n", __FUNCTION__); 2188 printk("%s: failed to find gate DSO's unwind table!\n", __func__);
2189 return 0; 2189 return 0;
2190 } 2190 }
2191 2191
@@ -2202,7 +2202,7 @@ create_gate_table (void)
2202 unw.gate_table = kmalloc(size, GFP_KERNEL); 2202 unw.gate_table = kmalloc(size, GFP_KERNEL);
2203 if (!unw.gate_table) { 2203 if (!unw.gate_table) {
2204 unw.gate_table_size = 0; 2204 unw.gate_table_size = 0;
2205 printk(KERN_ERR "%s: unable to create unwind data for gate page!\n", __FUNCTION__); 2205 printk(KERN_ERR "%s: unable to create unwind data for gate page!\n", __func__);
2206 return 0; 2206 return 0;
2207 } 2207 }
2208 unw.gate_table_size = size; 2208 unw.gate_table_size = size;