-rw-r--r--  arch/ia64/hp/common/hwsw_iommu.c             2
-rw-r--r--  arch/ia64/hp/common/sba_iommu.c             36
-rw-r--r--  arch/ia64/hp/sim/simeth.c                    2
-rw-r--r--  arch/ia64/hp/sim/simserial.c                 2
-rw-r--r--  arch/ia64/kernel/crash.c                     2
-rw-r--r--  arch/ia64/kernel/efi.c                       6
-rw-r--r--  arch/ia64/kernel/iosapic.c                  24
-rw-r--r--  arch/ia64/kernel/irq_ia64.c                  4
-rw-r--r--  arch/ia64/kernel/mca.c                      73
-rw-r--r--  arch/ia64/kernel/module.c                   22
-rw-r--r--  arch/ia64/kernel/perfmon.c                   4
-rw-r--r--  arch/ia64/kernel/perfmon_default_smpl.c      4
-rw-r--r--  arch/ia64/kernel/ptrace.c                    4
-rw-r--r--  arch/ia64/kernel/setup.c                     8
-rw-r--r--  arch/ia64/kernel/unaligned.c                 6
-rw-r--r--  arch/ia64/kernel/unwind.c                  102
-rw-r--r--  arch/ia64/mm/init.c                          2
-rw-r--r--  arch/ia64/pci/pci.c                          4
-rw-r--r--  arch/ia64/sn/kernel/huberror.c               4
-rw-r--r--  arch/ia64/sn/kernel/io_acpi_init.c          36
-rw-r--r--  arch/ia64/sn/kernel/io_common.c              2
-rw-r--r--  arch/ia64/sn/kernel/io_init.c                4
-rw-r--r--  arch/ia64/sn/kernel/mca.c                    2
-rw-r--r--  arch/ia64/sn/pci/pci_dma.c                   6
-rw-r--r--  arch/ia64/sn/pci/tioca_provider.c           12
-rw-r--r--  arch/ia64/sn/pci/tioce_provider.c            4
26 files changed, 186 insertions(+), 191 deletions(-)
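Why the change: __FUNCTION__ is a GCC-specific predefined identifier, while __func__ is the equivalent standardized by C99; both expand to the name of the enclosing function, so every hunk below is a purely mechanical substitution. A minimal userspace sketch of the pattern (illustrative only, not kernel code; the report() function is hypothetical):

    #include <stdio.h>

    static void report(void)
    {
            /* Old style: GCC predefined identifier */
            printf("%s: via __FUNCTION__\n", __FUNCTION__);

            /* New style: standard C99 identifier, now preferred in the kernel */
            printf("%s: via __func__\n", __func__);
    }

    int main(void)
    {
            report();   /* both lines print "report: ..." when built with GCC */
            return 0;
    }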
diff --git a/arch/ia64/hp/common/hwsw_iommu.c b/arch/ia64/hp/common/hwsw_iommu.c
index 94e57109fad6..8f6bcfe1dada 100644
--- a/arch/ia64/hp/common/hwsw_iommu.c
+++ b/arch/ia64/hp/common/hwsw_iommu.c
@@ -71,7 +71,7 @@ hwsw_init (void)
 #ifdef CONFIG_IA64_GENERIC
 	/* Better to have normal DMA than panic */
 	printk(KERN_WARNING "%s: Failed to initialize software I/O TLB,"
-	       " reverting to hpzx1 platform vector\n", __FUNCTION__);
+	       " reverting to hpzx1 platform vector\n", __func__);
 	machvec_init("hpzx1");
 #else
 	panic("Unable to initialize software I/O TLB services");
diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c
index a94445422cc6..523eae6d3e49 100644
--- a/arch/ia64/hp/common/sba_iommu.c
+++ b/arch/ia64/hp/common/sba_iommu.c
@@ -529,7 +529,7 @@ sba_search_bitmap(struct ioc *ioc, unsigned long bits_wanted, int use_hint)
 	base_mask = RESMAP_MASK(bits_wanted);
 	mask = base_mask << bitshiftcnt;
 
-	DBG_RES("%s() o %ld %p", __FUNCTION__, o, res_ptr);
+	DBG_RES("%s() o %ld %p", __func__, o, res_ptr);
 	for(; res_ptr < res_end ; res_ptr++)
 	{
 		DBG_RES(" %p %lx %lx\n", res_ptr, mask, *res_ptr);
@@ -679,7 +679,7 @@ sba_alloc_range(struct ioc *ioc, size_t size)
 #endif
 
 	DBG_RES("%s(%x) %d -> %lx hint %x/%x\n",
-		__FUNCTION__, size, pages_needed, pide,
+		__func__, size, pages_needed, pide,
 		(uint) ((unsigned long) ioc->res_hint - (unsigned long) ioc->res_map),
 		ioc->res_bitshift );
 
@@ -722,8 +722,8 @@ sba_free_range(struct ioc *ioc, dma_addr_t iova, size_t size)
 	m = RESMAP_MASK(bits_not_wanted) << (pide & (BITS_PER_LONG - 1));
 	bits_not_wanted = 0;
 
-	DBG_RES("%s( ,%x,%x) %x/%lx %x %p %lx\n", __FUNCTION__, (uint) iova, size,
-		bits_not_wanted, m, pide, res_ptr, *res_ptr);
+	DBG_RES("%s( ,%x,%x) %x/%lx %x %p %lx\n", __func__, (uint) iova, size,
+		bits_not_wanted, m, pide, res_ptr, *res_ptr);
 
 	ASSERT(m != 0);
 	ASSERT(bits_not_wanted);
@@ -940,8 +940,7 @@ sba_map_single(struct device *dev, void *addr, size_t size, int dir)
 
 	iovp = (dma_addr_t) pide << iovp_shift;
 
-	DBG_RUN("%s() 0x%p -> 0x%lx\n",
-		__FUNCTION__, addr, (long) iovp | offset);
+	DBG_RUN("%s() 0x%p -> 0x%lx\n", __func__, addr, (long) iovp | offset);
 
 	pdir_start = &(ioc->pdir_base[pide]);
 
@@ -1029,8 +1028,7 @@ void sba_unmap_single(struct device *dev, dma_addr_t iova, size_t size, int dir)
 #endif
 	offset = iova & ~iovp_mask;
 
-	DBG_RUN("%s() iovp 0x%lx/%x\n",
-		__FUNCTION__, (long) iova, size);
+	DBG_RUN("%s() iovp 0x%lx/%x\n", __func__, (long) iova, size);
 
 	iova ^= offset;	/* clear offset bits */
 	size += offset;
@@ -1404,7 +1402,7 @@ int sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents, int di
 	struct scatterlist *sg;
 #endif
 
-	DBG_RUN_SG("%s() START %d entries\n", __FUNCTION__, nents);
+	DBG_RUN_SG("%s() START %d entries\n", __func__, nents);
 	ioc = GET_IOC(dev);
 	ASSERT(ioc);
 
@@ -1468,7 +1466,7 @@ int sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents, int di
 #endif
 
 	ASSERT(coalesced == filled);
-	DBG_RUN_SG("%s() DONE %d mappings\n", __FUNCTION__, filled);
+	DBG_RUN_SG("%s() DONE %d mappings\n", __func__, filled);
 
 	return filled;
 }
@@ -1491,7 +1489,7 @@ void sba_unmap_sg (struct device *dev, struct scatterlist *sglist, int nents, in
 #endif
 
 	DBG_RUN_SG("%s() START %d entries, %p,%x\n",
-		__FUNCTION__, nents, sba_sg_address(sglist), sglist->length);
+		__func__, nents, sba_sg_address(sglist), sglist->length);
 
 #ifdef ASSERT_PDIR_SANITY
 	ioc = GET_IOC(dev);
@@ -1509,7 +1507,7 @@ void sba_unmap_sg (struct device *dev, struct scatterlist *sglist, int nents, in
 		nents--;
 	}
 
-	DBG_RUN_SG("%s() DONE (nents %d)\n", __FUNCTION__, nents);
+	DBG_RUN_SG("%s() DONE (nents %d)\n", __func__, nents);
 
 #ifdef ASSERT_PDIR_SANITY
 	spin_lock_irqsave(&ioc->res_lock, flags);
@@ -1546,7 +1544,7 @@ ioc_iova_init(struct ioc *ioc)
 	ioc->iov_size = ~ioc->imask + 1;
 
 	DBG_INIT("%s() hpa %p IOV base 0x%lx mask 0x%lx (%dMB)\n",
-		__FUNCTION__, ioc->ioc_hpa, ioc->ibase, ioc->imask,
+		__func__, ioc->ioc_hpa, ioc->ibase, ioc->imask,
 		ioc->iov_size >> 20);
 
 	switch (iovp_size) {
@@ -1569,7 +1567,7 @@ ioc_iova_init(struct ioc *ioc)
 
 	memset(ioc->pdir_base, 0, ioc->pdir_size);
 
-	DBG_INIT("%s() IOV page size %ldK pdir %p size %x\n", __FUNCTION__,
+	DBG_INIT("%s() IOV page size %ldK pdir %p size %x\n", __func__,
 		iovp_size >> 10, ioc->pdir_base, ioc->pdir_size);
 
 	ASSERT(ALIGN((unsigned long) ioc->pdir_base, 4*1024) == (unsigned long) ioc->pdir_base);
@@ -1612,7 +1610,7 @@ ioc_iova_init(struct ioc *ioc)
 
 		prefetch_spill_page = virt_to_phys(addr);
 
-		DBG_INIT("%s() prefetch spill addr: 0x%lx\n", __FUNCTION__, prefetch_spill_page);
+		DBG_INIT("%s() prefetch spill addr: 0x%lx\n", __func__, prefetch_spill_page);
 	}
 	/*
 	** Set all the PDIR entries valid w/ the spill page as the target
@@ -1641,7 +1639,7 @@ ioc_resource_init(struct ioc *ioc)
 	/* resource map size dictated by pdir_size */
 	ioc->res_size = ioc->pdir_size / PDIR_ENTRY_SIZE; /* entries */
 	ioc->res_size >>= 3;	/* convert bit count to byte count */
-	DBG_INIT("%s() res_size 0x%x\n", __FUNCTION__, ioc->res_size);
+	DBG_INIT("%s() res_size 0x%x\n", __func__, ioc->res_size);
 
 	ioc->res_map = (char *) __get_free_pages(GFP_KERNEL,
 						 get_order(ioc->res_size));
@@ -1664,7 +1662,7 @@ ioc_resource_init(struct ioc *ioc)
 							| prefetch_spill_page);
 #endif
 
-	DBG_INIT("%s() res_map %x %p\n", __FUNCTION__,
+	DBG_INIT("%s() res_map %x %p\n", __func__,
 		ioc->res_size, (void *) ioc->res_map);
 }
 
@@ -1767,7 +1765,7 @@ ioc_init(u64 hpa, void *handle)
 	iovp_size = (1 << iovp_shift);
 	iovp_mask = ~(iovp_size - 1);
 
-	DBG_INIT("%s: PAGE_SIZE %ldK, iovp_size %ldK\n", __FUNCTION__,
+	DBG_INIT("%s: PAGE_SIZE %ldK, iovp_size %ldK\n", __func__,
 		PAGE_SIZE >> 10, iovp_size >> 10);
 
 	if (!ioc->name) {
@@ -2137,7 +2135,7 @@ sba_page_override(char *str)
 			break;
 		default:
 			printk("%s: unknown/unsupported iommu page size %ld\n",
-			       __FUNCTION__, page_size);
+			       __func__, page_size);
 	}
 
 	return 1;
diff --git a/arch/ia64/hp/sim/simeth.c b/arch/ia64/hp/sim/simeth.c
index 9898febf609a..969fe9f443c4 100644
--- a/arch/ia64/hp/sim/simeth.c
+++ b/arch/ia64/hp/sim/simeth.c
@@ -222,7 +222,7 @@ simeth_probe1(void)
 	}
 
 	if ((rc = assign_irq_vector(AUTO_ASSIGN)) < 0)
-		panic("%s: out of interrupt vectors!\n", __FUNCTION__);
+		panic("%s: out of interrupt vectors!\n", __func__);
 	dev->irq = rc;
 
 	/*
diff --git a/arch/ia64/hp/sim/simserial.c b/arch/ia64/hp/sim/simserial.c
index ef252df50e1e..eb0c32a85fd7 100644
--- a/arch/ia64/hp/sim/simserial.c
+++ b/arch/ia64/hp/sim/simserial.c
@@ -1000,7 +1000,7 @@ simrs_init (void)
 	if (!state->irq) {
 		if ((rc = assign_irq_vector(AUTO_ASSIGN)) < 0)
 			panic("%s: out of interrupt vectors!\n",
-			      __FUNCTION__);
+			      __func__);
 		state->irq = rc;
 		ia64_ssc_connect_irq(KEYBOARD_INTR, state->irq);
 	}
diff --git a/arch/ia64/kernel/crash.c b/arch/ia64/kernel/crash.c
index f1cf2df97a2d..fbe742ad2fde 100644
--- a/arch/ia64/kernel/crash.c
+++ b/arch/ia64/kernel/crash.c
@@ -155,7 +155,7 @@ kdump_init_notifier(struct notifier_block *self, unsigned long val, void *data)
 		if (val == DIE_INIT_MONARCH_LEAVE)
 			ia64_mca_printk(KERN_NOTICE
 					"%s: kdump not configured\n",
-					__FUNCTION__);
+					__func__);
 		return NOTIFY_DONE;
 	}
 
diff --git a/arch/ia64/kernel/efi.c b/arch/ia64/kernel/efi.c
index 919070a9aed7..78f50e81cd95 100644
--- a/arch/ia64/kernel/efi.c
+++ b/arch/ia64/kernel/efi.c
@@ -379,8 +379,8 @@ efi_get_pal_addr (void)
 	 * a dedicated ITR for the PAL code.
 	 */
 	if ((vaddr & mask) == (KERNEL_START & mask)) {
-		printk(KERN_INFO "%s: no need to install ITR for "
-		       "PAL code\n", __FUNCTION__);
+		printk(KERN_INFO "%s: no need to install ITR for PAL code\n",
+		       __func__);
 		continue;
 	}
 
@@ -399,7 +399,7 @@ efi_get_pal_addr (void)
 		return __va(md->phys_addr);
 	}
 	printk(KERN_WARNING "%s: no PAL-code memory-descriptor found\n",
-	       __FUNCTION__);
+	       __func__);
 	return NULL;
 }
 
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c
index 7b3292282dea..082c31dcfd99 100644
--- a/arch/ia64/kernel/iosapic.c
+++ b/arch/ia64/kernel/iosapic.c
@@ -534,7 +534,7 @@ iosapic_reassign_vector (int irq)
 	if (iosapic_intr_info[irq].count) {
 		new_irq = create_irq();
 		if (new_irq < 0)
-			panic("%s: out of interrupt vectors!\n", __FUNCTION__);
+			panic("%s: out of interrupt vectors!\n", __func__);
 		printk(KERN_INFO "Reassigning vector %d to %d\n",
 		       irq_to_vector(irq), irq_to_vector(new_irq));
 		memcpy(&iosapic_intr_info[new_irq], &iosapic_intr_info[irq],
@@ -599,7 +599,7 @@ register_intr (unsigned int gsi, int irq, unsigned char delivery,
 	index = find_iosapic(gsi);
 	if (index < 0) {
 		printk(KERN_WARNING "%s: No IOSAPIC for GSI %u\n",
-		       __FUNCTION__, gsi);
+		       __func__, gsi);
 		return -ENODEV;
 	}
 
@@ -608,7 +608,7 @@ register_intr (unsigned int gsi, int irq, unsigned char delivery,
 		rte = iosapic_alloc_rte();
 		if (!rte) {
 			printk(KERN_WARNING "%s: cannot allocate memory\n",
-			       __FUNCTION__);
+			       __func__);
 			return -ENOMEM;
 		}
 
@@ -625,7 +625,7 @@ register_intr (unsigned int gsi, int irq, unsigned char delivery,
 		    (info->trigger != trigger || info->polarity != polarity)){
 			printk (KERN_WARNING
 				"%s: cannot override the interrupt\n",
-				__FUNCTION__);
+				__func__);
 			return -EINVAL;
 		}
 		rte->refcnt++;
@@ -647,7 +647,7 @@ register_intr (unsigned int gsi, int irq, unsigned char delivery,
 		if (idesc->chip != &no_irq_type)
 			printk(KERN_WARNING
 			       "%s: changing vector %d from %s to %s\n",
-			       __FUNCTION__, irq_to_vector(irq),
+			       __func__, irq_to_vector(irq),
 			       idesc->chip->name, irq_type->name);
 		idesc->chip = irq_type;
 	}
@@ -920,7 +920,7 @@ iosapic_register_platform_intr (u32 int_type, unsigned int gsi,
 	      case ACPI_INTERRUPT_INIT:
 		irq = create_irq();
 		if (irq < 0)
-			panic("%s: out of interrupt vectors!\n", __FUNCTION__);
+			panic("%s: out of interrupt vectors!\n", __func__);
 		vector = irq_to_vector(irq);
 		delivery = IOSAPIC_INIT;
 		break;
@@ -931,7 +931,7 @@ iosapic_register_platform_intr (u32 int_type, unsigned int gsi,
 		mask = 1;
 		break;
 	      default:
-		printk(KERN_ERR "%s: invalid int type 0x%x\n", __FUNCTION__,
+		printk(KERN_ERR "%s: invalid int type 0x%x\n", __func__,
 		       int_type);
 		return -1;
 	}
@@ -996,7 +996,7 @@ iosapic_system_init (int system_pcat_compat)
 		 */
 		printk(KERN_INFO
 		       "%s: Disabling PC-AT compatible 8259 interrupts\n",
-		       __FUNCTION__);
+		       __func__);
 		outb(0xff, 0xA1);
 		outb(0xff, 0x21);
 	}
@@ -1011,7 +1011,7 @@ iosapic_alloc (void)
 		if (!iosapic_lists[index].addr)
 			return index;
 
-	printk(KERN_WARNING "%s: failed to allocate iosapic\n", __FUNCTION__);
+	printk(KERN_WARNING "%s: failed to allocate iosapic\n", __func__);
 	return -1;
 }
 
@@ -1109,14 +1109,14 @@ iosapic_remove (unsigned int gsi_base)
 	index = find_iosapic(gsi_base);
 	if (index < 0) {
 		printk(KERN_WARNING "%s: No IOSAPIC for GSI base %u\n",
-		       __FUNCTION__, gsi_base);
+		       __func__, gsi_base);
 		goto out;
 	}
 
 	if (iosapic_lists[index].rtes_inuse) {
 		err = -EBUSY;
 		printk(KERN_WARNING "%s: IOSAPIC for GSI base %u is busy\n",
-		       __FUNCTION__, gsi_base);
+		       __func__, gsi_base);
 		goto out;
 	}
 
@@ -1137,7 +1137,7 @@ map_iosapic_to_node(unsigned int gsi_base, int node)
 	index = find_iosapic(gsi_base);
 	if (index < 0) {
 		printk(KERN_WARNING "%s: No IOSAPIC for GSI %u\n",
-		       __FUNCTION__, gsi_base);
+		       __func__, gsi_base);
 		return;
 	}
 	iosapic_lists[index].node = node;
diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c
index 16472821d7a9..d8be23fbe6bc 100644
--- a/arch/ia64/kernel/irq_ia64.c
+++ b/arch/ia64/kernel/irq_ia64.c
@@ -507,7 +507,7 @@ ia64_handle_irq (ia64_vector vector, struct pt_regs *regs)
 			if (unlikely(irq < 0)) {
 				printk(KERN_ERR "%s: Unexpected interrupt "
 				       "vector %d on CPU %d is not mapped "
-				       "to any IRQ!\n", __FUNCTION__, vector,
+				       "to any IRQ!\n", __func__, vector,
 				       smp_processor_id());
 			} else
 				generic_handle_irq(irq);
@@ -572,7 +572,7 @@ void ia64_process_pending_intr(void)
 			if (unlikely(irq < 0)) {
 				printk(KERN_ERR "%s: Unexpected interrupt "
 				       "vector %d on CPU %d not being mapped "
-				       "to any IRQ!!\n", __FUNCTION__, vector,
+				       "to any IRQ!!\n", __func__, vector,
 				       smp_processor_id());
 			} else {
 				vectors_in_migration[irq]=0;
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 6e17aed53135..6c18221dba36 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -413,8 +413,8 @@ ia64_log_get(int sal_info_type, u8 **buffer, int irq_safe)
 		IA64_LOG_INDEX_INC(sal_info_type);
 		IA64_LOG_UNLOCK(sal_info_type);
 		if (irq_safe) {
-			IA64_MCA_DEBUG("%s: SAL error record type %d retrieved. "
-				"Record length = %ld\n", __FUNCTION__, sal_info_type, total_len);
+			IA64_MCA_DEBUG("%s: SAL error record type %d retrieved. Record length = %ld\n",
+				__func__, sal_info_type, total_len);
 		}
 		*buffer = (u8 *) log_buffer;
 		return total_len;
@@ -518,7 +518,7 @@ ia64_mca_cpe_int_handler (int cpe_irq, void *arg)
 	static DEFINE_SPINLOCK(cpe_history_lock);
 
 	IA64_MCA_DEBUG("%s: received interrupt vector = %#x on CPU %d\n",
-		       __FUNCTION__, cpe_irq, smp_processor_id());
+		       __func__, cpe_irq, smp_processor_id());
 
 	/* SAL spec states this should run w/ interrupts enabled */
 	local_irq_enable();
@@ -594,7 +594,7 @@ ia64_mca_register_cpev (int cpev)
 	}
 
 	IA64_MCA_DEBUG("%s: corrected platform error "
-		       "vector %#x registered\n", __FUNCTION__, cpev);
+		       "vector %#x registered\n", __func__, cpev);
 }
 #endif /* CONFIG_ACPI */
 
@@ -621,12 +621,11 @@ ia64_mca_cmc_vector_setup (void)
 	cmcv.cmcv_vector = IA64_CMC_VECTOR;
 	ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval);
 
-	IA64_MCA_DEBUG("%s: CPU %d corrected "
-		       "machine check vector %#x registered.\n",
-		       __FUNCTION__, smp_processor_id(), IA64_CMC_VECTOR);
+	IA64_MCA_DEBUG("%s: CPU %d corrected machine check vector %#x registered.\n",
+		       __func__, smp_processor_id(), IA64_CMC_VECTOR);
 
 	IA64_MCA_DEBUG("%s: CPU %d CMCV = %#016lx\n",
-		       __FUNCTION__, smp_processor_id(), ia64_getreg(_IA64_REG_CR_CMCV));
+		       __func__, smp_processor_id(), ia64_getreg(_IA64_REG_CR_CMCV));
 }
 
 /*
@@ -651,9 +650,8 @@ ia64_mca_cmc_vector_disable (void *dummy)
 	cmcv.cmcv_mask = 1; /* Mask/disable interrupt */
 	ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval);
 
-	IA64_MCA_DEBUG("%s: CPU %d corrected "
-		       "machine check vector %#x disabled.\n",
-		       __FUNCTION__, smp_processor_id(), cmcv.cmcv_vector);
+	IA64_MCA_DEBUG("%s: CPU %d corrected machine check vector %#x disabled.\n",
+		       __func__, smp_processor_id(), cmcv.cmcv_vector);
 }
 
 /*
@@ -678,9 +676,8 @@ ia64_mca_cmc_vector_enable (void *dummy)
 	cmcv.cmcv_mask = 0; /* Unmask/enable interrupt */
 	ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval);
 
-	IA64_MCA_DEBUG("%s: CPU %d corrected "
-		       "machine check vector %#x enabled.\n",
-		       __FUNCTION__, smp_processor_id(), cmcv.cmcv_vector);
+	IA64_MCA_DEBUG("%s: CPU %d corrected machine check vector %#x enabled.\n",
+		       __func__, smp_processor_id(), cmcv.cmcv_vector);
 }
 
 /*
@@ -767,7 +764,7 @@ ia64_mca_rendez_int_handler(int rendez_irq, void *arg)
 	local_irq_save(flags);
 	if (notify_die(DIE_MCA_RENDZVOUS_ENTER, "MCA", get_irq_regs(),
 		       (long)&nd, 0, 0) == NOTIFY_STOP)
-		ia64_mca_spin(__FUNCTION__);
+		ia64_mca_spin(__func__);
 
 	ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_DONE;
 	/* Register with the SAL monarch that the slave has
@@ -777,7 +774,7 @@ ia64_mca_rendez_int_handler(int rendez_irq, void *arg)
 
 	if (notify_die(DIE_MCA_RENDZVOUS_PROCESS, "MCA", get_irq_regs(),
 		       (long)&nd, 0, 0) == NOTIFY_STOP)
-		ia64_mca_spin(__FUNCTION__);
+		ia64_mca_spin(__func__);
 
 	/* Wait for the monarch cpu to exit. */
 	while (monarch_cpu != -1)
@@ -785,7 +782,7 @@ ia64_mca_rendez_int_handler(int rendez_irq, void *arg)
 
 	if (notify_die(DIE_MCA_RENDZVOUS_LEAVE, "MCA", get_irq_regs(),
 		       (long)&nd, 0, 0) == NOTIFY_STOP)
-		ia64_mca_spin(__FUNCTION__);
+		ia64_mca_spin(__func__);
 
 	ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
 	/* Enable all interrupts */
@@ -1230,7 +1227,7 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
 
 	if (notify_die(DIE_MCA_MONARCH_ENTER, "MCA", regs, (long)&nd, 0, 0)
 			== NOTIFY_STOP)
-		ia64_mca_spin(__FUNCTION__);
+		ia64_mca_spin(__func__);
 
 	ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_CONCURRENT_MCA;
 	if (sos->monarch) {
@@ -1246,7 +1243,7 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
 		ia64_mca_wakeup_all();
 		if (notify_die(DIE_MCA_MONARCH_PROCESS, "MCA", regs, (long)&nd, 0, 0)
 				== NOTIFY_STOP)
-			ia64_mca_spin(__FUNCTION__);
+			ia64_mca_spin(__func__);
 	} else {
 		while (cpu_isset(cpu, mca_cpu))
 			cpu_relax();	/* spin until monarch wakes us */
@@ -1276,7 +1273,7 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
 	}
 	if (notify_die(DIE_MCA_MONARCH_LEAVE, "MCA", regs, (long)&nd, 0, recover)
 			== NOTIFY_STOP)
-		ia64_mca_spin(__FUNCTION__);
+		ia64_mca_spin(__func__);
 
 
 	if (atomic_dec_return(&mca_count) > 0) {
@@ -1328,7 +1325,7 @@ ia64_mca_cmc_int_handler(int cmc_irq, void *arg)
 	static DEFINE_SPINLOCK(cmc_history_lock);
 
 	IA64_MCA_DEBUG("%s: received interrupt vector = %#x on CPU %d\n",
-		       __FUNCTION__, cmc_irq, smp_processor_id());
+		       __func__, cmc_irq, smp_processor_id());
 
 	/* SAL spec states this should run w/ interrupts enabled */
 	local_irq_enable();
@@ -1614,7 +1611,7 @@ ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw,
 	 */
 	if (!sos->monarch && atomic_add_return(1, &slaves) == num_online_cpus()) {
 		mprintk(KERN_WARNING "%s: Promoting cpu %d to monarch.\n",
-			__FUNCTION__, cpu);
+			__func__, cpu);
 		atomic_dec(&slaves);
 		sos->monarch = 1;
 	}
@@ -1626,7 +1623,7 @@ ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw,
 	 */
 	if (sos->monarch && atomic_add_return(1, &monarchs) > 1) {
 		mprintk(KERN_WARNING "%s: Demoting cpu %d to slave.\n",
-			__FUNCTION__, cpu);
+			__func__, cpu);
 		atomic_dec(&monarchs);
 		sos->monarch = 0;
 	}
@@ -1637,15 +1634,15 @@ ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw,
 			cpu_relax();	/* spin until monarch enters */
 		if (notify_die(DIE_INIT_SLAVE_ENTER, "INIT", regs, (long)&nd, 0, 0)
 				== NOTIFY_STOP)
-			ia64_mca_spin(__FUNCTION__);
+			ia64_mca_spin(__func__);
 		if (notify_die(DIE_INIT_SLAVE_PROCESS, "INIT", regs, (long)&nd, 0, 0)
 				== NOTIFY_STOP)
-			ia64_mca_spin(__FUNCTION__);
+			ia64_mca_spin(__func__);
 		while (monarch_cpu != -1)
 			cpu_relax();	/* spin until monarch leaves */
 		if (notify_die(DIE_INIT_SLAVE_LEAVE, "INIT", regs, (long)&nd, 0, 0)
 				== NOTIFY_STOP)
-			ia64_mca_spin(__FUNCTION__);
+			ia64_mca_spin(__func__);
 		mprintk("Slave on cpu %d returning to normal service.\n", cpu);
 		set_curr_task(cpu, previous_current);
 		ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
@@ -1656,7 +1653,7 @@ ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw,
 	monarch_cpu = cpu;
 	if (notify_die(DIE_INIT_MONARCH_ENTER, "INIT", regs, (long)&nd, 0, 0)
 			== NOTIFY_STOP)
-		ia64_mca_spin(__FUNCTION__);
+		ia64_mca_spin(__func__);
 
 	/*
 	 * Wait for a bit. On some machines (e.g., HP's zx2000 and zx6000, INIT can be
@@ -1673,10 +1670,10 @@ ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw,
 	 */
 	if (notify_die(DIE_INIT_MONARCH_PROCESS, "INIT", regs, (long)&nd, 0, 0)
 			== NOTIFY_STOP)
-		ia64_mca_spin(__FUNCTION__);
+		ia64_mca_spin(__func__);
 	if (notify_die(DIE_INIT_MONARCH_LEAVE, "INIT", regs, (long)&nd, 0, 0)
 			== NOTIFY_STOP)
-		ia64_mca_spin(__FUNCTION__);
+		ia64_mca_spin(__func__);
 	mprintk("\nINIT dump complete. Monarch on cpu %d returning to normal service.\n", cpu);
 	atomic_dec(&monarchs);
 	set_curr_task(cpu, previous_current);
@@ -1884,7 +1881,7 @@ ia64_mca_init(void)
 		.priority = 0/* we need to notified last */
 	};
 
-	IA64_MCA_DEBUG("%s: begin\n", __FUNCTION__);
+	IA64_MCA_DEBUG("%s: begin\n", __func__);
 
 	/* Clear the Rendez checkin flag for all cpus */
 	for(i = 0 ; i < NR_CPUS; i++)
@@ -1928,7 +1925,7 @@ ia64_mca_init(void)
 		return;
 	}
 
-	IA64_MCA_DEBUG("%s: registered MCA rendezvous spinloop and wakeup mech.\n", __FUNCTION__);
+	IA64_MCA_DEBUG("%s: registered MCA rendezvous spinloop and wakeup mech.\n", __func__);
 
 	ia64_mc_info.imi_mca_handler = ia64_tpa(mca_hldlr_ptr->fp);
 	/*
@@ -1949,7 +1946,7 @@ ia64_mca_init(void)
 		return;
 	}
 
-	IA64_MCA_DEBUG("%s: registered OS MCA handler with SAL at 0x%lx, gp = 0x%lx\n", __FUNCTION__,
+	IA64_MCA_DEBUG("%s: registered OS MCA handler with SAL at 0x%lx, gp = 0x%lx\n", __func__,
 		       ia64_mc_info.imi_mca_handler, ia64_tpa(mca_hldlr_ptr->gp));
 
 	/*
@@ -1961,7 +1958,7 @@ ia64_mca_init(void)
 	ia64_mc_info.imi_slave_init_handler = ia64_tpa(init_hldlr_ptr_slave->fp);
 	ia64_mc_info.imi_slave_init_handler_size = 0;
 
-	IA64_MCA_DEBUG("%s: OS INIT handler at %lx\n", __FUNCTION__,
+	IA64_MCA_DEBUG("%s: OS INIT handler at %lx\n", __func__,
 		       ia64_mc_info.imi_monarch_init_handler);
 
 	/* Register the os init handler with SAL */
@@ -1982,7 +1979,7 @@ ia64_mca_init(void)
 		return;
 	}
 
-	IA64_MCA_DEBUG("%s: registered OS INIT handler with SAL\n", __FUNCTION__);
+	IA64_MCA_DEBUG("%s: registered OS INIT handler with SAL\n", __func__);
 
 	/*
 	 * Configure the CMCI/P vector and handler. Interrupts for CMC are
@@ -2042,7 +2039,7 @@ ia64_mca_late_init(void)
 	cmc_polling_enabled = 0;
 	schedule_work(&cmc_enable_work);
 
-	IA64_MCA_DEBUG("%s: CMCI/P setup and enabled.\n", __FUNCTION__);
+	IA64_MCA_DEBUG("%s: CMCI/P setup and enabled.\n", __func__);
 
 #ifdef CONFIG_ACPI
 	/* Setup the CPEI/P vector and handler */
@@ -2065,17 +2062,17 @@ ia64_mca_late_init(void)
 				ia64_cpe_irq = irq;
 				ia64_mca_register_cpev(cpe_vector);
 				IA64_MCA_DEBUG("%s: CPEI/P setup and enabled.\n",
-					__FUNCTION__);
+					__func__);
 				return 0;
 			}
 			printk(KERN_ERR "%s: Failed to find irq for CPE "
 					"interrupt handler, vector %d\n",
-					__FUNCTION__, cpe_vector);
+					__func__, cpe_vector);
 		}
 		/* If platform doesn't support CPEI, get the timer going. */
 		if (cpe_poll_enabled) {
 			ia64_mca_cpe_poll(0UL);
-			IA64_MCA_DEBUG("%s: CPEP setup and enabled.\n", __FUNCTION__);
+			IA64_MCA_DEBUG("%s: CPEP setup and enabled.\n", __func__);
 		}
 	}
 #endif
diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
index e58f4367cf11..e83e2ea3b3e0 100644
--- a/arch/ia64/kernel/module.c
+++ b/arch/ia64/kernel/module.c
@@ -493,7 +493,7 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
 	mod->arch.opd->sh_addralign = 8;
 	mod->arch.opd->sh_size = fdescs * sizeof(struct fdesc);
 	DEBUGP("%s: core.plt=%lx, init.plt=%lx, got=%lx, fdesc=%lx\n",
-	       __FUNCTION__, mod->arch.core_plt->sh_size, mod->arch.init_plt->sh_size,
+	       __func__, mod->arch.core_plt->sh_size, mod->arch.init_plt->sh_size,
 	       mod->arch.got->sh_size, mod->arch.opd->sh_size);
 	return 0;
 }
@@ -585,7 +585,7 @@ get_plt (struct module *mod, const struct insn *insn, uint64_t value, int *okp)
 #if ARCH_MODULE_DEBUG
 	if (plt_target(plt) != target_ip) {
 		printk("%s: mistargeted PLT: wanted %lx, got %lx\n",
-		       __FUNCTION__, target_ip, plt_target(plt));
+		       __func__, target_ip, plt_target(plt));
 		*okp = 0;
 		return 0;
 	}
@@ -703,7 +703,7 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
 		if (r_type == R_IA64_PCREL21BI) {
 			if (!is_internal(mod, val)) {
 				printk(KERN_ERR "%s: %s reloc against non-local symbol (%lx)\n",
-				       __FUNCTION__, reloc_name[r_type], val);
+				       __func__, reloc_name[r_type], val);
 				return -ENOEXEC;
 			}
 			format = RF_INSN21B;
@@ -737,7 +737,7 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
 	      case R_IA64_LDXMOV:
 		if (gp_addressable(mod, val)) {
 			/* turn "ld8" into "mov": */
-			DEBUGP("%s: patching ld8 at %p to mov\n", __FUNCTION__, location);
+			DEBUGP("%s: patching ld8 at %p to mov\n", __func__, location);
 			ia64_patch((u64) location, 0x1fff80fe000UL, 0x10000000000UL);
 		}
 		return 0;
@@ -771,7 +771,7 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
 	if (!ok)
 		return -ENOEXEC;
 
-	DEBUGP("%s: [%p]<-%016lx = %s(%lx)\n", __FUNCTION__, location, val,
+	DEBUGP("%s: [%p]<-%016lx = %s(%lx)\n", __func__, location, val,
 	       reloc_name[r_type] ? reloc_name[r_type] : "?", sym->st_value + addend);
 
 	switch (format) {
@@ -807,7 +807,7 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
 	Elf64_Shdr *target_sec;
 	int ret;
 
-	DEBUGP("%s: applying section %u (%u relocs) to %u\n", __FUNCTION__,
+	DEBUGP("%s: applying section %u (%u relocs) to %u\n", __func__,
 	       relsec, n, sechdrs[relsec].sh_info);
 
 	target_sec = sechdrs + sechdrs[relsec].sh_info;
@@ -835,7 +835,7 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
 			gp = mod->core_size / 2;
 		gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
 		mod->arch.gp = gp;
-		DEBUGP("%s: placing gp at 0x%lx\n", __FUNCTION__, gp);
+		DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
 	}
 
 	for (i = 0; i < n; i++) {
@@ -903,7 +903,7 @@ register_unwind_table (struct module *mod)
 		init = start + num_core;
 	}
 
-	DEBUGP("%s: name=%s, gp=%lx, num_init=%lu, num_core=%lu\n", __FUNCTION__,
+	DEBUGP("%s: name=%s, gp=%lx, num_init=%lu, num_core=%lu\n", __func__,
 	       mod->name, mod->arch.gp, num_init, num_core);
 
 	/*
@@ -912,13 +912,13 @@ register_unwind_table (struct module *mod)
 	if (num_core > 0) {
 		mod->arch.core_unw_table = unw_add_unwind_table(mod->name, 0, mod->arch.gp,
 								core, core + num_core);
-		DEBUGP("%s: core: handle=%p [%p-%p)\n", __FUNCTION__,
+		DEBUGP("%s: core: handle=%p [%p-%p)\n", __func__,
 		       mod->arch.core_unw_table, core, core + num_core);
 	}
 	if (num_init > 0) {
 		mod->arch.init_unw_table = unw_add_unwind_table(mod->name, 0, mod->arch.gp,
 								init, init + num_init);
-		DEBUGP("%s: init: handle=%p [%p-%p)\n", __FUNCTION__,
+		DEBUGP("%s: init: handle=%p [%p-%p)\n", __func__,
 		       mod->arch.init_unw_table, init, init + num_init);
 	}
 }
@@ -926,7 +926,7 @@ register_unwind_table (struct module *mod)
 int
 module_finalize (const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, struct module *mod)
 {
-	DEBUGP("%s: init: entry=%p\n", __FUNCTION__, mod->init);
+	DEBUGP("%s: init: entry=%p\n", __func__, mod->init);
 	if (mod->arch.unwind)
 		register_unwind_table(mod);
 	return 0;
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index f6b99719f10f..a2aabfdc80d9 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -227,12 +227,12 @@
 #ifdef PFM_DEBUGGING
 #define DPRINT(a) \
 	do { \
-		if (unlikely(pfm_sysctl.debug >0)) { printk("%s.%d: CPU%d [%d] ", __FUNCTION__, __LINE__, smp_processor_id(), task_pid_nr(current)); printk a; } \
+		if (unlikely(pfm_sysctl.debug >0)) { printk("%s.%d: CPU%d [%d] ", __func__, __LINE__, smp_processor_id(), task_pid_nr(current)); printk a; } \
 	} while (0)
 
 #define DPRINT_ovfl(a) \
 	do { \
-		if (unlikely(pfm_sysctl.debug > 0 && pfm_sysctl.debug_ovfl >0)) { printk("%s.%d: CPU%d [%d] ", __FUNCTION__, __LINE__, smp_processor_id(), task_pid_nr(current)); printk a; } \
+		if (unlikely(pfm_sysctl.debug > 0 && pfm_sysctl.debug_ovfl >0)) { printk("%s.%d: CPU%d [%d] ", __func__, __LINE__, smp_processor_id(), task_pid_nr(current)); printk a; } \
 	} while (0)
 #endif
 
diff --git a/arch/ia64/kernel/perfmon_default_smpl.c b/arch/ia64/kernel/perfmon_default_smpl.c
index a7af1cb419f9..5f637bbfcccd 100644
--- a/arch/ia64/kernel/perfmon_default_smpl.c
+++ b/arch/ia64/kernel/perfmon_default_smpl.c
@@ -24,12 +24,12 @@ MODULE_LICENSE("GPL");
 #ifdef DEFAULT_DEBUG
 #define DPRINT(a) \
 	do { \
-		if (unlikely(pfm_sysctl.debug >0)) { printk("%s.%d: CPU%d ", __FUNCTION__, __LINE__, smp_processor_id()); printk a; } \
+		if (unlikely(pfm_sysctl.debug >0)) { printk("%s.%d: CPU%d ", __func__, __LINE__, smp_processor_id()); printk a; } \
 	} while (0)
 
 #define DPRINT_ovfl(a) \
 	do { \
-		if (unlikely(pfm_sysctl.debug > 0 && pfm_sysctl.debug_ovfl >0)) { printk("%s.%d: CPU%d ", __FUNCTION__, __LINE__, smp_processor_id()); printk a; } \
+		if (unlikely(pfm_sysctl.debug > 0 && pfm_sysctl.debug_ovfl >0)) { printk("%s.%d: CPU%d ", __func__, __LINE__, smp_processor_id()); printk a; } \
 	} while (0)
 
 #else
diff --git a/arch/ia64/kernel/ptrace.c b/arch/ia64/kernel/ptrace.c
index 7e0d7dcac1a9..ab784ec4319d 100644
--- a/arch/ia64/kernel/ptrace.c
+++ b/arch/ia64/kernel/ptrace.c
@@ -780,14 +780,14 @@ convert_to_non_syscall (struct task_struct *child, struct pt_regs *pt,
 		if ((long)((unsigned long)child + IA64_STK_OFFSET - sp)
 		    < IA64_PT_REGS_SIZE) {
 			dprintk("ptrace.%s: ran off the top of the kernel "
-				"stack\n", __FUNCTION__);
+				"stack\n", __func__);
 			return;
 		}
 		if (unw_get_pr (&prev_info, &pr) < 0) {
 			unw_get_rp(&prev_info, &ip);
 			dprintk("ptrace.%s: failed to read "
 				"predicate register (ip=0x%lx)\n",
-				__FUNCTION__, ip);
+				__func__, ip);
 			return;
 		}
 		if (unw_is_intr_frame(&info)
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index ebd1a09f3201..4aa9eaea76c3 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -690,7 +690,7 @@ get_model_name(__u8 family, __u8 model)
 	if (overflow++ == 0)
 		printk(KERN_ERR
 		       "%s: Table overflow. Some processor model information will be missing\n",
-		       __FUNCTION__);
+		       __func__);
 	return "Unknown";
 }
 
@@ -785,7 +785,7 @@ get_max_cacheline_size (void)
 	status = ia64_pal_cache_summary(&levels, &unique_caches);
 	if (status != 0) {
 		printk(KERN_ERR "%s: ia64_pal_cache_summary() failed (status=%ld)\n",
-		       __FUNCTION__, status);
+		       __func__, status);
 		max = SMP_CACHE_BYTES;
 		/* Safest setup for "flush_icache_range()" */
 		ia64_i_cache_stride_shift = I_CACHE_STRIDE_SHIFT;
@@ -798,7 +798,7 @@ get_max_cacheline_size (void)
 		if (status != 0) {
 			printk(KERN_ERR
 			       "%s: ia64_pal_cache_config_info(l=%lu, 2) failed (status=%ld)\n",
-			       __FUNCTION__, l, status);
+			       __func__, l, status);
 			max = SMP_CACHE_BYTES;
 			/* The safest setup for "flush_icache_range()" */
 			cci.pcci_stride = I_CACHE_STRIDE_SHIFT;
@@ -814,7 +814,7 @@ get_max_cacheline_size (void)
 			if (status != 0) {
 				printk(KERN_ERR
 				       "%s: ia64_pal_cache_config_info(l=%lu, 1) failed (status=%ld)\n",
-				       __FUNCTION__, l, status);
+				       __func__, l, status);
 				/* The safest setup for "flush_icache_range()" */
 				cci.pcci_stride = I_CACHE_STRIDE_SHIFT;
 			}
diff --git a/arch/ia64/kernel/unaligned.c b/arch/ia64/kernel/unaligned.c
index 52f70bbc192a..6903361d11a5 100644
--- a/arch/ia64/kernel/unaligned.c
+++ b/arch/ia64/kernel/unaligned.c
@@ -28,7 +28,7 @@ extern int die_if_kernel(char *str, struct pt_regs *regs, long err);
 #undef DEBUG_UNALIGNED_TRAP
 
 #ifdef DEBUG_UNALIGNED_TRAP
-# define DPRINT(a...)	do { printk("%s %u: ", __FUNCTION__, __LINE__); printk (a); } while (0)
+# define DPRINT(a...)	do { printk("%s %u: ", __func__, __LINE__); printk (a); } while (0)
 # define DDUMP(str,vp,len)	dump(str, vp, len)
 
 static void
@@ -674,7 +674,7 @@ emulate_load_updates (update_t type, load_store_t ld, struct pt_regs *regs, unsi
 	 * just in case.
 	 */
 	if (ld.x6_op == 1 || ld.x6_op == 3) {
-		printk(KERN_ERR "%s: register update on speculative load, error\n", __FUNCTION__);
+		printk(KERN_ERR "%s: register update on speculative load, error\n", __func__);
 		if (die_if_kernel("unaligned reference on speculative load with register update\n",
 				  regs, 30))
 			return;
@@ -1104,7 +1104,7 @@ emulate_load_floatpair (unsigned long ifa, load_store_t ld, struct pt_regs *regs
 		 */
 		if (ld.x6_op == 1 || ld.x6_op == 3)
 			printk(KERN_ERR "%s: register update on speculative load pair, error\n",
-			       __FUNCTION__);
+			       __func__);
 
 		setreg(ld.r3, ifa, 0, regs);
 	}
diff --git a/arch/ia64/kernel/unwind.c b/arch/ia64/kernel/unwind.c
index c1bdb5131814..67810b77d998 100644
--- a/arch/ia64/kernel/unwind.c
+++ b/arch/ia64/kernel/unwind.c
@@ -257,7 +257,7 @@ pt_regs_off (unsigned long reg)
 		off = unw.pt_regs_offsets[reg];
 
 	if (off < 0) {
-		UNW_DPRINT(0, "unwind.%s: bad scratch reg r%lu\n", __FUNCTION__, reg);
+		UNW_DPRINT(0, "unwind.%s: bad scratch reg r%lu\n", __func__, reg);
 		off = 0;
 	}
 	return (unsigned long) off;
@@ -268,13 +268,13 @@ get_scratch_regs (struct unw_frame_info *info)
 {
 	if (!info->pt) {
 		/* This should not happen with valid unwind info. */
-		UNW_DPRINT(0, "unwind.%s: bad unwind info: resetting info->pt\n", __FUNCTION__);
+		UNW_DPRINT(0, "unwind.%s: bad unwind info: resetting info->pt\n", __func__);
 		if (info->flags & UNW_FLAG_INTERRUPT_FRAME)
 			info->pt = (unsigned long) ((struct pt_regs *) info->psp - 1);
 		else
 			info->pt = info->sp - 16;
 	}
-	UNW_DPRINT(3, "unwind.%s: sp 0x%lx pt 0x%lx\n", __FUNCTION__, info->sp, info->pt);
+	UNW_DPRINT(3, "unwind.%s: sp 0x%lx pt 0x%lx\n", __func__, info->sp, info->pt);
 	return (struct pt_regs *) info->pt;
 }
 
@@ -294,7 +294,7 @@ unw_access_gr (struct unw_frame_info *info, int regnum, unsigned long *val, char
 			return 0;
 		}
 		UNW_DPRINT(0, "unwind.%s: trying to access non-existent r%u\n",
-			   __FUNCTION__, regnum);
+			   __func__, regnum);
 		return -1;
 	}
 
@@ -341,7 +341,7 @@ unw_access_gr (struct unw_frame_info *info, int regnum, unsigned long *val, char
 			{
 				UNW_DPRINT(0, "unwind.%s: %p outside of regstk "
 					"[0x%lx-0x%lx)\n",
-					__FUNCTION__, (void *) addr,
+					__func__, (void *) addr,
 					info->regstk.limit,
 					info->regstk.top);
 				return -1;
@@ -374,7 +374,7 @@ unw_access_gr (struct unw_frame_info *info, int regnum, unsigned long *val, char
 		    || (unsigned long) addr >= info->regstk.top)
 		{
 			UNW_DPRINT(0, "unwind.%s: ignoring attempt to access register outside "
-				   "of rbs\n", __FUNCTION__);
+				   "of rbs\n", __func__);
 			return -1;
 		}
 		if ((unsigned long) nat_addr >= info->regstk.top)
@@ -385,7 +385,7 @@ unw_access_gr (struct unw_frame_info *info, int regnum, unsigned long *val, char
 	if (write) {
 		if (read_only(addr)) {
 			UNW_DPRINT(0, "unwind.%s: ignoring attempt to write read-only location\n",
-				__FUNCTION__);
+				__func__);
 		} else {
 			*addr = *val;
 			if (*nat)
@@ -427,13 +427,13 @@ unw_access_br (struct unw_frame_info *info, int regnum, unsigned long *val, int
 
 	      default:
 		UNW_DPRINT(0, "unwind.%s: trying to access non-existent b%u\n",
-			   __FUNCTION__, regnum);
+			   __func__, regnum);
 		return -1;
 	}
 	if (write)
 		if (read_only(addr)) {
 			UNW_DPRINT(0, "unwind.%s: ignoring attempt to write read-only location\n",
-				__FUNCTION__);
+				__func__);
 		} else
 			*addr = *val;
 	else
@@ -450,7 +450,7 @@ unw_access_fr (struct unw_frame_info *info, int regnum, struct ia64_fpreg *val,
 
 	if ((unsigned) (regnum - 2) >= 126) {
 		UNW_DPRINT(0, "unwind.%s: trying to access non-existent f%u\n",
-			   __FUNCTION__, regnum);
+			   __func__, regnum);
 		return -1;
 	}
 
@@ -482,7 +482,7 @@ unw_access_fr (struct unw_frame_info *info, int regnum, struct ia64_fpreg *val,
 	if (write)
 		if (read_only(addr)) {
 			UNW_DPRINT(0, "unwind.%s: ignoring attempt to write read-only location\n",
-				__FUNCTION__);
+				__func__);
 		} else
 			*addr = *val;
 	else
@@ -572,14 +572,14 @@ unw_access_ar (struct unw_frame_info *info, int regnum, unsigned long *val, int
 
 	      default:
 		UNW_DPRINT(0, "unwind.%s: trying to access non-existent ar%u\n",
-			   __FUNCTION__, regnum);
+			   __func__, regnum);
 		return -1;
 	}
 
 	if (write) {
 		if (read_only(addr)) {
 			UNW_DPRINT(0, "unwind.%s: ignoring attempt to write read-only location\n",
-				__FUNCTION__);
+				__func__);
 		} else
 			*addr = *val;
 	} else
@@ -600,7 +600,7 @@ unw_access_pr (struct unw_frame_info *info, unsigned long *val, int write)
 	if (write) {
 		if (read_only(addr)) {
 			UNW_DPRINT(0, "unwind.%s: ignoring attempt to write read-only location\n",
-				__FUNCTION__);
+				__func__);
 		} else
 			*addr = *val;
 	} else
@@ -699,7 +699,7 @@ decode_abreg (unsigned char abreg, int memory)
 	      default:
 		break;
 	}
-	UNW_DPRINT(0, "unwind.%s: bad abreg=0x%x\n", __FUNCTION__, abreg);
+	UNW_DPRINT(0, "unwind.%s: bad abreg=0x%x\n", __func__, abreg);
 	return UNW_REG_LC;
 }
 
@@ -739,7 +739,7 @@ spill_next_when (struct unw_reg_info **regp, struct unw_reg_info *lim, unw_word
 			return;
 		}
 	}
-	UNW_DPRINT(0, "unwind.%s: excess spill!\n", __FUNCTION__);
+	UNW_DPRINT(0, "unwind.%s: excess spill!\n", __func__);
 }
 
 static inline void
@@ -855,11 +855,11 @@ desc_abi (unsigned char abi, unsigned char context, struct unw_state_record *sr)
 {
 	if (abi == 3 && context == 'i') {
 		sr->flags |= UNW_FLAG_INTERRUPT_FRAME;
-		UNW_DPRINT(3, "unwind.%s: interrupt frame\n", __FUNCTION__);
+		UNW_DPRINT(3, "unwind.%s: interrupt frame\n", __func__);
 	}
 	else
 		UNW_DPRINT(0, "unwind%s: ignoring unwabi(abi=0x%x,context=0x%x)\n",
-				__FUNCTION__, abi, context);
+				__func__, abi, context);
 }
 
 static inline void
@@ -1347,7 +1347,7 @@ script_emit (struct unw_script *script, struct unw_insn insn)
 {
 	if (script->count >= UNW_MAX_SCRIPT_LEN) {
 		UNW_DPRINT(0, "unwind.%s: script exceeds maximum size of %u instructions!\n",
-			__FUNCTION__, UNW_MAX_SCRIPT_LEN);
+			__func__, UNW_MAX_SCRIPT_LEN);
 		return;
 	}
 	script->insn[script->count++] = insn;
@@ -1389,7 +1389,7 @@ emit_nat_info (struct unw_state_record *sr, int i, struct unw_script *script)
1389 1389
1390 default: 1390 default:
1391 UNW_DPRINT(0, "unwind.%s: don't know how to emit nat info for where = %u\n", 1391 UNW_DPRINT(0, "unwind.%s: don't know how to emit nat info for where = %u\n",
1392 __FUNCTION__, r->where); 1392 __func__, r->where);
1393 return; 1393 return;
1394 } 1394 }
1395 insn.opc = opc; 1395 insn.opc = opc;
@@ -1446,7 +1446,7 @@ compile_reg (struct unw_state_record *sr, int i, struct unw_script *script)
1446 val = offsetof(struct pt_regs, f6) + 16*(rval - 6); 1446 val = offsetof(struct pt_regs, f6) + 16*(rval - 6);
1447 else 1447 else
1448 UNW_DPRINT(0, "unwind.%s: kernel may not touch f%lu\n", 1448 UNW_DPRINT(0, "unwind.%s: kernel may not touch f%lu\n",
1449 __FUNCTION__, rval); 1449 __func__, rval);
1450 } 1450 }
1451 break; 1451 break;
1452 1452
@@ -1474,7 +1474,7 @@ compile_reg (struct unw_state_record *sr, int i, struct unw_script *script)
1474 1474
1475 default: 1475 default:
1476 UNW_DPRINT(0, "unwind%s: register %u has unexpected `where' value of %u\n", 1476 UNW_DPRINT(0, "unwind%s: register %u has unexpected `where' value of %u\n",
1477 __FUNCTION__, i, r->where); 1477 __func__, i, r->where);
1478 break; 1478 break;
1479 } 1479 }
1480 insn.opc = opc; 1480 insn.opc = opc;
@@ -1547,10 +1547,10 @@ build_script (struct unw_frame_info *info)
1547 r->when = UNW_WHEN_NEVER; 1547 r->when = UNW_WHEN_NEVER;
1548 sr.pr_val = info->pr; 1548 sr.pr_val = info->pr;
1549 1549
1550 UNW_DPRINT(3, "unwind.%s: ip 0x%lx\n", __FUNCTION__, ip); 1550 UNW_DPRINT(3, "unwind.%s: ip 0x%lx\n", __func__, ip);
1551 script = script_new(ip); 1551 script = script_new(ip);
1552 if (!script) { 1552 if (!script) {
1553 UNW_DPRINT(0, "unwind.%s: failed to create unwind script\n", __FUNCTION__); 1553 UNW_DPRINT(0, "unwind.%s: failed to create unwind script\n", __func__);
1554 STAT(unw.stat.script.build_time += ia64_get_itc() - start); 1554 STAT(unw.stat.script.build_time += ia64_get_itc() - start);
1555 return NULL; 1555 return NULL;
1556 } 1556 }
@@ -1569,7 +1569,7 @@ build_script (struct unw_frame_info *info)
1569 if (!e) { 1569 if (!e) {
1570 /* no info, return default unwinder (leaf proc, no mem stack, no saved regs) */ 1570 /* no info, return default unwinder (leaf proc, no mem stack, no saved regs) */
1571 UNW_DPRINT(1, "unwind.%s: no unwind info for ip=0x%lx (prev ip=0x%lx)\n", 1571 UNW_DPRINT(1, "unwind.%s: no unwind info for ip=0x%lx (prev ip=0x%lx)\n",
1572 __FUNCTION__, ip, unw.cache[info->prev_script].ip); 1572 __func__, ip, unw.cache[info->prev_script].ip);
1573 sr.curr.reg[UNW_REG_RP].where = UNW_WHERE_BR; 1573 sr.curr.reg[UNW_REG_RP].where = UNW_WHERE_BR;
1574 sr.curr.reg[UNW_REG_RP].when = -1; 1574 sr.curr.reg[UNW_REG_RP].when = -1;
1575 sr.curr.reg[UNW_REG_RP].val = 0; 1575 sr.curr.reg[UNW_REG_RP].val = 0;
@@ -1618,13 +1618,13 @@ build_script (struct unw_frame_info *info)
1618 sr.curr.reg[UNW_REG_RP].when = -1; 1618 sr.curr.reg[UNW_REG_RP].when = -1;
1619 sr.curr.reg[UNW_REG_RP].val = sr.return_link_reg; 1619 sr.curr.reg[UNW_REG_RP].val = sr.return_link_reg;
1620 UNW_DPRINT(1, "unwind.%s: using default for rp at ip=0x%lx where=%d val=0x%lx\n", 1620 UNW_DPRINT(1, "unwind.%s: using default for rp at ip=0x%lx where=%d val=0x%lx\n",
1621 __FUNCTION__, ip, sr.curr.reg[UNW_REG_RP].where, 1621 __func__, ip, sr.curr.reg[UNW_REG_RP].where,
1622 sr.curr.reg[UNW_REG_RP].val); 1622 sr.curr.reg[UNW_REG_RP].val);
1623 } 1623 }
1624 1624
1625#ifdef UNW_DEBUG 1625#ifdef UNW_DEBUG
1626 UNW_DPRINT(1, "unwind.%s: state record for func 0x%lx, t=%u:\n", 1626 UNW_DPRINT(1, "unwind.%s: state record for func 0x%lx, t=%u:\n",
1627 __FUNCTION__, table->segment_base + e->start_offset, sr.when_target); 1627 __func__, table->segment_base + e->start_offset, sr.when_target);
1628 for (r = sr.curr.reg; r < sr.curr.reg + UNW_NUM_REGS; ++r) { 1628 for (r = sr.curr.reg; r < sr.curr.reg + UNW_NUM_REGS; ++r) {
1629 if (r->where != UNW_WHERE_NONE || r->when != UNW_WHEN_NEVER) { 1629 if (r->where != UNW_WHERE_NONE || r->when != UNW_WHEN_NEVER) {
1630 UNW_DPRINT(1, " %s <- ", unw.preg_name[r - sr.curr.reg]); 1630 UNW_DPRINT(1, " %s <- ", unw.preg_name[r - sr.curr.reg]);
@@ -1746,7 +1746,7 @@ run_script (struct unw_script *script, struct unw_frame_info *state)
1746 } else { 1746 } else {
1747 s[dst] = 0; 1747 s[dst] = 0;
1748 UNW_DPRINT(0, "unwind.%s: no state->pt, dst=%ld, val=%ld\n", 1748 UNW_DPRINT(0, "unwind.%s: no state->pt, dst=%ld, val=%ld\n",
1749 __FUNCTION__, dst, val); 1749 __func__, dst, val);
1750 } 1750 }
1751 break; 1751 break;
1752 1752
@@ -1756,7 +1756,7 @@ run_script (struct unw_script *script, struct unw_frame_info *state)
1756 else { 1756 else {
1757 s[dst] = 0; 1757 s[dst] = 0;
1758 UNW_DPRINT(0, "unwind.%s: UNW_INSN_MOVE_CONST bad val=%ld\n", 1758 UNW_DPRINT(0, "unwind.%s: UNW_INSN_MOVE_CONST bad val=%ld\n",
1759 __FUNCTION__, val); 1759 __func__, val);
1760 } 1760 }
1761 break; 1761 break;
1762 1762
@@ -1791,7 +1791,7 @@ run_script (struct unw_script *script, struct unw_frame_info *state)
1791 || s[val] < TASK_SIZE) 1791 || s[val] < TASK_SIZE)
1792 { 1792 {
1793 UNW_DPRINT(0, "unwind.%s: rejecting bad psp=0x%lx\n", 1793 UNW_DPRINT(0, "unwind.%s: rejecting bad psp=0x%lx\n",
1794 __FUNCTION__, s[val]); 1794 __func__, s[val]);
1795 break; 1795 break;
1796 } 1796 }
1797#endif 1797#endif
@@ -1825,7 +1825,7 @@ find_save_locs (struct unw_frame_info *info)
1825 if ((info->ip & (local_cpu_data->unimpl_va_mask | 0xf)) || info->ip < TASK_SIZE) { 1825 if ((info->ip & (local_cpu_data->unimpl_va_mask | 0xf)) || info->ip < TASK_SIZE) {
1826 /* don't let obviously bad addresses pollute the cache */ 1826 /* don't let obviously bad addresses pollute the cache */
1827 /* FIXME: should really be level 0 but it occurs too often. KAO */ 1827 /* FIXME: should really be level 0 but it occurs too often. KAO */
1828 UNW_DPRINT(1, "unwind.%s: rejecting bad ip=0x%lx\n", __FUNCTION__, info->ip); 1828 UNW_DPRINT(1, "unwind.%s: rejecting bad ip=0x%lx\n", __func__, info->ip);
1829 info->rp_loc = NULL; 1829 info->rp_loc = NULL;
1830 return -1; 1830 return -1;
1831 } 1831 }
@@ -1838,7 +1838,7 @@ find_save_locs (struct unw_frame_info *info)
1838 spin_unlock_irqrestore(&unw.lock, flags); 1838 spin_unlock_irqrestore(&unw.lock, flags);
1839 UNW_DPRINT(0, 1839 UNW_DPRINT(0,
1840 "unwind.%s: failed to locate/build unwind script for ip %lx\n", 1840 "unwind.%s: failed to locate/build unwind script for ip %lx\n",
1841 __FUNCTION__, info->ip); 1841 __func__, info->ip);
1842 return -1; 1842 return -1;
1843 } 1843 }
1844 have_write_lock = 1; 1844 have_write_lock = 1;
@@ -1882,21 +1882,21 @@ unw_unwind (struct unw_frame_info *info)
1882 if (!unw_valid(info, info->rp_loc)) { 1882 if (!unw_valid(info, info->rp_loc)) {
1883 /* FIXME: should really be level 0 but it occurs too often. KAO */ 1883 /* FIXME: should really be level 0 but it occurs too often. KAO */
1884 UNW_DPRINT(1, "unwind.%s: failed to locate return link (ip=0x%lx)!\n", 1884 UNW_DPRINT(1, "unwind.%s: failed to locate return link (ip=0x%lx)!\n",
1885 __FUNCTION__, info->ip); 1885 __func__, info->ip);
1886 STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags)); 1886 STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
1887 return -1; 1887 return -1;
1888 } 1888 }
1889 /* restore the ip */ 1889 /* restore the ip */
1890 ip = info->ip = *info->rp_loc; 1890 ip = info->ip = *info->rp_loc;
1891 if (ip < GATE_ADDR) { 1891 if (ip < GATE_ADDR) {
1892 UNW_DPRINT(2, "unwind.%s: reached user-space (ip=0x%lx)\n", __FUNCTION__, ip); 1892 UNW_DPRINT(2, "unwind.%s: reached user-space (ip=0x%lx)\n", __func__, ip);
1893 STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags)); 1893 STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
1894 return -1; 1894 return -1;
1895 } 1895 }
1896 1896
1897 /* validate the previous stack frame pointer */ 1897 /* validate the previous stack frame pointer */
1898 if (!unw_valid(info, info->pfs_loc)) { 1898 if (!unw_valid(info, info->pfs_loc)) {
1899 UNW_DPRINT(0, "unwind.%s: failed to locate ar.pfs!\n", __FUNCTION__); 1899 UNW_DPRINT(0, "unwind.%s: failed to locate ar.pfs!\n", __func__);
1900 STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags)); 1900 STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
1901 return -1; 1901 return -1;
1902 } 1902 }
@@ -1912,13 +1912,13 @@ unw_unwind (struct unw_frame_info *info)
1912 num_regs = *info->cfm_loc & 0x7f; /* size of frame */ 1912 num_regs = *info->cfm_loc & 0x7f; /* size of frame */
1913 info->pfs_loc = 1913 info->pfs_loc =
1914 (unsigned long *) (info->pt + offsetof(struct pt_regs, ar_pfs)); 1914 (unsigned long *) (info->pt + offsetof(struct pt_regs, ar_pfs));
1915 UNW_DPRINT(3, "unwind.%s: interrupt_frame pt 0x%lx\n", __FUNCTION__, info->pt); 1915 UNW_DPRINT(3, "unwind.%s: interrupt_frame pt 0x%lx\n", __func__, info->pt);
1916 } else 1916 } else
1917 num_regs = (*info->cfm_loc >> 7) & 0x7f; /* size of locals */ 1917 num_regs = (*info->cfm_loc >> 7) & 0x7f; /* size of locals */
1918 info->bsp = (unsigned long) ia64_rse_skip_regs((unsigned long *) info->bsp, -num_regs); 1918 info->bsp = (unsigned long) ia64_rse_skip_regs((unsigned long *) info->bsp, -num_regs);
1919 if (info->bsp < info->regstk.limit || info->bsp > info->regstk.top) { 1919 if (info->bsp < info->regstk.limit || info->bsp > info->regstk.top) {
1920 UNW_DPRINT(0, "unwind.%s: bsp (0x%lx) out of range [0x%lx-0x%lx]\n", 1920 UNW_DPRINT(0, "unwind.%s: bsp (0x%lx) out of range [0x%lx-0x%lx]\n",
1921 __FUNCTION__, info->bsp, info->regstk.limit, info->regstk.top); 1921 __func__, info->bsp, info->regstk.limit, info->regstk.top);
1922 STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags)); 1922 STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
1923 return -1; 1923 return -1;
1924 } 1924 }
@@ -1927,14 +1927,14 @@ unw_unwind (struct unw_frame_info *info)
1927 info->sp = info->psp; 1927 info->sp = info->psp;
1928 if (info->sp < info->memstk.top || info->sp > info->memstk.limit) { 1928 if (info->sp < info->memstk.top || info->sp > info->memstk.limit) {
1929 UNW_DPRINT(0, "unwind.%s: sp (0x%lx) out of range [0x%lx-0x%lx]\n", 1929 UNW_DPRINT(0, "unwind.%s: sp (0x%lx) out of range [0x%lx-0x%lx]\n",
1930 __FUNCTION__, info->sp, info->memstk.top, info->memstk.limit); 1930 __func__, info->sp, info->memstk.top, info->memstk.limit);
1931 STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags)); 1931 STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
1932 return -1; 1932 return -1;
1933 } 1933 }
1934 1934
1935 if (info->ip == prev_ip && info->sp == prev_sp && info->bsp == prev_bsp) { 1935 if (info->ip == prev_ip && info->sp == prev_sp && info->bsp == prev_bsp) {
1936 UNW_DPRINT(0, "unwind.%s: ip, sp, bsp unchanged; stopping here (ip=0x%lx)\n", 1936 UNW_DPRINT(0, "unwind.%s: ip, sp, bsp unchanged; stopping here (ip=0x%lx)\n",
1937 __FUNCTION__, ip); 1937 __func__, ip);
1938 STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags)); 1938 STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
1939 return -1; 1939 return -1;
1940 } 1940 }
@@ -1961,7 +1961,7 @@ unw_unwind_to_user (struct unw_frame_info *info)
1961 if ((long)((unsigned long)info->task + IA64_STK_OFFSET - sp) 1961 if ((long)((unsigned long)info->task + IA64_STK_OFFSET - sp)
1962 < IA64_PT_REGS_SIZE) { 1962 < IA64_PT_REGS_SIZE) {
1963 UNW_DPRINT(0, "unwind.%s: ran off the top of the kernel stack\n", 1963 UNW_DPRINT(0, "unwind.%s: ran off the top of the kernel stack\n",
1964 __FUNCTION__); 1964 __func__);
1965 break; 1965 break;
1966 } 1966 }
1967 if (unw_is_intr_frame(info) && 1967 if (unw_is_intr_frame(info) &&
@@ -1971,13 +1971,13 @@ unw_unwind_to_user (struct unw_frame_info *info)
1971 unw_get_rp(info, &ip); 1971 unw_get_rp(info, &ip);
1972 UNW_DPRINT(0, "unwind.%s: failed to read " 1972 UNW_DPRINT(0, "unwind.%s: failed to read "
1973 "predicate register (ip=0x%lx)\n", 1973 "predicate register (ip=0x%lx)\n",
1974 __FUNCTION__, ip); 1974 __func__, ip);
1975 return -1; 1975 return -1;
1976 } 1976 }
1977 } while (unw_unwind(info) >= 0); 1977 } while (unw_unwind(info) >= 0);
1978 unw_get_ip(info, &ip); 1978 unw_get_ip(info, &ip);
1979 UNW_DPRINT(0, "unwind.%s: failed to unwind to user-level (ip=0x%lx)\n", 1979 UNW_DPRINT(0, "unwind.%s: failed to unwind to user-level (ip=0x%lx)\n",
1980 __FUNCTION__, ip); 1980 __func__, ip);
1981 return -1; 1981 return -1;
1982} 1982}
1983EXPORT_SYMBOL(unw_unwind_to_user); 1983EXPORT_SYMBOL(unw_unwind_to_user);
@@ -2028,7 +2028,7 @@ init_frame_info (struct unw_frame_info *info, struct task_struct *t,
2028 " pr 0x%lx\n" 2028 " pr 0x%lx\n"
2029 " sw 0x%lx\n" 2029 " sw 0x%lx\n"
2030 " sp 0x%lx\n", 2030 " sp 0x%lx\n",
2031 __FUNCTION__, (unsigned long) t, rbslimit, rbstop, stktop, stklimit, 2031 __func__, (unsigned long) t, rbslimit, rbstop, stktop, stklimit,
2032 info->pr, (unsigned long) info->sw, info->sp); 2032 info->pr, (unsigned long) info->sw, info->sp);
2033 STAT(unw.stat.api.init_time += ia64_get_itc() - start; local_irq_restore(flags)); 2033 STAT(unw.stat.api.init_time += ia64_get_itc() - start; local_irq_restore(flags));
2034} 2034}
@@ -2047,7 +2047,7 @@ unw_init_frame_info (struct unw_frame_info *info, struct task_struct *t, struct
2047 " bsp 0x%lx\n" 2047 " bsp 0x%lx\n"
2048 " sol 0x%lx\n" 2048 " sol 0x%lx\n"
2049 " ip 0x%lx\n", 2049 " ip 0x%lx\n",
2050 __FUNCTION__, info->bsp, sol, info->ip); 2050 __func__, info->bsp, sol, info->ip);
2051 find_save_locs(info); 2051 find_save_locs(info);
2052} 2052}
2053 2053
@@ -2058,7 +2058,7 @@ unw_init_from_blocked_task (struct unw_frame_info *info, struct task_struct *t)
2058{ 2058{
2059 struct switch_stack *sw = (struct switch_stack *) (t->thread.ksp + 16); 2059 struct switch_stack *sw = (struct switch_stack *) (t->thread.ksp + 16);
2060 2060
2061 UNW_DPRINT(1, "unwind.%s\n", __FUNCTION__); 2061 UNW_DPRINT(1, "unwind.%s\n", __func__);
2062 unw_init_frame_info(info, t, sw); 2062 unw_init_frame_info(info, t, sw);
2063} 2063}
2064EXPORT_SYMBOL(unw_init_from_blocked_task); 2064EXPORT_SYMBOL(unw_init_from_blocked_task);
@@ -2088,7 +2088,7 @@ unw_add_unwind_table (const char *name, unsigned long segment_base, unsigned lon
2088 2088
2089 if (end - start <= 0) { 2089 if (end - start <= 0) {
2090 UNW_DPRINT(0, "unwind.%s: ignoring attempt to insert empty unwind table\n", 2090 UNW_DPRINT(0, "unwind.%s: ignoring attempt to insert empty unwind table\n",
2091 __FUNCTION__); 2091 __func__);
2092 return NULL; 2092 return NULL;
2093 } 2093 }
2094 2094
@@ -2119,14 +2119,14 @@ unw_remove_unwind_table (void *handle)
2119 2119
2120 if (!handle) { 2120 if (!handle) {
2121 UNW_DPRINT(0, "unwind.%s: ignoring attempt to remove non-existent unwind table\n", 2121 UNW_DPRINT(0, "unwind.%s: ignoring attempt to remove non-existent unwind table\n",
2122 __FUNCTION__); 2122 __func__);
2123 return; 2123 return;
2124 } 2124 }
2125 2125
2126 table = handle; 2126 table = handle;
2127 if (table == &unw.kernel_table) { 2127 if (table == &unw.kernel_table) {
2128 UNW_DPRINT(0, "unwind.%s: sorry, freeing the kernel's unwind table is a " 2128 UNW_DPRINT(0, "unwind.%s: sorry, freeing the kernel's unwind table is a "
2129 "no-can-do!\n", __FUNCTION__); 2129 "no-can-do!\n", __func__);
2130 return; 2130 return;
2131 } 2131 }
2132 2132
@@ -2139,7 +2139,7 @@ unw_remove_unwind_table (void *handle)
2139 break; 2139 break;
2140 if (!prev) { 2140 if (!prev) {
2141 UNW_DPRINT(0, "unwind.%s: failed to find unwind table %p\n", 2141 UNW_DPRINT(0, "unwind.%s: failed to find unwind table %p\n",
2142 __FUNCTION__, (void *) table); 2142 __func__, (void *) table);
2143 spin_unlock_irqrestore(&unw.lock, flags); 2143 spin_unlock_irqrestore(&unw.lock, flags);
2144 return; 2144 return;
2145 } 2145 }
@@ -2185,7 +2185,7 @@ create_gate_table (void)
2185 } 2185 }
2186 2186
2187 if (!punw) { 2187 if (!punw) {
2188 printk("%s: failed to find gate DSO's unwind table!\n", __FUNCTION__); 2188 printk("%s: failed to find gate DSO's unwind table!\n", __func__);
2189 return 0; 2189 return 0;
2190 } 2190 }
2191 2191
@@ -2202,7 +2202,7 @@ create_gate_table (void)
2202 unw.gate_table = kmalloc(size, GFP_KERNEL); 2202 unw.gate_table = kmalloc(size, GFP_KERNEL);
2203 if (!unw.gate_table) { 2203 if (!unw.gate_table) {
2204 unw.gate_table_size = 0; 2204 unw.gate_table_size = 0;
2205 printk(KERN_ERR "%s: unable to create unwind data for gate page!\n", __FUNCTION__); 2205 printk(KERN_ERR "%s: unable to create unwind data for gate page!\n", __func__);
2206 return 0; 2206 return 0;
2207 } 2207 }
2208 unw.gate_table_size = size; 2208 unw.gate_table_size = size;
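
Note (not part of the patch itself): __func__ is the predefined identifier standardized by C99 -- each function behaves as if it contained static const char __func__[] = "function-name"; -- while __FUNCTION__ is the older GCC-specific spelling that this series replaces. Both evaluate to the enclosing function's name at run time, so every call site touched above simply passes it through a %s format argument. Below is a minimal user-space sketch of the same pattern; the DBG() helper, its "dbg." prefix, and the probe() function are illustrative assumptions, not code from this tree:

	#include <stdio.h>

	/* Debug helper in the spirit of the UNW_DPRINT()/DBG_RES() calls in the
	 * hunks above; the macro name and message prefix are made up here. */
	#define DBG(fmt, ...) printf("dbg.%s: " fmt, __func__, __VA_ARGS__)

	static void probe(int unit)
	{
		DBG("probing unit %d\n", unit);	/* prints "dbg.probe: probing unit 3" */
	}

	int main(void)
	{
		probe(3);
		return 0;
	}

Because C99 defines __func__ as a predeclared array rather than a string literal, it cannot be pasted into adjacent string literals at compile time; all of the converted sites pass it as a separate argument, as above, so the substitution is mechanical and safe.
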
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index 25aef6211a54..a4ca657c72c6 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -714,7 +714,7 @@ int arch_add_memory(int nid, u64 start, u64 size)
714 714
715 if (ret) 715 if (ret)
716 printk("%s: Problem encountered in __add_pages() as ret=%d\n", 716 printk("%s: Problem encountered in __add_pages() as ret=%d\n",
717 __FUNCTION__, ret); 717 __func__, ret);
718 718
719 return ret; 719 return ret;
720} 720}
diff --git a/arch/ia64/pci/pci.c b/arch/ia64/pci/pci.c
index 8fd7e825192b..e282c348dcde 100644
--- a/arch/ia64/pci/pci.c
+++ b/arch/ia64/pci/pci.c
@@ -765,7 +765,7 @@ static void __init set_pci_cacheline_size(void)
765 status = ia64_pal_cache_summary(&levels, &unique_caches); 765 status = ia64_pal_cache_summary(&levels, &unique_caches);
766 if (status != 0) { 766 if (status != 0) {
767 printk(KERN_ERR "%s: ia64_pal_cache_summary() failed " 767 printk(KERN_ERR "%s: ia64_pal_cache_summary() failed "
768 "(status=%ld)\n", __FUNCTION__, status); 768 "(status=%ld)\n", __func__, status);
769 return; 769 return;
770 } 770 }
771 771
@@ -773,7 +773,7 @@ static void __init set_pci_cacheline_size(void)
773 /* cache_type (data_or_unified)= */ 2, &cci); 773 /* cache_type (data_or_unified)= */ 2, &cci);
774 if (status != 0) { 774 if (status != 0) {
775 printk(KERN_ERR "%s: ia64_pal_cache_config_info() failed " 775 printk(KERN_ERR "%s: ia64_pal_cache_config_info() failed "
776 "(status=%ld)\n", __FUNCTION__, status); 776 "(status=%ld)\n", __func__, status);
777 return; 777 return;
778 } 778 }
779 pci_cache_line_size = (1 << cci.pcci_line_size) / 4; 779 pci_cache_line_size = (1 << cci.pcci_line_size) / 4;
diff --git a/arch/ia64/sn/kernel/huberror.c b/arch/ia64/sn/kernel/huberror.c
index b663168da55c..0101c7924a4d 100644
--- a/arch/ia64/sn/kernel/huberror.c
+++ b/arch/ia64/sn/kernel/huberror.c
@@ -37,7 +37,7 @@ static irqreturn_t hub_eint_handler(int irq, void *arg)
37 (u64) nasid, 0, 0, 0, 0, 0, 0); 37 (u64) nasid, 0, 0, 0, 0, 0, 0);
38 38
39 if ((int)ret_stuff.v0) 39 if ((int)ret_stuff.v0)
40 panic("%s: Fatal %s Error", __FUNCTION__, 40 panic("%s: Fatal %s Error", __func__,
41 ((nasid & 1) ? "TIO" : "HUBII")); 41 ((nasid & 1) ? "TIO" : "HUBII"));
42 42
43 if (!(nasid & 1)) /* Not a TIO, handle CRB errors */ 43 if (!(nasid & 1)) /* Not a TIO, handle CRB errors */
@@ -48,7 +48,7 @@ static irqreturn_t hub_eint_handler(int irq, void *arg)
48 (u64) nasid, 0, 0, 0, 0, 0, 0); 48 (u64) nasid, 0, 0, 0, 0, 0, 0);
49 49
50 if ((int)ret_stuff.v0) 50 if ((int)ret_stuff.v0)
51 panic("%s: Fatal TIO Error", __FUNCTION__); 51 panic("%s: Fatal TIO Error", __func__);
52 } else 52 } else
53 bte_error_handler((unsigned long)NODEPDA(nasid_to_cnodeid(nasid))); 53 bte_error_handler((unsigned long)NODEPDA(nasid_to_cnodeid(nasid)));
54 54
diff --git a/arch/ia64/sn/kernel/io_acpi_init.c b/arch/ia64/sn/kernel/io_acpi_init.c
index 3c7178f5dce8..6568942a95f0 100644
--- a/arch/ia64/sn/kernel/io_acpi_init.c
+++ b/arch/ia64/sn/kernel/io_acpi_init.c
@@ -133,7 +133,7 @@ sn_get_bussoft_ptr(struct pci_bus *bus)
133 if (ACPI_FAILURE(status)) { 133 if (ACPI_FAILURE(status)) {
134 printk(KERN_ERR "%s: " 134 printk(KERN_ERR "%s: "
135 "acpi_get_vendor_resource() failed (0x%x) for: ", 135 "acpi_get_vendor_resource() failed (0x%x) for: ",
136 __FUNCTION__, status); 136 __func__, status);
137 acpi_ns_print_node_pathname(handle, NULL); 137 acpi_ns_print_node_pathname(handle, NULL);
138 printk("\n"); 138 printk("\n");
139 return NULL; 139 return NULL;
@@ -145,7 +145,7 @@ sn_get_bussoft_ptr(struct pci_bus *bus)
145 sizeof(struct pcibus_bussoft *)) { 145 sizeof(struct pcibus_bussoft *)) {
146 printk(KERN_ERR 146 printk(KERN_ERR
147 "%s: Invalid vendor data length %d\n", 147 "%s: Invalid vendor data length %d\n",
148 __FUNCTION__, vendor->byte_length); 148 __func__, vendor->byte_length);
149 kfree(buffer.pointer); 149 kfree(buffer.pointer);
150 return NULL; 150 return NULL;
151 } 151 }
@@ -184,7 +184,7 @@ sn_extract_device_info(acpi_handle handle, struct pcidev_info **pcidev_info,
184 if (ACPI_FAILURE(status)) { 184 if (ACPI_FAILURE(status)) {
185 printk(KERN_ERR 185 printk(KERN_ERR
186 "%s: acpi_get_vendor_resource() failed (0x%x) for: ", 186 "%s: acpi_get_vendor_resource() failed (0x%x) for: ",
187 __FUNCTION__, status); 187 __func__, status);
188 acpi_ns_print_node_pathname(handle, NULL); 188 acpi_ns_print_node_pathname(handle, NULL);
189 printk("\n"); 189 printk("\n");
190 return 1; 190 return 1;
@@ -196,7 +196,7 @@ sn_extract_device_info(acpi_handle handle, struct pcidev_info **pcidev_info,
196 sizeof(struct pci_devdev_info *)) { 196 sizeof(struct pci_devdev_info *)) {
197 printk(KERN_ERR 197 printk(KERN_ERR
198 "%s: Invalid vendor data length: %d for: ", 198 "%s: Invalid vendor data length: %d for: ",
199 __FUNCTION__, vendor->byte_length); 199 __func__, vendor->byte_length);
200 acpi_ns_print_node_pathname(handle, NULL); 200 acpi_ns_print_node_pathname(handle, NULL);
201 printk("\n"); 201 printk("\n");
202 ret = 1; 202 ret = 1;
@@ -205,7 +205,7 @@ sn_extract_device_info(acpi_handle handle, struct pcidev_info **pcidev_info,
205 205
206 pcidev_ptr = kzalloc(sizeof(struct pcidev_info), GFP_KERNEL); 206 pcidev_ptr = kzalloc(sizeof(struct pcidev_info), GFP_KERNEL);
207 if (!pcidev_ptr) 207 if (!pcidev_ptr)
208 panic("%s: Unable to alloc memory for pcidev_info", __FUNCTION__); 208 panic("%s: Unable to alloc memory for pcidev_info", __func__);
209 209
210 memcpy(&addr, vendor->byte_data, sizeof(struct pcidev_info *)); 210 memcpy(&addr, vendor->byte_data, sizeof(struct pcidev_info *));
211 pcidev_prom_ptr = __va(addr); 211 pcidev_prom_ptr = __va(addr);
@@ -214,7 +214,7 @@ sn_extract_device_info(acpi_handle handle, struct pcidev_info **pcidev_info,
214 /* Get the IRQ info */ 214 /* Get the IRQ info */
215 irq_info = kzalloc(sizeof(struct sn_irq_info), GFP_KERNEL); 215 irq_info = kzalloc(sizeof(struct sn_irq_info), GFP_KERNEL);
216 if (!irq_info) 216 if (!irq_info)
217 panic("%s: Unable to alloc memory for sn_irq_info", __FUNCTION__); 217 panic("%s: Unable to alloc memory for sn_irq_info", __func__);
218 218
219 if (pcidev_ptr->pdi_sn_irq_info) { 219 if (pcidev_ptr->pdi_sn_irq_info) {
220 irq_info_prom = __va(pcidev_ptr->pdi_sn_irq_info); 220 irq_info_prom = __va(pcidev_ptr->pdi_sn_irq_info);
@@ -249,10 +249,10 @@ get_host_devfn(acpi_handle device_handle, acpi_handle rootbus_handle)
249 status = acpi_get_parent(child, &parent); 249 status = acpi_get_parent(child, &parent);
250 if (ACPI_FAILURE(status)) { 250 if (ACPI_FAILURE(status)) {
251 printk(KERN_ERR "%s: acpi_get_parent() failed " 251 printk(KERN_ERR "%s: acpi_get_parent() failed "
252 "(0x%x) for: ", __FUNCTION__, status); 252 "(0x%x) for: ", __func__, status);
253 acpi_ns_print_node_pathname(child, NULL); 253 acpi_ns_print_node_pathname(child, NULL);
254 printk("\n"); 254 printk("\n");
255 panic("%s: Unable to find host devfn\n", __FUNCTION__); 255 panic("%s: Unable to find host devfn\n", __func__);
256 } 256 }
257 if (parent == rootbus_handle) 257 if (parent == rootbus_handle)
258 break; 258 break;
@@ -260,7 +260,7 @@ get_host_devfn(acpi_handle device_handle, acpi_handle rootbus_handle)
260 } 260 }
261 if (!child) { 261 if (!child) {
262 printk(KERN_ERR "%s: Unable to find root bus for: ", 262 printk(KERN_ERR "%s: Unable to find root bus for: ",
263 __FUNCTION__); 263 __func__);
264 acpi_ns_print_node_pathname(device_handle, NULL); 264 acpi_ns_print_node_pathname(device_handle, NULL);
265 printk("\n"); 265 printk("\n");
266 BUG(); 266 BUG();
@@ -269,10 +269,10 @@ get_host_devfn(acpi_handle device_handle, acpi_handle rootbus_handle)
269 status = acpi_evaluate_integer(child, METHOD_NAME__ADR, NULL, &adr); 269 status = acpi_evaluate_integer(child, METHOD_NAME__ADR, NULL, &adr);
270 if (ACPI_FAILURE(status)) { 270 if (ACPI_FAILURE(status)) {
271 printk(KERN_ERR "%s: Unable to get _ADR (0x%x) for: ", 271 printk(KERN_ERR "%s: Unable to get _ADR (0x%x) for: ",
272 __FUNCTION__, status); 272 __func__, status);
273 acpi_ns_print_node_pathname(child, NULL); 273 acpi_ns_print_node_pathname(child, NULL);
274 printk("\n"); 274 printk("\n");
275 panic("%s: Unable to find host devfn\n", __FUNCTION__); 275 panic("%s: Unable to find host devfn\n", __func__);
276 } 276 }
277 277
278 slot = (adr >> 16) & 0xffff; 278 slot = (adr >> 16) & 0xffff;
@@ -308,7 +308,7 @@ find_matching_device(acpi_handle handle, u32 lvl, void *context, void **rv)
308 if (ACPI_FAILURE(status)) { 308 if (ACPI_FAILURE(status)) {
309 printk(KERN_ERR 309 printk(KERN_ERR
310 "%s: acpi_get_parent() failed (0x%x) for: ", 310 "%s: acpi_get_parent() failed (0x%x) for: ",
311 __FUNCTION__, status); 311 __func__, status);
312 acpi_ns_print_node_pathname(handle, NULL); 312 acpi_ns_print_node_pathname(handle, NULL);
313 printk("\n"); 313 printk("\n");
314 return AE_OK; 314 return AE_OK;
@@ -318,7 +318,7 @@ find_matching_device(acpi_handle handle, u32 lvl, void *context, void **rv)
318 if (ACPI_FAILURE(status)) { 318 if (ACPI_FAILURE(status)) {
319 printk(KERN_ERR 319 printk(KERN_ERR
320 "%s: Failed to find _BBN in parent of: ", 320 "%s: Failed to find _BBN in parent of: ",
321 __FUNCTION__); 321 __func__);
322 acpi_ns_print_node_pathname(handle, NULL); 322 acpi_ns_print_node_pathname(handle, NULL);
323 printk("\n"); 323 printk("\n");
324 return AE_OK; 324 return AE_OK;
@@ -358,14 +358,14 @@ sn_acpi_get_pcidev_info(struct pci_dev *dev, struct pcidev_info **pcidev_info,
358 if (segment != pci_domain_nr(dev)) { 358 if (segment != pci_domain_nr(dev)) {
359 printk(KERN_ERR 359 printk(KERN_ERR
360 "%s: Segment number mismatch, 0x%lx vs 0x%x for: ", 360 "%s: Segment number mismatch, 0x%lx vs 0x%x for: ",
361 __FUNCTION__, segment, pci_domain_nr(dev)); 361 __func__, segment, pci_domain_nr(dev));
362 acpi_ns_print_node_pathname(rootbus_handle, NULL); 362 acpi_ns_print_node_pathname(rootbus_handle, NULL);
363 printk("\n"); 363 printk("\n");
364 return 1; 364 return 1;
365 } 365 }
366 } else { 366 } else {
367 printk(KERN_ERR "%s: Unable to get __SEG from: ", 367 printk(KERN_ERR "%s: Unable to get __SEG from: ",
368 __FUNCTION__); 368 __func__);
369 acpi_ns_print_node_pathname(rootbus_handle, NULL); 369 acpi_ns_print_node_pathname(rootbus_handle, NULL);
370 printk("\n"); 370 printk("\n");
371 return 1; 371 return 1;
@@ -386,7 +386,7 @@ sn_acpi_get_pcidev_info(struct pci_dev *dev, struct pcidev_info **pcidev_info,
386 if (!pcidev_match.handle) { 386 if (!pcidev_match.handle) {
387 printk(KERN_ERR 387 printk(KERN_ERR
388 "%s: Could not find matching ACPI device for %s.\n", 388 "%s: Could not find matching ACPI device for %s.\n",
389 __FUNCTION__, pci_name(dev)); 389 __func__, pci_name(dev));
390 return 1; 390 return 1;
391 } 391 }
392 392
@@ -422,7 +422,7 @@ sn_acpi_slot_fixup(struct pci_dev *dev)
422 422
423 if (sn_acpi_get_pcidev_info(dev, &pcidev_info, &sn_irq_info)) { 423 if (sn_acpi_get_pcidev_info(dev, &pcidev_info, &sn_irq_info)) {
424 panic("%s: Failure obtaining pcidev_info for %s\n", 424 panic("%s: Failure obtaining pcidev_info for %s\n",
425 __FUNCTION__, pci_name(dev)); 425 __func__, pci_name(dev));
426 } 426 }
427 427
428 if (pcidev_info->pdi_pio_mapped_addr[PCI_ROM_RESOURCE]) { 428 if (pcidev_info->pdi_pio_mapped_addr[PCI_ROM_RESOURCE]) {
@@ -463,7 +463,7 @@ sn_acpi_bus_fixup(struct pci_bus *bus)
463 printk(KERN_ERR 463 printk(KERN_ERR
464 "%s: 0x%04x:0x%02x Unable to " 464 "%s: 0x%04x:0x%02x Unable to "
465 "obtain prom_bussoft_ptr\n", 465 "obtain prom_bussoft_ptr\n",
466 __FUNCTION__, pci_domain_nr(bus), bus->number); 466 __func__, pci_domain_nr(bus), bus->number);
467 return; 467 return;
468 } 468 }
469 sn_common_bus_fixup(bus, prom_bussoft_ptr); 469 sn_common_bus_fixup(bus, prom_bussoft_ptr);
diff --git a/arch/ia64/sn/kernel/io_common.c b/arch/ia64/sn/kernel/io_common.c
index c4eb84f9e781..8a924a5661dd 100644
--- a/arch/ia64/sn/kernel/io_common.c
+++ b/arch/ia64/sn/kernel/io_common.c
@@ -364,7 +364,7 @@ void sn_bus_store_sysdata(struct pci_dev *dev)
364 364
365 element = kzalloc(sizeof(struct sysdata_el), GFP_KERNEL); 365 element = kzalloc(sizeof(struct sysdata_el), GFP_KERNEL);
366 if (!element) { 366 if (!element) {
367 dev_dbg(&dev->dev, "%s: out of memory!\n", __FUNCTION__); 367 dev_dbg(&dev->dev, "%s: out of memory!\n", __func__);
368 return; 368 return;
369 } 369 }
370 element->sysdata = SN_PCIDEV_INFO(dev); 370 element->sysdata = SN_PCIDEV_INFO(dev);
diff --git a/arch/ia64/sn/kernel/io_init.c b/arch/ia64/sn/kernel/io_init.c
index 906b93674b76..c3aa851d1ca6 100644
--- a/arch/ia64/sn/kernel/io_init.c
+++ b/arch/ia64/sn/kernel/io_init.c
@@ -209,11 +209,11 @@ sn_io_slot_fixup(struct pci_dev *dev)
209 209
210 pcidev_info = kzalloc(sizeof(struct pcidev_info), GFP_KERNEL); 210 pcidev_info = kzalloc(sizeof(struct pcidev_info), GFP_KERNEL);
211 if (!pcidev_info) 211 if (!pcidev_info)
212 panic("%s: Unable to alloc memory for pcidev_info", __FUNCTION__); 212 panic("%s: Unable to alloc memory for pcidev_info", __func__);
213 213
214 sn_irq_info = kzalloc(sizeof(struct sn_irq_info), GFP_KERNEL); 214 sn_irq_info = kzalloc(sizeof(struct sn_irq_info), GFP_KERNEL);
215 if (!sn_irq_info) 215 if (!sn_irq_info)
216 panic("%s: Unable to alloc memory for sn_irq_info", __FUNCTION__); 216 panic("%s: Unable to alloc memory for sn_irq_info", __func__);
217 217
218 /* Call to retrieve pci device information needed by kernel. */ 218 /* Call to retrieve pci device information needed by kernel. */
219 status = sal_get_pcidev_info((u64) pci_domain_nr(dev), 219 status = sal_get_pcidev_info((u64) pci_domain_nr(dev),
diff --git a/arch/ia64/sn/kernel/mca.c b/arch/ia64/sn/kernel/mca.c
index 868c9aa64fe2..27793f7aa99c 100644
--- a/arch/ia64/sn/kernel/mca.c
+++ b/arch/ia64/sn/kernel/mca.c
@@ -100,7 +100,7 @@ sn_platform_plat_specific_err_print(const u8 * sect_header, u8 ** oemdata,
100 if (!newbuf) { 100 if (!newbuf) {
101 mutex_unlock(&sn_oemdata_mutex); 101 mutex_unlock(&sn_oemdata_mutex);
102 printk(KERN_ERR "%s: unable to extend sn_oemdata\n", 102 printk(KERN_ERR "%s: unable to extend sn_oemdata\n",
103 __FUNCTION__); 103 __func__);
104 return 1; 104 return 1;
105 } 105 }
106 vfree(*sn_oemdata); 106 vfree(*sn_oemdata);
diff --git a/arch/ia64/sn/pci/pci_dma.c b/arch/ia64/sn/pci/pci_dma.c
index 511db2fd7bff..18b94b792d54 100644
--- a/arch/ia64/sn/pci/pci_dma.c
+++ b/arch/ia64/sn/pci/pci_dma.c
@@ -116,7 +116,7 @@ void *sn_dma_alloc_coherent(struct device *dev, size_t size,
116 *dma_handle = provider->dma_map_consistent(pdev, phys_addr, size, 116 *dma_handle = provider->dma_map_consistent(pdev, phys_addr, size,
117 SN_DMA_ADDR_PHYS); 117 SN_DMA_ADDR_PHYS);
118 if (!*dma_handle) { 118 if (!*dma_handle) {
119 printk(KERN_ERR "%s: out of ATEs\n", __FUNCTION__); 119 printk(KERN_ERR "%s: out of ATEs\n", __func__);
120 free_pages((unsigned long)cpuaddr, get_order(size)); 120 free_pages((unsigned long)cpuaddr, get_order(size));
121 return NULL; 121 return NULL;
122 } 122 }
@@ -179,7 +179,7 @@ dma_addr_t sn_dma_map_single(struct device *dev, void *cpu_addr, size_t size,
179 phys_addr = __pa(cpu_addr); 179 phys_addr = __pa(cpu_addr);
180 dma_addr = provider->dma_map(pdev, phys_addr, size, SN_DMA_ADDR_PHYS); 180 dma_addr = provider->dma_map(pdev, phys_addr, size, SN_DMA_ADDR_PHYS);
181 if (!dma_addr) { 181 if (!dma_addr) {
182 printk(KERN_ERR "%s: out of ATEs\n", __FUNCTION__); 182 printk(KERN_ERR "%s: out of ATEs\n", __func__);
183 return 0; 183 return 0;
184 } 184 }
185 return dma_addr; 185 return dma_addr;
@@ -266,7 +266,7 @@ int sn_dma_map_sg(struct device *dev, struct scatterlist *sgl, int nhwentries,
266 SN_DMA_ADDR_PHYS); 266 SN_DMA_ADDR_PHYS);
267 267
268 if (!sg->dma_address) { 268 if (!sg->dma_address) {
269 printk(KERN_ERR "%s: out of ATEs\n", __FUNCTION__); 269 printk(KERN_ERR "%s: out of ATEs\n", __func__);
270 270
271 /* 271 /*
272 * Free any successfully allocated entries. 272 * Free any successfully allocated entries.
diff --git a/arch/ia64/sn/pci/tioca_provider.c b/arch/ia64/sn/pci/tioca_provider.c
index ef048a674772..529462c01570 100644
--- a/arch/ia64/sn/pci/tioca_provider.c
+++ b/arch/ia64/sn/pci/tioca_provider.c
@@ -88,7 +88,7 @@ tioca_gart_init(struct tioca_kernel *tioca_kern)
88 break; 88 break;
89 default: 89 default:
90 printk(KERN_ERR "%s: Invalid CA_APERATURE_SIZE " 90 printk(KERN_ERR "%s: Invalid CA_APERATURE_SIZE "
91 "0x%lx\n", __FUNCTION__, (ulong) CA_APERATURE_SIZE); 91 "0x%lx\n", __func__, (ulong) CA_APERATURE_SIZE);
92 return -1; 92 return -1;
93 } 93 }
94 94
@@ -124,7 +124,7 @@ tioca_gart_init(struct tioca_kernel *tioca_kern)
124 if (!tmp) { 124 if (!tmp) {
125 printk(KERN_ERR "%s: Could not allocate " 125 printk(KERN_ERR "%s: Could not allocate "
126 "%lu bytes (order %d) for GART\n", 126 "%lu bytes (order %d) for GART\n",
127 __FUNCTION__, 127 __func__,
128 tioca_kern->ca_gart_size, 128 tioca_kern->ca_gart_size,
129 get_order(tioca_kern->ca_gart_size)); 129 get_order(tioca_kern->ca_gart_size));
130 return -ENOMEM; 130 return -ENOMEM;
@@ -341,7 +341,7 @@ tioca_dma_d48(struct pci_dev *pdev, u64 paddr)
341 341
342 if (node_upper > 64) { 342 if (node_upper > 64) {
343 printk(KERN_ERR "%s: coretalk addr 0x%p node id out " 343 printk(KERN_ERR "%s: coretalk addr 0x%p node id out "
344 "of range\n", __FUNCTION__, (void *)ct_addr); 344 "of range\n", __func__, (void *)ct_addr);
345 return 0; 345 return 0;
346 } 346 }
347 347
@@ -349,7 +349,7 @@ tioca_dma_d48(struct pci_dev *pdev, u64 paddr)
349 if (node_upper != (agp_dma_extn >> CA_AGP_DMA_NODE_ID_SHFT)) { 349 if (node_upper != (agp_dma_extn >> CA_AGP_DMA_NODE_ID_SHFT)) {
350 printk(KERN_ERR "%s: coretalk upper node (%u) " 350 printk(KERN_ERR "%s: coretalk upper node (%u) "
351 "mismatch with ca_agp_dma_addr_extn (%lu)\n", 351 "mismatch with ca_agp_dma_addr_extn (%lu)\n",
352 __FUNCTION__, 352 __func__,
353 node_upper, (agp_dma_extn >> CA_AGP_DMA_NODE_ID_SHFT)); 353 node_upper, (agp_dma_extn >> CA_AGP_DMA_NODE_ID_SHFT));
354 return 0; 354 return 0;
355 } 355 }
@@ -597,7 +597,7 @@ tioca_bus_fixup(struct pcibus_bussoft *prom_bussoft, struct pci_controller *cont
597 if (is_shub1() && sn_sal_rev() < 0x0406) { 597 if (is_shub1() && sn_sal_rev() < 0x0406) {
598 printk 598 printk
599 (KERN_ERR "%s: SGI prom rev 4.06 or greater required " 599 (KERN_ERR "%s: SGI prom rev 4.06 or greater required "
600 "for tioca support\n", __FUNCTION__); 600 "for tioca support\n", __func__);
601 return NULL; 601 return NULL;
602 } 602 }
603 603
@@ -651,7 +651,7 @@ tioca_bus_fixup(struct pcibus_bussoft *prom_bussoft, struct pci_controller *cont
651 printk(KERN_WARNING 651 printk(KERN_WARNING
652 "%s: Unable to get irq %d. " 652 "%s: Unable to get irq %d. "
653 "Error interrupts won't be routed for TIOCA bus %d\n", 653 "Error interrupts won't be routed for TIOCA bus %d\n",
654 __FUNCTION__, SGI_TIOCA_ERROR, 654 __func__, SGI_TIOCA_ERROR,
655 (int)tioca_common->ca_common.bs_persist_busnum); 655 (int)tioca_common->ca_common.bs_persist_busnum);
656 656
657 sn_set_err_irq_affinity(SGI_TIOCA_ERROR); 657 sn_set_err_irq_affinity(SGI_TIOCA_ERROR);
diff --git a/arch/ia64/sn/pci/tioce_provider.c b/arch/ia64/sn/pci/tioce_provider.c
index 999f14f986e2..9b3c11373022 100644
--- a/arch/ia64/sn/pci/tioce_provider.c
+++ b/arch/ia64/sn/pci/tioce_provider.c
@@ -494,7 +494,7 @@ tioce_dma_unmap(struct pci_dev *pdev, dma_addr_t bus_addr, int dir)
494 if (&map->ce_dmamap_list == &ce_kern->ce_dmamap_list) { 494 if (&map->ce_dmamap_list == &ce_kern->ce_dmamap_list) {
495 printk(KERN_WARNING 495 printk(KERN_WARNING
496 "%s: %s - no map found for bus_addr 0x%lx\n", 496 "%s: %s - no map found for bus_addr 0x%lx\n",
497 __FUNCTION__, pci_name(pdev), bus_addr); 497 __func__, pci_name(pdev), bus_addr);
498 } else if (--map->refcnt == 0) { 498 } else if (--map->refcnt == 0) {
499 for (i = 0; i < map->ate_count; i++) { 499 for (i = 0; i < map->ate_count; i++) {
500 map->ate_shadow[i] = 0; 500 map->ate_shadow[i] = 0;
@@ -1030,7 +1030,7 @@ tioce_bus_fixup(struct pcibus_bussoft *prom_bussoft, struct pci_controller *cont
1030 "%s: Unable to get irq %d. " 1030 "%s: Unable to get irq %d. "
1031 "Error interrupts won't be routed for " 1031 "Error interrupts won't be routed for "
1032 "TIOCE bus %04x:%02x\n", 1032 "TIOCE bus %04x:%02x\n",
1033 __FUNCTION__, SGI_PCIASIC_ERROR, 1033 __func__, SGI_PCIASIC_ERROR,
1034 tioce_common->ce_pcibus.bs_persist_segment, 1034 tioce_common->ce_pcibus.bs_persist_segment,
1035 tioce_common->ce_pcibus.bs_persist_busnum); 1035 tioce_common->ce_pcibus.bs_persist_busnum);
1036 1036