Diffstat (limited to 'arch/ia64')
-rw-r--r--  arch/ia64/Kconfig                                  |    3
-rw-r--r--  arch/ia64/Makefile                                 |    2
-rw-r--r--  arch/ia64/configs/generic_defconfig (renamed from arch/ia64/defconfig) | 0
-rw-r--r--  arch/ia64/hp/common/hwsw_iommu.c                   |    2
-rw-r--r--  arch/ia64/hp/common/sba_iommu.c                    |   36
-rw-r--r--  arch/ia64/hp/sim/simeth.c                          |    2
-rw-r--r--  arch/ia64/hp/sim/simserial.c                       |    2
-rw-r--r--  arch/ia64/ia32/ia32_signal.c                       |   13
-rw-r--r--  arch/ia64/ia32/sys_ia32.c                          |    7
-rw-r--r--  arch/ia64/kernel/crash.c                           |    2
-rw-r--r--  arch/ia64/kernel/efi.c                             |   32
-rw-r--r--  arch/ia64/kernel/iosapic.c                         |   28
-rw-r--r--  arch/ia64/kernel/irq_ia64.c                        |  142
-rw-r--r--  arch/ia64/kernel/kprobes.c                         |    7
-rw-r--r--  arch/ia64/kernel/mca.c                             |   73
-rw-r--r--  arch/ia64/kernel/module.c                          |   22
-rw-r--r--  arch/ia64/kernel/msi_ia64.c                        |    3
-rw-r--r--  arch/ia64/kernel/perfmon.c                         |    4
-rw-r--r--  arch/ia64/kernel/perfmon_default_smpl.c            |    4
-rw-r--r--  arch/ia64/kernel/ptrace.c                          |  327
-rw-r--r--  arch/ia64/kernel/sal.c                             |    7
-rw-r--r--  arch/ia64/kernel/setup.c                           |    8
-rw-r--r--  arch/ia64/kernel/signal.c                          |   36
-rw-r--r--  arch/ia64/kernel/unaligned.c                       |    6
-rw-r--r--  arch/ia64/kernel/unwind.c                          |  102
-rw-r--r--  arch/ia64/mm/fault.c                               |    2
-rw-r--r--  arch/ia64/mm/init.c                                |    2
-rw-r--r--  arch/ia64/pci/fixup.c                              |    2
-rw-r--r--  arch/ia64/pci/pci.c                                |    4
-rw-r--r--  arch/ia64/sn/kernel/huberror.c                     |    4
-rw-r--r--  arch/ia64/sn/kernel/io_acpi_init.c                 |   36
-rw-r--r--  arch/ia64/sn/kernel/io_common.c                    |    2
-rw-r--r--  arch/ia64/sn/kernel/io_init.c                      |    4
-rw-r--r--  arch/ia64/sn/kernel/mca.c                          |    2
-rw-r--r--  arch/ia64/sn/pci/pci_dma.c                         |    6
-rw-r--r--  arch/ia64/sn/pci/tioca_provider.c                  |   12
-rw-r--r--  arch/ia64/sn/pci/tioce_provider.c                  |    4
37 files changed, 446 insertions, 504 deletions
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index dff9edfc7465..8fa3faf5ef1b 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -18,6 +18,7 @@ config IA64
 	select HAVE_IDE
 	select HAVE_OPROFILE
 	select HAVE_KPROBES
+	select HAVE_KRETPROBES
 	default y
 	help
 	  The Itanium Processor Family is Intel's 64-bit successor to
@@ -155,6 +156,8 @@ config IA64_HP_ZX1_SWIOTLB
 
 config IA64_SGI_SN2
 	bool "SGI-SN2"
+	select NUMA
+	select ACPI_NUMA
 	help
 	  Selecting this option will optimize the kernel for use on sn2 based
 	  systems, but the resulting kernel binary will not run on other
diff --git a/arch/ia64/Makefile b/arch/ia64/Makefile
index b916ccfdef84..f1645c4f7039 100644
--- a/arch/ia64/Makefile
+++ b/arch/ia64/Makefile
@@ -11,6 +11,8 @@
 # Copyright (C) 1998-2004 by David Mosberger-Tang <davidm@hpl.hp.com>
 #
 
+KBUILD_DEFCONFIG := generic_defconfig
+
 NM := $(CROSS_COMPILE)nm -B
 READELF := $(CROSS_COMPILE)readelf
 
diff --git a/arch/ia64/defconfig b/arch/ia64/configs/generic_defconfig
index 0210545e7f61..0210545e7f61 100644
--- a/arch/ia64/defconfig
+++ b/arch/ia64/configs/generic_defconfig
diff --git a/arch/ia64/hp/common/hwsw_iommu.c b/arch/ia64/hp/common/hwsw_iommu.c
index 94e57109fad6..8f6bcfe1dada 100644
--- a/arch/ia64/hp/common/hwsw_iommu.c
+++ b/arch/ia64/hp/common/hwsw_iommu.c
@@ -71,7 +71,7 @@ hwsw_init (void)
 #ifdef CONFIG_IA64_GENERIC
 	/* Better to have normal DMA than panic */
 	printk(KERN_WARNING "%s: Failed to initialize software I/O TLB,"
-	       " reverting to hpzx1 platform vector\n", __FUNCTION__);
+	       " reverting to hpzx1 platform vector\n", __func__);
 	machvec_init("hpzx1");
 #else
 	panic("Unable to initialize software I/O TLB services");
diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c
index a94445422cc6..523eae6d3e49 100644
--- a/arch/ia64/hp/common/sba_iommu.c
+++ b/arch/ia64/hp/common/sba_iommu.c
@@ -529,7 +529,7 @@ sba_search_bitmap(struct ioc *ioc, unsigned long bits_wanted, int use_hint)
 	base_mask = RESMAP_MASK(bits_wanted);
 	mask = base_mask << bitshiftcnt;
 
-	DBG_RES("%s() o %ld %p", __FUNCTION__, o, res_ptr);
+	DBG_RES("%s() o %ld %p", __func__, o, res_ptr);
 	for(; res_ptr < res_end ; res_ptr++)
 	{
 		DBG_RES(" %p %lx %lx\n", res_ptr, mask, *res_ptr);
@@ -679,7 +679,7 @@ sba_alloc_range(struct ioc *ioc, size_t size)
 #endif
 
 	DBG_RES("%s(%x) %d -> %lx hint %x/%x\n",
-		__FUNCTION__, size, pages_needed, pide,
+		__func__, size, pages_needed, pide,
 		(uint) ((unsigned long) ioc->res_hint - (unsigned long) ioc->res_map),
 		ioc->res_bitshift );
 
@@ -722,8 +722,8 @@ sba_free_range(struct ioc *ioc, dma_addr_t iova, size_t size)
 	m = RESMAP_MASK(bits_not_wanted) << (pide & (BITS_PER_LONG - 1));
 	bits_not_wanted = 0;
 
-	DBG_RES("%s( ,%x,%x) %x/%lx %x %p %lx\n", __FUNCTION__, (uint) iova, size,
+	DBG_RES("%s( ,%x,%x) %x/%lx %x %p %lx\n", __func__, (uint) iova, size,
 		bits_not_wanted, m, pide, res_ptr, *res_ptr);
 
 	ASSERT(m != 0);
 	ASSERT(bits_not_wanted);
@@ -940,8 +940,7 @@ sba_map_single(struct device *dev, void *addr, size_t size, int dir)
 
 	iovp = (dma_addr_t) pide << iovp_shift;
 
-	DBG_RUN("%s() 0x%p -> 0x%lx\n",
-		__FUNCTION__, addr, (long) iovp | offset);
+	DBG_RUN("%s() 0x%p -> 0x%lx\n", __func__, addr, (long) iovp | offset);
 
 	pdir_start = &(ioc->pdir_base[pide]);
 
@@ -1029,8 +1028,7 @@ void sba_unmap_single(struct device *dev, dma_addr_t iova, size_t size, int dir)
 #endif
 	offset = iova & ~iovp_mask;
 
-	DBG_RUN("%s() iovp 0x%lx/%x\n",
-		__FUNCTION__, (long) iova, size);
+	DBG_RUN("%s() iovp 0x%lx/%x\n", __func__, (long) iova, size);
 
 	iova ^= offset;	/* clear offset bits */
 	size += offset;
@@ -1404,7 +1402,7 @@ int sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents, int di
 	struct scatterlist *sg;
 #endif
 
-	DBG_RUN_SG("%s() START %d entries\n", __FUNCTION__, nents);
+	DBG_RUN_SG("%s() START %d entries\n", __func__, nents);
 	ioc = GET_IOC(dev);
 	ASSERT(ioc);
 
@@ -1468,7 +1466,7 @@ int sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents, int di
 #endif
 
 	ASSERT(coalesced == filled);
-	DBG_RUN_SG("%s() DONE %d mappings\n", __FUNCTION__, filled);
+	DBG_RUN_SG("%s() DONE %d mappings\n", __func__, filled);
 
 	return filled;
 }
@@ -1491,7 +1489,7 @@ void sba_unmap_sg (struct device *dev, struct scatterlist *sglist, int nents, in
 #endif
 
 	DBG_RUN_SG("%s() START %d entries, %p,%x\n",
-		   __FUNCTION__, nents, sba_sg_address(sglist), sglist->length);
+		   __func__, nents, sba_sg_address(sglist), sglist->length);
 
 #ifdef ASSERT_PDIR_SANITY
 	ioc = GET_IOC(dev);
@@ -1509,7 +1507,7 @@ void sba_unmap_sg (struct device *dev, struct scatterlist *sglist, int nents, in
 		nents--;
 	}
 
-	DBG_RUN_SG("%s() DONE (nents %d)\n", __FUNCTION__, nents);
+	DBG_RUN_SG("%s() DONE (nents %d)\n", __func__, nents);
 
 #ifdef ASSERT_PDIR_SANITY
 	spin_lock_irqsave(&ioc->res_lock, flags);
@@ -1546,7 +1544,7 @@ ioc_iova_init(struct ioc *ioc)
 	ioc->iov_size = ~ioc->imask + 1;
 
 	DBG_INIT("%s() hpa %p IOV base 0x%lx mask 0x%lx (%dMB)\n",
-		__FUNCTION__, ioc->ioc_hpa, ioc->ibase, ioc->imask,
+		__func__, ioc->ioc_hpa, ioc->ibase, ioc->imask,
 		ioc->iov_size >> 20);
 
 	switch (iovp_size) {
@@ -1569,7 +1567,7 @@ ioc_iova_init(struct ioc *ioc)
 
 	memset(ioc->pdir_base, 0, ioc->pdir_size);
 
-	DBG_INIT("%s() IOV page size %ldK pdir %p size %x\n", __FUNCTION__,
+	DBG_INIT("%s() IOV page size %ldK pdir %p size %x\n", __func__,
 		iovp_size >> 10, ioc->pdir_base, ioc->pdir_size);
 
 	ASSERT(ALIGN((unsigned long) ioc->pdir_base, 4*1024) == (unsigned long) ioc->pdir_base);
@@ -1612,7 +1610,7 @@ ioc_iova_init(struct ioc *ioc)
 
 		prefetch_spill_page = virt_to_phys(addr);
 
-		DBG_INIT("%s() prefetch spill addr: 0x%lx\n", __FUNCTION__, prefetch_spill_page);
+		DBG_INIT("%s() prefetch spill addr: 0x%lx\n", __func__, prefetch_spill_page);
 	}
 	/*
 	** Set all the PDIR entries valid w/ the spill page as the target
@@ -1641,7 +1639,7 @@ ioc_resource_init(struct ioc *ioc)
 	/* resource map size dictated by pdir_size */
 	ioc->res_size = ioc->pdir_size / PDIR_ENTRY_SIZE; /* entries */
 	ioc->res_size >>= 3;	/* convert bit count to byte count */
-	DBG_INIT("%s() res_size 0x%x\n", __FUNCTION__, ioc->res_size);
+	DBG_INIT("%s() res_size 0x%x\n", __func__, ioc->res_size);
 
 	ioc->res_map = (char *) __get_free_pages(GFP_KERNEL,
 						 get_order(ioc->res_size));
@@ -1664,7 +1662,7 @@ ioc_resource_init(struct ioc *ioc)
 				| prefetch_spill_page);
 #endif
 
-	DBG_INIT("%s() res_map %x %p\n", __FUNCTION__,
+	DBG_INIT("%s() res_map %x %p\n", __func__,
 		 ioc->res_size, (void *) ioc->res_map);
 }
 
@@ -1767,7 +1765,7 @@ ioc_init(u64 hpa, void *handle)
 	iovp_size = (1 << iovp_shift);
 	iovp_mask = ~(iovp_size - 1);
 
-	DBG_INIT("%s: PAGE_SIZE %ldK, iovp_size %ldK\n", __FUNCTION__,
+	DBG_INIT("%s: PAGE_SIZE %ldK, iovp_size %ldK\n", __func__,
 		PAGE_SIZE >> 10, iovp_size >> 10);
 
 	if (!ioc->name) {
@@ -2137,7 +2135,7 @@ sba_page_override(char *str)
 			break;
 		default:
 			printk("%s: unknown/unsupported iommu page size %ld\n",
-			       __FUNCTION__, page_size);
+			       __func__, page_size);
 	}
 
 	return 1;
diff --git a/arch/ia64/hp/sim/simeth.c b/arch/ia64/hp/sim/simeth.c
index 9898febf609a..969fe9f443c4 100644
--- a/arch/ia64/hp/sim/simeth.c
+++ b/arch/ia64/hp/sim/simeth.c
@@ -222,7 +222,7 @@ simeth_probe1(void)
 	}
 
 	if ((rc = assign_irq_vector(AUTO_ASSIGN)) < 0)
-		panic("%s: out of interrupt vectors!\n", __FUNCTION__);
+		panic("%s: out of interrupt vectors!\n", __func__);
 	dev->irq = rc;
 
 	/*
diff --git a/arch/ia64/hp/sim/simserial.c b/arch/ia64/hp/sim/simserial.c
index ef252df50e1e..eb0c32a85fd7 100644
--- a/arch/ia64/hp/sim/simserial.c
+++ b/arch/ia64/hp/sim/simserial.c
@@ -1000,7 +1000,7 @@ simrs_init (void)
 	if (!state->irq) {
 		if ((rc = assign_irq_vector(AUTO_ASSIGN)) < 0)
 			panic("%s: out of interrupt vectors!\n",
-			      __FUNCTION__);
+			      __func__);
 		state->irq = rc;
 		ia64_ssc_connect_irq(KEYBOARD_INTR, state->irq);
 	}
diff --git a/arch/ia64/ia32/ia32_signal.c b/arch/ia64/ia32/ia32_signal.c
index 85e82f32e480..256a7faeda07 100644
--- a/arch/ia64/ia32/ia32_signal.c
+++ b/arch/ia64/ia32/ia32_signal.c
@@ -766,8 +766,19 @@ get_sigframe (struct k_sigaction *ka, struct pt_regs * regs, size_t frame_size)
 
 	/* This is the X/Open sanctioned signal stack switching. */
 	if (ka->sa.sa_flags & SA_ONSTACK) {
-		if (!on_sig_stack(esp))
+		int onstack = sas_ss_flags(esp);
+
+		if (onstack == 0)
 			esp = current->sas_ss_sp + current->sas_ss_size;
+		else if (onstack == SS_ONSTACK) {
+			/*
+			 * If we are on the alternate signal stack and would
+			 * overflow it, don't. Return an always-bogus address
+			 * instead so we will die with SIGSEGV.
+			 */
+			if (!likely(on_sig_stack(esp - frame_size)))
+				return (void __user *) -1L;
+		}
 	}
 	/* Legacy stack switching not supported */
 
diff --git a/arch/ia64/ia32/sys_ia32.c b/arch/ia64/ia32/sys_ia32.c
index d025a22eb225..b1bf51fe97b4 100644
--- a/arch/ia64/ia32/sys_ia32.c
+++ b/arch/ia64/ia32/sys_ia32.c
@@ -32,13 +32,8 @@
 #include <linux/shm.h>
 #include <linux/slab.h>
 #include <linux/uio.h>
-#include <linux/nfs_fs.h>
+#include <linux/socket.h>
 #include <linux/quota.h>
-#include <linux/sunrpc/svc.h>
-#include <linux/nfsd/nfsd.h>
-#include <linux/nfsd/cache.h>
-#include <linux/nfsd/xdr.h>
-#include <linux/nfsd/syscall.h>
 #include <linux/poll.h>
 #include <linux/eventpoll.h>
 #include <linux/personality.h>
diff --git a/arch/ia64/kernel/crash.c b/arch/ia64/kernel/crash.c
index f1cf2df97a2d..fbe742ad2fde 100644
--- a/arch/ia64/kernel/crash.c
+++ b/arch/ia64/kernel/crash.c
@@ -155,7 +155,7 @@ kdump_init_notifier(struct notifier_block *self, unsigned long val, void *data)
 		if (val == DIE_INIT_MONARCH_LEAVE)
 			ia64_mca_printk(KERN_NOTICE
 					"%s: kdump not configured\n",
-					__FUNCTION__);
+					__func__);
 		return NOTIFY_DONE;
 	}
 
diff --git a/arch/ia64/kernel/efi.c b/arch/ia64/kernel/efi.c
index 919070a9aed7..728d7247a1a6 100644
--- a/arch/ia64/kernel/efi.c
+++ b/arch/ia64/kernel/efi.c
@@ -379,8 +379,8 @@ efi_get_pal_addr (void)
 		 * a dedicated ITR for the PAL code.
 		 */
 		if ((vaddr & mask) == (KERNEL_START & mask)) {
-			printk(KERN_INFO "%s: no need to install ITR for "
-			       "PAL code\n", __FUNCTION__);
+			printk(KERN_INFO "%s: no need to install ITR for PAL code\n",
+			       __func__);
 			continue;
 		}
 
@@ -399,7 +399,7 @@ efi_get_pal_addr (void)
 		return __va(md->phys_addr);
 	}
 	printk(KERN_WARNING "%s: no PAL-code memory-descriptor found\n",
-	       __FUNCTION__);
+	       __func__);
 	return NULL;
 }
 
@@ -543,12 +543,30 @@ efi_init (void)
 	for (i = 0, p = efi_map_start; p < efi_map_end;
 	     ++i, p += efi_desc_size)
 	{
+		const char *unit;
+		unsigned long size;
+
 		md = p;
-		printk("mem%02u: type=%u, attr=0x%lx, "
-		       "range=[0x%016lx-0x%016lx) (%luMB)\n",
+		size = md->num_pages << EFI_PAGE_SHIFT;
+
+		if ((size >> 40) > 0) {
+			size >>= 40;
+			unit = "TB";
+		} else if ((size >> 30) > 0) {
+			size >>= 30;
+			unit = "GB";
+		} else if ((size >> 20) > 0) {
+			size >>= 20;
+			unit = "MB";
+		} else {
+			size >>= 10;
+			unit = "KB";
+		}
+
+		printk("mem%02d: type=%2u, attr=0x%016lx, "
+		       "range=[0x%016lx-0x%016lx) (%4lu%s)\n",
 		       i, md->type, md->attribute, md->phys_addr,
-		       md->phys_addr + efi_md_size(md),
-		       md->num_pages >> (20 - EFI_PAGE_SHIFT));
+		       md->phys_addr + efi_md_size(md), size, unit);
 	}
 }
 #endif
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c
index 398e2fd1cd25..082c31dcfd99 100644
--- a/arch/ia64/kernel/iosapic.c
+++ b/arch/ia64/kernel/iosapic.c
@@ -345,7 +345,7 @@ iosapic_set_affinity (unsigned int irq, cpumask_t mask)
 	if (cpus_empty(mask))
 		return;
 
-	if (reassign_irq_vector(irq, first_cpu(mask)))
+	if (irq_prepare_move(irq, first_cpu(mask)))
 		return;
 
 	dest = cpu_physical_id(first_cpu(mask));
@@ -397,6 +397,7 @@ iosapic_end_level_irq (unsigned int irq)
 	struct iosapic_rte_info *rte;
 	int do_unmask_irq = 0;
 
+	irq_complete_move(irq);
 	if (unlikely(irq_desc[irq].status & IRQ_MOVE_PENDING)) {
 		do_unmask_irq = 1;
 		mask_irq(irq);
@@ -450,6 +451,7 @@ iosapic_ack_edge_irq (unsigned int irq)
 {
 	irq_desc_t *idesc = irq_desc + irq;
 
+	irq_complete_move(irq);
 	move_native_irq(irq);
 	/*
 	 * Once we have recorded IRQ_PENDING already, we can mask the
@@ -532,7 +534,7 @@ iosapic_reassign_vector (int irq)
 	if (iosapic_intr_info[irq].count) {
 		new_irq = create_irq();
 		if (new_irq < 0)
-			panic("%s: out of interrupt vectors!\n", __FUNCTION__);
+			panic("%s: out of interrupt vectors!\n", __func__);
 		printk(KERN_INFO "Reassigning vector %d to %d\n",
 		       irq_to_vector(irq), irq_to_vector(new_irq));
 		memcpy(&iosapic_intr_info[new_irq], &iosapic_intr_info[irq],
@@ -597,7 +599,7 @@ register_intr (unsigned int gsi, int irq, unsigned char delivery,
 	index = find_iosapic(gsi);
 	if (index < 0) {
 		printk(KERN_WARNING "%s: No IOSAPIC for GSI %u\n",
-		       __FUNCTION__, gsi);
+		       __func__, gsi);
 		return -ENODEV;
 	}
 
@@ -606,7 +608,7 @@ register_intr (unsigned int gsi, int irq, unsigned char delivery,
 		rte = iosapic_alloc_rte();
 		if (!rte) {
 			printk(KERN_WARNING "%s: cannot allocate memory\n",
-			       __FUNCTION__);
+			       __func__);
 			return -ENOMEM;
 		}
 
@@ -623,7 +625,7 @@ register_intr (unsigned int gsi, int irq, unsigned char delivery,
 		    (info->trigger != trigger || info->polarity != polarity)){
 			printk (KERN_WARNING
 				"%s: cannot override the interrupt\n",
-				__FUNCTION__);
+				__func__);
 			return -EINVAL;
 		}
 		rte->refcnt++;
@@ -645,7 +647,7 @@ register_intr (unsigned int gsi, int irq, unsigned char delivery,
 		if (idesc->chip != &no_irq_type)
 			printk(KERN_WARNING
 			       "%s: changing vector %d from %s to %s\n",
-			       __FUNCTION__, irq_to_vector(irq),
+			       __func__, irq_to_vector(irq),
 			       idesc->chip->name, irq_type->name);
 		idesc->chip = irq_type;
 	}
@@ -918,7 +920,7 @@ iosapic_register_platform_intr (u32 int_type, unsigned int gsi,
 	case ACPI_INTERRUPT_INIT:
 		irq = create_irq();
 		if (irq < 0)
-			panic("%s: out of interrupt vectors!\n", __FUNCTION__);
+			panic("%s: out of interrupt vectors!\n", __func__);
 		vector = irq_to_vector(irq);
 		delivery = IOSAPIC_INIT;
 		break;
@@ -929,7 +931,7 @@ iosapic_register_platform_intr (u32 int_type, unsigned int gsi,
 		mask = 1;
 		break;
 	default:
-		printk(KERN_ERR "%s: invalid int type 0x%x\n", __FUNCTION__,
+		printk(KERN_ERR "%s: invalid int type 0x%x\n", __func__,
 		       int_type);
 		return -1;
 	}
@@ -994,7 +996,7 @@ iosapic_system_init (int system_pcat_compat)
 		 */
 		printk(KERN_INFO
 		       "%s: Disabling PC-AT compatible 8259 interrupts\n",
-		       __FUNCTION__);
+		       __func__);
 		outb(0xff, 0xA1);
 		outb(0xff, 0x21);
 	}
@@ -1009,7 +1011,7 @@ iosapic_alloc (void)
 		if (!iosapic_lists[index].addr)
 			return index;
 
-	printk(KERN_WARNING "%s: failed to allocate iosapic\n", __FUNCTION__);
+	printk(KERN_WARNING "%s: failed to allocate iosapic\n", __func__);
 	return -1;
 }
 
@@ -1107,14 +1109,14 @@ iosapic_remove (unsigned int gsi_base)
 	index = find_iosapic(gsi_base);
 	if (index < 0) {
 		printk(KERN_WARNING "%s: No IOSAPIC for GSI base %u\n",
-		       __FUNCTION__, gsi_base);
+		       __func__, gsi_base);
 		goto out;
 	}
 
 	if (iosapic_lists[index].rtes_inuse) {
 		err = -EBUSY;
 		printk(KERN_WARNING "%s: IOSAPIC for GSI base %u is busy\n",
-		       __FUNCTION__, gsi_base);
+		       __func__, gsi_base);
 		goto out;
 	}
 
@@ -1135,7 +1137,7 @@ map_iosapic_to_node(unsigned int gsi_base, int node)
 	index = find_iosapic(gsi_base);
 	if (index < 0) {
 		printk(KERN_WARNING "%s: No IOSAPIC for GSI %u\n",
-		       __FUNCTION__, gsi_base);
+		       __func__, gsi_base);
 		return;
 	}
 	iosapic_lists[index].node = node;
diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c
index 0b52f19ed046..d8be23fbe6bc 100644
--- a/arch/ia64/kernel/irq_ia64.c
+++ b/arch/ia64/kernel/irq_ia64.c
@@ -260,6 +260,8 @@ void __setup_vector_irq(int cpu)
 }
 
 #if defined(CONFIG_SMP) && (defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_DIG))
+#define IA64_IRQ_MOVE_VECTOR	IA64_DEF_FIRST_DEVICE_VECTOR
+
 static enum vector_domain_type {
 	VECTOR_DOMAIN_NONE,
 	VECTOR_DOMAIN_PERCPU
@@ -272,6 +274,101 @@ static cpumask_t vector_allocation_domain(int cpu)
 	return CPU_MASK_ALL;
 }
 
+static int __irq_prepare_move(int irq, int cpu)
+{
+	struct irq_cfg *cfg = &irq_cfg[irq];
+	int vector;
+	cpumask_t domain;
+
+	if (cfg->move_in_progress || cfg->move_cleanup_count)
+		return -EBUSY;
+	if (cfg->vector == IRQ_VECTOR_UNASSIGNED || !cpu_online(cpu))
+		return -EINVAL;
+	if (cpu_isset(cpu, cfg->domain))
+		return 0;
+	domain = vector_allocation_domain(cpu);
+	vector = find_unassigned_vector(domain);
+	if (vector < 0)
+		return -ENOSPC;
+	cfg->move_in_progress = 1;
+	cfg->old_domain = cfg->domain;
+	cfg->vector = IRQ_VECTOR_UNASSIGNED;
+	cfg->domain = CPU_MASK_NONE;
+	BUG_ON(__bind_irq_vector(irq, vector, domain));
+	return 0;
+}
+
+int irq_prepare_move(int irq, int cpu)
+{
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&vector_lock, flags);
+	ret = __irq_prepare_move(irq, cpu);
+	spin_unlock_irqrestore(&vector_lock, flags);
+	return ret;
+}
+
+void irq_complete_move(unsigned irq)
+{
+	struct irq_cfg *cfg = &irq_cfg[irq];
+	cpumask_t cleanup_mask;
+	int i;
+
+	if (likely(!cfg->move_in_progress))
+		return;
+
+	if (unlikely(cpu_isset(smp_processor_id(), cfg->old_domain)))
+		return;
+
+	cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map);
+	cfg->move_cleanup_count = cpus_weight(cleanup_mask);
+	for_each_cpu_mask(i, cleanup_mask)
+		platform_send_ipi(i, IA64_IRQ_MOVE_VECTOR, IA64_IPI_DM_INT, 0);
+	cfg->move_in_progress = 0;
+}
+
+static irqreturn_t smp_irq_move_cleanup_interrupt(int irq, void *dev_id)
+{
+	int me = smp_processor_id();
+	ia64_vector vector;
+	unsigned long flags;
+
+	for (vector = IA64_FIRST_DEVICE_VECTOR;
+	     vector < IA64_LAST_DEVICE_VECTOR; vector++) {
+		int irq;
+		struct irq_desc *desc;
+		struct irq_cfg *cfg;
+		irq = __get_cpu_var(vector_irq)[vector];
+		if (irq < 0)
+			continue;
+
+		desc = irq_desc + irq;
+		cfg = irq_cfg + irq;
+		spin_lock(&desc->lock);
+		if (!cfg->move_cleanup_count)
+			goto unlock;
+
+		if (!cpu_isset(me, cfg->old_domain))
+			goto unlock;
+
+		spin_lock_irqsave(&vector_lock, flags);
+		__get_cpu_var(vector_irq)[vector] = -1;
+		cpu_clear(me, vector_table[vector]);
+		spin_unlock_irqrestore(&vector_lock, flags);
+		cfg->move_cleanup_count--;
+	unlock:
+		spin_unlock(&desc->lock);
+	}
+	return IRQ_HANDLED;
+}
+
+static struct irqaction irq_move_irqaction = {
+	.handler =	smp_irq_move_cleanup_interrupt,
+	.flags =	IRQF_DISABLED,
+	.name =		"irq_move"
+};
+
 static int __init parse_vector_domain(char *arg)
 {
 	if (!arg)
@@ -303,36 +400,6 @@ void destroy_and_reserve_irq(unsigned int irq)
 	spin_unlock_irqrestore(&vector_lock, flags);
 }
 
-static int __reassign_irq_vector(int irq, int cpu)
-{
-	struct irq_cfg *cfg = &irq_cfg[irq];
-	int vector;
-	cpumask_t domain;
-
-	if (cfg->vector == IRQ_VECTOR_UNASSIGNED || !cpu_online(cpu))
-		return -EINVAL;
-	if (cpu_isset(cpu, cfg->domain))
-		return 0;
-	domain = vector_allocation_domain(cpu);
-	vector = find_unassigned_vector(domain);
-	if (vector < 0)
-		return -ENOSPC;
-	__clear_irq_vector(irq);
-	BUG_ON(__bind_irq_vector(irq, vector, domain));
-	return 0;
-}
-
-int reassign_irq_vector(int irq, int cpu)
-{
-	unsigned long flags;
-	int ret;
-
-	spin_lock_irqsave(&vector_lock, flags);
-	ret = __reassign_irq_vector(irq, cpu);
-	spin_unlock_irqrestore(&vector_lock, flags);
-	return ret;
-}
-
 /*
  * Dynamic irq allocate and deallocation for MSI
  */
@@ -440,7 +507,7 @@ ia64_handle_irq (ia64_vector vector, struct pt_regs *regs)
 			if (unlikely(irq < 0)) {
 				printk(KERN_ERR "%s: Unexpected interrupt "
 				       "vector %d on CPU %d is not mapped "
-				       "to any IRQ!\n", __FUNCTION__, vector,
+				       "to any IRQ!\n", __func__, vector,
 				       smp_processor_id());
 			} else
 				generic_handle_irq(irq);
@@ -505,7 +572,7 @@ void ia64_process_pending_intr(void)
 			if (unlikely(irq < 0)) {
 				printk(KERN_ERR "%s: Unexpected interrupt "
 				       "vector %d on CPU %d not being mapped "
-				       "to any IRQ!!\n", __FUNCTION__, vector,
+				       "to any IRQ!!\n", __func__, vector,
 				       smp_processor_id());
 			} else {
 				vectors_in_migration[irq]=0;
@@ -578,6 +645,13 @@ init_IRQ (void)
 	register_percpu_irq(IA64_IPI_VECTOR, &ipi_irqaction);
 	register_percpu_irq(IA64_IPI_RESCHEDULE, &resched_irqaction);
 	register_percpu_irq(IA64_IPI_LOCAL_TLB_FLUSH, &tlb_irqaction);
+#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_DIG)
+	if (vector_domain_type != VECTOR_DOMAIN_NONE) {
+		BUG_ON(IA64_FIRST_DEVICE_VECTOR != IA64_IRQ_MOVE_VECTOR);
+		IA64_FIRST_DEVICE_VECTOR++;
+		register_percpu_irq(IA64_IRQ_MOVE_VECTOR, &irq_move_irqaction);
+	}
+#endif
 #endif
 #ifdef CONFIG_PERFMON
 	pfm_init_percpu();
@@ -592,11 +666,7 @@ ia64_send_ipi (int cpu, int vector, int delivery_mode, int redirect)
 	unsigned long ipi_data;
 	unsigned long phys_cpu_id;
 
-#ifdef CONFIG_SMP
 	phys_cpu_id = cpu_physical_id(cpu);
-#else
-	phys_cpu_id = (ia64_getreg(_IA64_REG_CR_LID) >> 16) & 0xffff;
-#endif
 
 	/*
 	 * cpu number is in 8bit ID and 8bit EID
diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c
index b618487cdc85..8d9a446a0d17 100644
--- a/arch/ia64/kernel/kprobes.c
+++ b/arch/ia64/kernel/kprobes.c
@@ -838,7 +838,7 @@ out:
 	return 1;
 }
 
-int __kprobes kprobes_fault_handler(struct pt_regs *regs, int trapnr)
+int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
 {
 	struct kprobe *cur = kprobe_running();
 	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
@@ -1001,6 +1001,11 @@ int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
 	return 1;
 }
 
+/* ia64 does not need this */
+void __kprobes jprobe_return(void)
+{
+}
+
 int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
 {
 	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 6e17aed53135..6c18221dba36 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -413,8 +413,8 @@ ia64_log_get(int sal_info_type, u8 **buffer, int irq_safe)
 		IA64_LOG_INDEX_INC(sal_info_type);
 		IA64_LOG_UNLOCK(sal_info_type);
 		if (irq_safe) {
-			IA64_MCA_DEBUG("%s: SAL error record type %d retrieved. "
-			"Record length = %ld\n", __FUNCTION__, sal_info_type, total_len);
+			IA64_MCA_DEBUG("%s: SAL error record type %d retrieved. Record length = %ld\n",
+				       __func__, sal_info_type, total_len);
 		}
 		*buffer = (u8 *) log_buffer;
 		return total_len;
@@ -518,7 +518,7 @@ ia64_mca_cpe_int_handler (int cpe_irq, void *arg)
 	static DEFINE_SPINLOCK(cpe_history_lock);
 
 	IA64_MCA_DEBUG("%s: received interrupt vector = %#x on CPU %d\n",
-		       __FUNCTION__, cpe_irq, smp_processor_id());
+		       __func__, cpe_irq, smp_processor_id());
 
 	/* SAL spec states this should run w/ interrupts enabled */
 	local_irq_enable();
@@ -594,7 +594,7 @@ ia64_mca_register_cpev (int cpev)
 	}
 
 	IA64_MCA_DEBUG("%s: corrected platform error "
-		       "vector %#x registered\n", __FUNCTION__, cpev);
+		       "vector %#x registered\n", __func__, cpev);
 }
 #endif /* CONFIG_ACPI */
 
@@ -621,12 +621,11 @@ ia64_mca_cmc_vector_setup (void)
 	cmcv.cmcv_vector	= IA64_CMC_VECTOR;
 	ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval);
 
-	IA64_MCA_DEBUG("%s: CPU %d corrected "
-		       "machine check vector %#x registered.\n",
-		       __FUNCTION__, smp_processor_id(), IA64_CMC_VECTOR);
+	IA64_MCA_DEBUG("%s: CPU %d corrected machine check vector %#x registered.\n",
+		       __func__, smp_processor_id(), IA64_CMC_VECTOR);
 
 	IA64_MCA_DEBUG("%s: CPU %d CMCV = %#016lx\n",
-		       __FUNCTION__, smp_processor_id(), ia64_getreg(_IA64_REG_CR_CMCV));
+		       __func__, smp_processor_id(), ia64_getreg(_IA64_REG_CR_CMCV));
 }
 
 /*
@@ -651,9 +650,8 @@ ia64_mca_cmc_vector_disable (void *dummy)
 	cmcv.cmcv_mask = 1; /* Mask/disable interrupt */
 	ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval);
 
-	IA64_MCA_DEBUG("%s: CPU %d corrected "
-		       "machine check vector %#x disabled.\n",
-		       __FUNCTION__, smp_processor_id(), cmcv.cmcv_vector);
+	IA64_MCA_DEBUG("%s: CPU %d corrected machine check vector %#x disabled.\n",
+		       __func__, smp_processor_id(), cmcv.cmcv_vector);
 }
 
 /*
@@ -678,9 +676,8 @@ ia64_mca_cmc_vector_enable (void *dummy)
 	cmcv.cmcv_mask = 0; /* Unmask/enable interrupt */
 	ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval);
 
-	IA64_MCA_DEBUG("%s: CPU %d corrected "
-		       "machine check vector %#x enabled.\n",
-		       __FUNCTION__, smp_processor_id(), cmcv.cmcv_vector);
+	IA64_MCA_DEBUG("%s: CPU %d corrected machine check vector %#x enabled.\n",
+		       __func__, smp_processor_id(), cmcv.cmcv_vector);
 }
 
 /*
@@ -767,7 +764,7 @@ ia64_mca_rendez_int_handler(int rendez_irq, void *arg)
 	local_irq_save(flags);
 	if (notify_die(DIE_MCA_RENDZVOUS_ENTER, "MCA", get_irq_regs(),
 		       (long)&nd, 0, 0) == NOTIFY_STOP)
-		ia64_mca_spin(__FUNCTION__);
+		ia64_mca_spin(__func__);
 
 	ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_DONE;
 	/* Register with the SAL monarch that the slave has
@@ -777,7 +774,7 @@ ia64_mca_rendez_int_handler(int rendez_irq, void *arg)
 
 	if (notify_die(DIE_MCA_RENDZVOUS_PROCESS, "MCA", get_irq_regs(),
 		       (long)&nd, 0, 0) == NOTIFY_STOP)
-		ia64_mca_spin(__FUNCTION__);
+		ia64_mca_spin(__func__);
 
 	/* Wait for the monarch cpu to exit. */
 	while (monarch_cpu != -1)
@@ -785,7 +782,7 @@ ia64_mca_rendez_int_handler(int rendez_irq, void *arg)
 
 	if (notify_die(DIE_MCA_RENDZVOUS_LEAVE, "MCA", get_irq_regs(),
 		       (long)&nd, 0, 0) == NOTIFY_STOP)
-		ia64_mca_spin(__FUNCTION__);
+		ia64_mca_spin(__func__);
 
 	ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
 	/* Enable all interrupts */
@@ -1230,7 +1227,7 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
 
 	if (notify_die(DIE_MCA_MONARCH_ENTER, "MCA", regs, (long)&nd, 0, 0)
 			== NOTIFY_STOP)
-		ia64_mca_spin(__FUNCTION__);
+		ia64_mca_spin(__func__);
 
 	ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_CONCURRENT_MCA;
 	if (sos->monarch) {
@@ -1246,7 +1243,7 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
 		ia64_mca_wakeup_all();
 		if (notify_die(DIE_MCA_MONARCH_PROCESS, "MCA", regs, (long)&nd, 0, 0)
 				== NOTIFY_STOP)
-			ia64_mca_spin(__FUNCTION__);
+			ia64_mca_spin(__func__);
 	} else {
 		while (cpu_isset(cpu, mca_cpu))
 			cpu_relax();	/* spin until monarch wakes us */
@@ -1276,7 +1273,7 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
 	}
 	if (notify_die(DIE_MCA_MONARCH_LEAVE, "MCA", regs, (long)&nd, 0, recover)
 			== NOTIFY_STOP)
-		ia64_mca_spin(__FUNCTION__);
+		ia64_mca_spin(__func__);
 
 
 	if (atomic_dec_return(&mca_count) > 0) {
@@ -1328,7 +1325,7 @@ ia64_mca_cmc_int_handler(int cmc_irq, void *arg)
 	static DEFINE_SPINLOCK(cmc_history_lock);
 
 	IA64_MCA_DEBUG("%s: received interrupt vector = %#x on CPU %d\n",
-		       __FUNCTION__, cmc_irq, smp_processor_id());
+		       __func__, cmc_irq, smp_processor_id());
 
 	/* SAL spec states this should run w/ interrupts enabled */
 	local_irq_enable();
@@ -1614,7 +1611,7 @@ ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw,
 	 */
 	if (!sos->monarch && atomic_add_return(1, &slaves) == num_online_cpus()) {
 		mprintk(KERN_WARNING "%s: Promoting cpu %d to monarch.\n",
-			__FUNCTION__, cpu);
+			__func__, cpu);
 		atomic_dec(&slaves);
 		sos->monarch = 1;
 	}
@@ -1626,7 +1623,7 @@ ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw,
 	 */
 	if (sos->monarch && atomic_add_return(1, &monarchs) > 1) {
 		mprintk(KERN_WARNING "%s: Demoting cpu %d to slave.\n",
-			__FUNCTION__, cpu);
+			__func__, cpu);
 		atomic_dec(&monarchs);
 		sos->monarch = 0;
 	}
@@ -1637,15 +1634,15 @@ ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw,
 			cpu_relax();	/* spin until monarch enters */
 		if (notify_die(DIE_INIT_SLAVE_ENTER, "INIT", regs, (long)&nd, 0, 0)
 				== NOTIFY_STOP)
-			ia64_mca_spin(__FUNCTION__);
+			ia64_mca_spin(__func__);
 		if (notify_die(DIE_INIT_SLAVE_PROCESS, "INIT", regs, (long)&nd, 0, 0)
 				== NOTIFY_STOP)
-			ia64_mca_spin(__FUNCTION__);
+			ia64_mca_spin(__func__);
 		while (monarch_cpu != -1)
 			cpu_relax();	/* spin until monarch leaves */
 		if (notify_die(DIE_INIT_SLAVE_LEAVE, "INIT", regs, (long)&nd, 0, 0)
 				== NOTIFY_STOP)
-			ia64_mca_spin(__FUNCTION__);
+			ia64_mca_spin(__func__);
 		mprintk("Slave on cpu %d returning to normal service.\n", cpu);
 		set_curr_task(cpu, previous_current);
 		ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
@@ -1656,7 +1653,7 @@ ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw,
 	monarch_cpu = cpu;
 	if (notify_die(DIE_INIT_MONARCH_ENTER, "INIT", regs, (long)&nd, 0, 0)
 			== NOTIFY_STOP)
-		ia64_mca_spin(__FUNCTION__);
+		ia64_mca_spin(__func__);
 
 	/*
 	 * Wait for a bit.  On some machines (e.g., HP's zx2000 and zx6000, INIT can be
@@ -1673,10 +1670,10 @@ ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw,
 	 */
 	if (notify_die(DIE_INIT_MONARCH_PROCESS, "INIT", regs, (long)&nd, 0, 0)
 			== NOTIFY_STOP)
-		ia64_mca_spin(__FUNCTION__);
+		ia64_mca_spin(__func__);
 	if (notify_die(DIE_INIT_MONARCH_LEAVE, "INIT", regs, (long)&nd, 0, 0)
 			== NOTIFY_STOP)
-		ia64_mca_spin(__FUNCTION__);
+		ia64_mca_spin(__func__);
 	mprintk("\nINIT dump complete.  Monarch on cpu %d returning to normal service.\n", cpu);
 	atomic_dec(&monarchs);
 	set_curr_task(cpu, previous_current);
@@ -1884,7 +1881,7 @@ ia64_mca_init(void)
 		.priority = 0/* we need to notified last */
 	};
 
-	IA64_MCA_DEBUG("%s: begin\n", __FUNCTION__);
+	IA64_MCA_DEBUG("%s: begin\n", __func__);
 
 	/* Clear the Rendez checkin flag for all cpus */
 	for(i = 0 ; i < NR_CPUS; i++)
@@ -1928,7 +1925,7 @@ ia64_mca_init(void)
 		return;
 	}
 
-	IA64_MCA_DEBUG("%s: registered MCA rendezvous spinloop and wakeup mech.\n", __FUNCTION__);
+	IA64_MCA_DEBUG("%s: registered MCA rendezvous spinloop and wakeup mech.\n", __func__);
 
 	ia64_mc_info.imi_mca_handler = ia64_tpa(mca_hldlr_ptr->fp);
 	/*
@@ -1949,7 +1946,7 @@ ia64_mca_init(void)
 		return;
 	}
 
-	IA64_MCA_DEBUG("%s: registered OS MCA handler with SAL at 0x%lx, gp = 0x%lx\n", __FUNCTION__,
+	IA64_MCA_DEBUG("%s: registered OS MCA handler with SAL at 0x%lx, gp = 0x%lx\n", __func__,
 		       ia64_mc_info.imi_mca_handler, ia64_tpa(mca_hldlr_ptr->gp));
 
 	/*
@@ -1961,7 +1958,7 @@ ia64_mca_init(void)
 	ia64_mc_info.imi_slave_init_handler = ia64_tpa(init_hldlr_ptr_slave->fp);
 	ia64_mc_info.imi_slave_init_handler_size = 0;
 
-	IA64_MCA_DEBUG("%s: OS INIT handler at %lx\n", __FUNCTION__,
+	IA64_MCA_DEBUG("%s: OS INIT handler at %lx\n", __func__,
 		       ia64_mc_info.imi_monarch_init_handler);
 
 	/* Register the os init handler with SAL */
@@ -1982,7 +1979,7 @@ ia64_mca_init(void)
 		return;
 	}
 
-	IA64_MCA_DEBUG("%s: registered OS INIT handler with SAL\n", __FUNCTION__);
+	IA64_MCA_DEBUG("%s: registered OS INIT handler with SAL\n", __func__);
 
 	/*
 	 * Configure the CMCI/P vector and handler.  Interrupts for CMC are
@@ -2042,7 +2039,7 @@ ia64_mca_late_init(void)
 	cmc_polling_enabled = 0;
 	schedule_work(&cmc_enable_work);
 
-	IA64_MCA_DEBUG("%s: CMCI/P setup and enabled.\n", __FUNCTION__);
+	IA64_MCA_DEBUG("%s: CMCI/P setup and enabled.\n", __func__);
 
 #ifdef CONFIG_ACPI
 	/* Setup the CPEI/P vector and handler */
@@ -2065,17 +2062,17 @@ ia64_mca_late_init(void)
 			ia64_cpe_irq = irq;
 			ia64_mca_register_cpev(cpe_vector);
 			IA64_MCA_DEBUG("%s: CPEI/P setup and enabled.\n",
-				       __FUNCTION__);
+				       __func__);
 			return 0;
 		}
 		printk(KERN_ERR "%s: Failed to find irq for CPE "
 		       "interrupt handler, vector %d\n",
-		       __FUNCTION__, cpe_vector);
+		       __func__, cpe_vector);
 	}
 	/* If platform doesn't support CPEI, get the timer going. */
 	if (cpe_poll_enabled) {
 		ia64_mca_cpe_poll(0UL);
-		IA64_MCA_DEBUG("%s: CPEP setup and enabled.\n", __FUNCTION__);
+		IA64_MCA_DEBUG("%s: CPEP setup and enabled.\n", __func__);
 	}
 }
 #endif
diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
index e58f4367cf11..e83e2ea3b3e0 100644
--- a/arch/ia64/kernel/module.c
+++ b/arch/ia64/kernel/module.c
@@ -493,7 +493,7 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
 	mod->arch.opd->sh_addralign = 8;
 	mod->arch.opd->sh_size = fdescs * sizeof(struct fdesc);
 	DEBUGP("%s: core.plt=%lx, init.plt=%lx, got=%lx, fdesc=%lx\n",
-	       __FUNCTION__, mod->arch.core_plt->sh_size, mod->arch.init_plt->sh_size,
+	       __func__, mod->arch.core_plt->sh_size, mod->arch.init_plt->sh_size,
 	       mod->arch.got->sh_size, mod->arch.opd->sh_size);
 	return 0;
 }
@@ -585,7 +585,7 @@ get_plt (struct module *mod, const struct insn *insn, uint64_t value, int *okp)
 #if ARCH_MODULE_DEBUG
 	if (plt_target(plt) != target_ip) {
 		printk("%s: mistargeted PLT: wanted %lx, got %lx\n",
-		       __FUNCTION__, target_ip, plt_target(plt));
+		       __func__, target_ip, plt_target(plt));
 		*okp = 0;
 		return 0;
 	}
@@ -703,7 +703,7 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
 	if (r_type == R_IA64_PCREL21BI) {
 		if (!is_internal(mod, val)) {
 			printk(KERN_ERR "%s: %s reloc against non-local symbol (%lx)\n",
-			       __FUNCTION__, reloc_name[r_type], val);
+			       __func__, reloc_name[r_type], val);
 			return -ENOEXEC;
 		}
 		format = RF_INSN21B;
@@ -737,7 +737,7 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
 	case R_IA64_LDXMOV:
 		if (gp_addressable(mod, val)) {
 			/* turn "ld8" into "mov": */
-			DEBUGP("%s: patching ld8 at %p to mov\n", __FUNCTION__, location);
+			DEBUGP("%s: patching ld8 at %p to mov\n", __func__, location);
 			ia64_patch((u64) location, 0x1fff80fe000UL, 0x10000000000UL);
 		}
 		return 0;
@@ -771,7 +771,7 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
 	if (!ok)
 		return -ENOEXEC;
 
-	DEBUGP("%s: [%p]<-%016lx = %s(%lx)\n", __FUNCTION__, location, val,
+	DEBUGP("%s: [%p]<-%016lx = %s(%lx)\n", __func__, location, val,
 	       reloc_name[r_type] ? reloc_name[r_type] : "?", sym->st_value + addend);
 
 	switch (format) {
@@ -807,7 +807,7 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
 	Elf64_Shdr *target_sec;
 	int ret;
 
-	DEBUGP("%s: applying section %u (%u relocs) to %u\n", __FUNCTION__,
+	DEBUGP("%s: applying section %u (%u relocs) to %u\n", __func__,
 	       relsec, n, sechdrs[relsec].sh_info);
 
 	target_sec = sechdrs + sechdrs[relsec].sh_info;
@@ -835,7 +835,7 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
 		gp = mod->core_size / 2;
 		gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
 		mod->arch.gp = gp;
-		DEBUGP("%s: placing gp at 0x%lx\n", __FUNCTION__, gp);
+		DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
 	}
 
 	for (i = 0; i < n; i++) {
@@ -903,7 +903,7 @@ register_unwind_table (struct module *mod)
 		init = start + num_core;
 	}
 
-	DEBUGP("%s: name=%s, gp=%lx, num_init=%lu, num_core=%lu\n", __FUNCTION__,
+	DEBUGP("%s: name=%s, gp=%lx, num_init=%lu, num_core=%lu\n", __func__,
 	       mod->name, mod->arch.gp, num_init, num_core);
 
 	/*
@@ -912,13 +912,13 @@ register_unwind_table (struct module *mod)
 	if (num_core > 0) {
 		mod->arch.core_unw_table = unw_add_unwind_table(mod->name, 0, mod->arch.gp,
 								core, core + num_core);
-		DEBUGP("%s: core: handle=%p [%p-%p)\n", __FUNCTION__,
+		DEBUGP("%s: core: handle=%p [%p-%p)\n", __func__,
 		       mod->arch.core_unw_table, core, core + num_core);
 	}
 	if (num_init > 0) {
 		mod->arch.init_unw_table = unw_add_unwind_table(mod->name, 0, mod->arch.gp,
 								init, init + num_init);
-		DEBUGP("%s: init: handle=%p [%p-%p)\n", __FUNCTION__,
+		DEBUGP("%s: init: handle=%p [%p-%p)\n", __func__,
 		       mod->arch.init_unw_table, init, init + num_init);
 	}
 }
@@ -926,7 +926,7 @@ register_unwind_table (struct module *mod)
 int
 module_finalize (const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, struct module *mod)
 {
-	DEBUGP("%s: init: entry=%p\n", __FUNCTION__, mod->init);
+	DEBUGP("%s: init: entry=%p\n", __func__, mod->init);
 	if (mod->arch.unwind)
 		register_unwind_table(mod);
 	return 0;
diff --git a/arch/ia64/kernel/msi_ia64.c b/arch/ia64/kernel/msi_ia64.c
index e86d02959794..60c6ef67ebb2 100644
--- a/arch/ia64/kernel/msi_ia64.c
+++ b/arch/ia64/kernel/msi_ia64.c
@@ -57,7 +57,7 @@ static void ia64_set_msi_irq_affinity(unsigned int irq, cpumask_t cpu_mask)
 	if (!cpu_online(cpu))
 		return;
 
-	if (reassign_irq_vector(irq, cpu))
+	if (irq_prepare_move(irq, cpu))
 		return;
 
 	read_msi_msg(irq, &msg);
@@ -119,6 +119,7 @@ void ia64_teardown_msi_irq(unsigned int irq)
 
 static void ia64_ack_msi_irq(unsigned int irq)
 {
+	irq_complete_move(irq);
 	move_native_irq(irq);
 	ia64_eoi();
 }
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index f6b99719f10f..a2aabfdc80d9 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -227,12 +227,12 @@
 #ifdef PFM_DEBUGGING
 #define DPRINT(a) \
 	do { \
-		if (unlikely(pfm_sysctl.debug >0)) { printk("%s.%d: CPU%d [%d] ", __FUNCTION__, __LINE__, smp_processor_id(), task_pid_nr(current)); printk a; } \
+		if (unlikely(pfm_sysctl.debug >0)) { printk("%s.%d: CPU%d [%d] ", __func__, __LINE__, smp_processor_id(), task_pid_nr(current)); printk a; } \
 	} while (0)
 
 #define DPRINT_ovfl(a) \
 	do { \
-		if (unlikely(pfm_sysctl.debug > 0 && pfm_sysctl.debug_ovfl >0)) { printk("%s.%d: CPU%d [%d] ", __FUNCTION__, __LINE__, smp_processor_id(), task_pid_nr(current)); printk a; } \
+		if (unlikely(pfm_sysctl.debug > 0 && pfm_sysctl.debug_ovfl >0)) { printk("%s.%d: CPU%d [%d] ", __func__, __LINE__, smp_processor_id(), task_pid_nr(current)); printk a; } \
 	} while (0)
 #endif
 
diff --git a/arch/ia64/kernel/perfmon_default_smpl.c b/arch/ia64/kernel/perfmon_default_smpl.c
index a7af1cb419f9..5f637bbfcccd 100644
--- a/arch/ia64/kernel/perfmon_default_smpl.c
+++ b/arch/ia64/kernel/perfmon_default_smpl.c
@@ -24,12 +24,12 @@ MODULE_LICENSE("GPL");
 #ifdef DEFAULT_DEBUG
 #define DPRINT(a) \
 	do { \
-		if (unlikely(pfm_sysctl.debug >0)) { printk("%s.%d: CPU%d ", __FUNCTION__, __LINE__, smp_processor_id()); printk a; } \
+		if (unlikely(pfm_sysctl.debug >0)) { printk("%s.%d: CPU%d ", __func__, __LINE__, smp_processor_id()); printk a; } \
 	} while (0)
 
 #define DPRINT_ovfl(a) \
 	do { \
-		if (unlikely(pfm_sysctl.debug > 0 && pfm_sysctl.debug_ovfl >0)) { printk("%s.%d: CPU%d ", __FUNCTION__, __LINE__, smp_processor_id()); printk a; } \
+		if (unlikely(pfm_sysctl.debug > 0 && pfm_sysctl.debug_ovfl >0)) { printk("%s.%d: CPU%d ", __func__, __LINE__, smp_processor_id()); printk a; } \
 	} while (0)
 
 #else
diff --git a/arch/ia64/kernel/ptrace.c b/arch/ia64/kernel/ptrace.c
index 331d6768b5d5..ab784ec4319d 100644
--- a/arch/ia64/kernel/ptrace.c
+++ b/arch/ia64/kernel/ptrace.c
@@ -698,52 +698,6 @@ thread_matches (struct task_struct *thread, unsigned long addr)
698} 698}
699 699
700/* 700/*
701 * GDB apparently wants to be able to read the register-backing store
702 * of any thread when attached to a given process. If we are peeking
703 * or poking an address that happens to reside in the kernel-backing
704 * store of another thread, we need to attach to that thread, because
705 * otherwise we end up accessing stale data.
706 *
707 * task_list_lock must be read-locked before calling this routine!
708 */
709static struct task_struct *
710find_thread_for_addr (struct task_struct *child, unsigned long addr)
711{
712 struct task_struct *p;
713 struct mm_struct *mm;
714 struct list_head *this, *next;
715 int mm_users;
716
717 if (!(mm = get_task_mm(child)))
718 return child;
719
720 /* -1 because of our get_task_mm(): */
721 mm_users = atomic_read(&mm->mm_users) - 1;
722 if (mm_users <= 1)
723 goto out; /* not multi-threaded */
724
725 /*
726 * Traverse the current process' children list. Every task that
727 * one attaches to becomes a child. And it is only attached children
728 * of the debugger that are of interest (ptrace_check_attach checks
729 * for this).
730 */
731 list_for_each_safe(this, next, &current->children) {
732 p = list_entry(this, struct task_struct, sibling);
733 if (p->tgid != child->tgid)
734 continue;
735 if (thread_matches(p, addr)) {
736 child = p;
737 goto out;
738 }
739 }
740
741 out:
742 mmput(mm);
743 return child;
744}
745
746/*
747 * Write f32-f127 back to task->thread.fph if it has been modified. 701 * Write f32-f127 back to task->thread.fph if it has been modified.
748 */ 702 */
749inline void 703inline void
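
The removed helper existed because the old peek/poke path read a thread's register backing store out of that thread's kernel stack, so a request landing on another thread's RBS had to be rerouted to the right sibling (the deleted comment above spells this out). The replacement path later in this file goes through access_process_vm(), which resolves the address in the target's address space and needs no such routing. Roughly the same idea is visible from userspace through /proc/<pid>/mem (illustrative sketch; in practice the reader must be ptrace-attached for the read to be permitted):

#include <stdio.h>
#include <stdlib.h>

int main(int argc, char **argv)
{
	char path[64];
	unsigned long addr, word;
	FILE *f;

	if (argc != 3) {
		fprintf(stderr, "usage: %s <pid> <hex-addr>\n", argv[0]);
		return 1;
	}
	addr = strtoul(argv[2], NULL, 16);
	snprintf(path, sizeof(path), "/proc/%s/mem", argv[1]);
	f = fopen(path, "rb");
	if (!f || fseek(f, (long) addr, SEEK_SET) != 0 ||
	    fread(&word, sizeof(word), 1, f) != 1) {
		perror("read");
		return 1;
	}
	printf("word at %#lx: %#lx\n", addr, word);
	fclose(f);
	return 0;
}
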
@@ -826,14 +780,14 @@ convert_to_non_syscall (struct task_struct *child, struct pt_regs *pt,
826 if ((long)((unsigned long)child + IA64_STK_OFFSET - sp) 780 if ((long)((unsigned long)child + IA64_STK_OFFSET - sp)
827 < IA64_PT_REGS_SIZE) { 781 < IA64_PT_REGS_SIZE) {
828 dprintk("ptrace.%s: ran off the top of the kernel " 782 dprintk("ptrace.%s: ran off the top of the kernel "
829 "stack\n", __FUNCTION__); 783 "stack\n", __func__);
830 return; 784 return;
831 } 785 }
832 if (unw_get_pr (&prev_info, &pr) < 0) { 786 if (unw_get_pr (&prev_info, &pr) < 0) {
833 unw_get_rp(&prev_info, &ip); 787 unw_get_rp(&prev_info, &ip);
834 dprintk("ptrace.%s: failed to read " 788 dprintk("ptrace.%s: failed to read "
835 "predicate register (ip=0x%lx)\n", 789 "predicate register (ip=0x%lx)\n",
836 __FUNCTION__, ip); 790 __func__, ip);
837 return; 791 return;
838 } 792 }
839 if (unw_is_intr_frame(&info) 793 if (unw_is_intr_frame(&info)
@@ -908,7 +862,7 @@ static int
908access_uarea (struct task_struct *child, unsigned long addr, 862access_uarea (struct task_struct *child, unsigned long addr,
909 unsigned long *data, int write_access) 863 unsigned long *data, int write_access)
910{ 864{
911 unsigned long *ptr, regnum, urbs_end, rnat_addr, cfm; 865 unsigned long *ptr, regnum, urbs_end, cfm;
912 struct switch_stack *sw; 866 struct switch_stack *sw;
913 struct pt_regs *pt; 867 struct pt_regs *pt;
914# define pt_reg_addr(pt, reg) ((void *) \ 868# define pt_reg_addr(pt, reg) ((void *) \
@@ -1011,14 +965,9 @@ access_uarea (struct task_struct *child, unsigned long addr,
1011 * the kernel was entered. 965 * the kernel was entered.
1012 * 966 *
1013 * Furthermore, when changing the contents of 967 * Furthermore, when changing the contents of
1014 * PT_AR_BSP (or PT_CFM) we MUST copy any 968 * PT_AR_BSP (or PT_CFM) while the task is
1015 * users-level stacked registers that are 969 * blocked in a system call, convert the state
1016 * stored on the kernel stack back to 970 * so that the non-system-call exit
1017 * user-space because otherwise, we might end
1018 * up clobbering kernel stacked registers.
1019 * Also, if this happens while the task is
1020 * blocked in a system call, which convert the
1021 * state such that the non-system-call exit
1022 * path is used. This ensures that the proper 971 * path is used. This ensures that the proper
1023 * state will be picked up when resuming 972 * state will be picked up when resuming
1024 * execution. However, it *also* means that 973 * execution. However, it *also* means that
@@ -1035,10 +984,6 @@ access_uarea (struct task_struct *child, unsigned long addr,
1035 urbs_end = ia64_get_user_rbs_end(child, pt, &cfm); 984 urbs_end = ia64_get_user_rbs_end(child, pt, &cfm);
1036 if (write_access) { 985 if (write_access) {
1037 if (*data != urbs_end) { 986 if (*data != urbs_end) {
1038 if (ia64_sync_user_rbs(child, sw,
1039 pt->ar_bspstore,
1040 urbs_end) < 0)
1041 return -1;
1042 if (in_syscall(pt)) 987 if (in_syscall(pt))
1043 convert_to_non_syscall(child, 988 convert_to_non_syscall(child,
1044 pt, 989 pt,
@@ -1058,10 +1003,6 @@ access_uarea (struct task_struct *child, unsigned long addr,
1058 urbs_end = ia64_get_user_rbs_end(child, pt, &cfm); 1003 urbs_end = ia64_get_user_rbs_end(child, pt, &cfm);
1059 if (write_access) { 1004 if (write_access) {
1060 if (((cfm ^ *data) & PFM_MASK) != 0) { 1005 if (((cfm ^ *data) & PFM_MASK) != 0) {
1061 if (ia64_sync_user_rbs(child, sw,
1062 pt->ar_bspstore,
1063 urbs_end) < 0)
1064 return -1;
1065 if (in_syscall(pt)) 1006 if (in_syscall(pt))
1066 convert_to_non_syscall(child, 1007 convert_to_non_syscall(child,
1067 pt, 1008 pt,
@@ -1093,16 +1034,8 @@ access_uarea (struct task_struct *child, unsigned long addr,
1093 return 0; 1034 return 0;
1094 1035
1095 case PT_AR_RNAT: 1036 case PT_AR_RNAT:
1096 urbs_end = ia64_get_user_rbs_end(child, pt, NULL); 1037 ptr = pt_reg_addr(pt, ar_rnat);
1097 rnat_addr = (long) ia64_rse_rnat_addr((long *) 1038 break;
1098 urbs_end);
1099 if (write_access)
1100 return ia64_poke(child, sw, urbs_end,
1101 rnat_addr, *data);
1102 else
1103 return ia64_peek(child, sw, urbs_end,
1104 rnat_addr, data);
1105
1106 case PT_R1: 1039 case PT_R1:
1107 ptr = pt_reg_addr(pt, r1); 1040 ptr = pt_reg_addr(pt, r1);
1108 break; 1041 break;
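
The old PT_AR_RNAT case computed the address of the NaT-collection slot inside the user register backing store and peeked or poked it there; the new code simply reads ar.rnat out of pt_regs, presumably because the RBS is now kept in sync at every ptrace stop. For reference, the slot arithmetic the removed code leaned on: every 64th slot of the backing store (the one whose address has bits 3-8 all set) collects the NaT bits of the 63 slots before it. A sketch of that computation, with the constant taken from my recollection of ia64_rse_rnat_addr() rather than from this tree, so treat it as an assumption:

#include <stdio.h>

/* Assumed to mirror ia64_rse_rnat_addr(): OR-ing in 0x3f << 3 moves a
 * slot address up to the RNaT collection slot covering it.
 */
static unsigned long rse_rnat_addr(unsigned long slot_addr)
{
	return slot_addr | (0x3fUL << 3);
}

int main(void)
{
	unsigned long bsp = 0x600000000000a008UL;	/* arbitrary example slot */

	printf("RNaT slot covering %#lx is %#lx\n", bsp, rse_rnat_addr(bsp));
	return 0;
}
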
@@ -1521,215 +1454,97 @@ ptrace_setregs (struct task_struct *child, struct pt_all_user_regs __user *ppr)
1521 return ret; 1454 return ret;
1522} 1455}
1523 1456
1524/*
1525 * Called by kernel/ptrace.c when detaching..
1526 *
1527 * Make sure the single step bit is not set.
1528 */
1529void 1457void
1530ptrace_disable (struct task_struct *child) 1458user_enable_single_step (struct task_struct *child)
1531{ 1459{
1532 struct ia64_psr *child_psr = ia64_psr(task_pt_regs(child)); 1460 struct ia64_psr *child_psr = ia64_psr(task_pt_regs(child));
1533 1461
1534 /* make sure the single step/taken-branch trap bits are not set: */ 1462 set_tsk_thread_flag(child, TIF_SINGLESTEP);
1535 clear_tsk_thread_flag(child, TIF_SINGLESTEP); 1463 child_psr->ss = 1;
1536 child_psr->ss = 0;
1537 child_psr->tb = 0;
1538} 1464}
1539 1465
1540asmlinkage long 1466void
1541sys_ptrace (long request, pid_t pid, unsigned long addr, unsigned long data) 1467user_enable_block_step (struct task_struct *child)
1542{ 1468{
1543 struct pt_regs *pt; 1469 struct ia64_psr *child_psr = ia64_psr(task_pt_regs(child));
1544 unsigned long urbs_end, peek_or_poke;
1545 struct task_struct *child;
1546 struct switch_stack *sw;
1547 long ret;
1548 struct unw_frame_info info;
1549 1470
1550 lock_kernel(); 1471 set_tsk_thread_flag(child, TIF_SINGLESTEP);
1551 ret = -EPERM; 1472 child_psr->tb = 1;
1552 if (request == PTRACE_TRACEME) { 1473}
1553 ret = ptrace_traceme();
1554 goto out;
1555 }
1556 1474
1557 peek_or_poke = (request == PTRACE_PEEKTEXT 1475void
1558 || request == PTRACE_PEEKDATA 1476user_disable_single_step (struct task_struct *child)
1559 || request == PTRACE_POKETEXT 1477{
1560 || request == PTRACE_POKEDATA); 1478 struct ia64_psr *child_psr = ia64_psr(task_pt_regs(child));
1561 ret = -ESRCH;
1562 read_lock(&tasklist_lock);
1563 {
1564 child = find_task_by_pid(pid);
1565 if (child) {
1566 if (peek_or_poke)
1567 child = find_thread_for_addr(child, addr);
1568 get_task_struct(child);
1569 }
1570 }
1571 read_unlock(&tasklist_lock);
1572 if (!child)
1573 goto out;
1574 ret = -EPERM;
1575 if (pid == 1) /* no messing around with init! */
1576 goto out_tsk;
1577
1578 if (request == PTRACE_ATTACH) {
1579 ret = ptrace_attach(child);
1580 if (!ret)
1581 arch_ptrace_attach(child);
1582 goto out_tsk;
1583 }
1584 1479
1585 ret = ptrace_check_attach(child, request == PTRACE_KILL); 1480 /* make sure the single step/taken-branch trap bits are not set: */
1586 if (ret < 0) 1481 clear_tsk_thread_flag(child, TIF_SINGLESTEP);
1587 goto out_tsk; 1482 child_psr->ss = 0;
1483 child_psr->tb = 0;
1484}
1588 1485
1589 pt = task_pt_regs(child); 1486/*
1590 sw = (struct switch_stack *) (child->thread.ksp + 16); 1487 * Called by kernel/ptrace.c when detaching..
1488 *
1489 * Make sure the single step bit is not set.
1490 */
1491void
1492ptrace_disable (struct task_struct *child)
1493{
1494 user_disable_single_step(child);
1495}
1591 1496
1497long
1498arch_ptrace (struct task_struct *child, long request, long addr, long data)
1499{
1592 switch (request) { 1500 switch (request) {
1593 case PTRACE_PEEKTEXT: 1501 case PTRACE_PEEKTEXT:
1594 case PTRACE_PEEKDATA: 1502 case PTRACE_PEEKDATA:
1595 /* read word at location addr */ 1503 /* read word at location addr */
1596 urbs_end = ia64_get_user_rbs_end(child, pt, NULL); 1504 if (access_process_vm(child, addr, &data, sizeof(data), 0)
1597 ret = ia64_peek(child, sw, urbs_end, addr, &data); 1505 != sizeof(data))
1598 if (ret == 0) { 1506 return -EIO;
1599 ret = data; 1507 /* ensure return value is not mistaken for error code */
1600 /* ensure "ret" is not mistaken as an error code: */ 1508 force_successful_syscall_return();
1601 force_successful_syscall_return(); 1509 return data;
1602 }
1603 goto out_tsk;
1604
1605 case PTRACE_POKETEXT:
1606 case PTRACE_POKEDATA:
1607 /* write the word at location addr */
1608 urbs_end = ia64_get_user_rbs_end(child, pt, NULL);
1609 ret = ia64_poke(child, sw, urbs_end, addr, data);
1610
1611 /* Make sure user RBS has the latest data */
1612 unw_init_from_blocked_task(&info, child);
1613 do_sync_rbs(&info, ia64_sync_user_rbs);
1614 1510
1615 goto out_tsk; 1511 /* PTRACE_POKETEXT and PTRACE_POKEDATA is handled
1512 * by the generic ptrace_request().
1513 */
1616 1514
1617 case PTRACE_PEEKUSR: 1515 case PTRACE_PEEKUSR:
1618 /* read the word at addr in the USER area */ 1516 /* read the word at addr in the USER area */
1619 if (access_uarea(child, addr, &data, 0) < 0) { 1517 if (access_uarea(child, addr, &data, 0) < 0)
1620 ret = -EIO; 1518 return -EIO;
1621 goto out_tsk; 1519 /* ensure return value is not mistaken for error code */
1622 }
1623 ret = data;
1624 /* ensure "ret" is not mistaken as an error code */
1625 force_successful_syscall_return(); 1520 force_successful_syscall_return();
1626 goto out_tsk; 1521 return data;
1627 1522
1628 case PTRACE_POKEUSR: 1523 case PTRACE_POKEUSR:
1629 /* write the word at addr in the USER area */ 1524 /* write the word at addr in the USER area */
1630 if (access_uarea(child, addr, &data, 1) < 0) { 1525 if (access_uarea(child, addr, &data, 1) < 0)
1631 ret = -EIO; 1526 return -EIO;
1632 goto out_tsk; 1527 return 0;
1633 }
1634 ret = 0;
1635 goto out_tsk;
1636 1528
1637 case PTRACE_OLD_GETSIGINFO: 1529 case PTRACE_OLD_GETSIGINFO:
1638 /* for backwards-compatibility */ 1530 /* for backwards-compatibility */
1639 ret = ptrace_request(child, PTRACE_GETSIGINFO, addr, data); 1531 return ptrace_request(child, PTRACE_GETSIGINFO, addr, data);
1640 goto out_tsk;
1641 1532
1642 case PTRACE_OLD_SETSIGINFO: 1533 case PTRACE_OLD_SETSIGINFO:
1643 /* for backwards-compatibility */ 1534 /* for backwards-compatibility */
1644 ret = ptrace_request(child, PTRACE_SETSIGINFO, addr, data); 1535 return ptrace_request(child, PTRACE_SETSIGINFO, addr, data);
1645 goto out_tsk;
1646
1647 case PTRACE_SYSCALL:
1648 /* continue and stop at next (return from) syscall */
1649 case PTRACE_CONT:
1650 /* restart after signal. */
1651 ret = -EIO;
1652 if (!valid_signal(data))
1653 goto out_tsk;
1654 if (request == PTRACE_SYSCALL)
1655 set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
1656 else
1657 clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
1658 child->exit_code = data;
1659 1536
1660 /* 1537 case PTRACE_GETREGS:
1661 * Make sure the single step/taken-branch trap bits 1538 return ptrace_getregs(child,
1662 * are not set: 1539 (struct pt_all_user_regs __user *) data);
1663 */
1664 clear_tsk_thread_flag(child, TIF_SINGLESTEP);
1665 ia64_psr(pt)->ss = 0;
1666 ia64_psr(pt)->tb = 0;
1667 1540
1668 wake_up_process(child); 1541 case PTRACE_SETREGS:
1669 ret = 0; 1542 return ptrace_setregs(child,
1670 goto out_tsk; 1543 (struct pt_all_user_regs __user *) data);
1671 1544
1672 case PTRACE_KILL: 1545 default:
1673 /* 1546 return ptrace_request(child, request, addr, data);
1674 * Make the child exit. Best I can do is send it a
1675 * sigkill. Perhaps it should be put in the status
1676 * that it wants to exit.
1677 */
1678 if (child->exit_state == EXIT_ZOMBIE)
1679 /* already dead */
1680 goto out_tsk;
1681 child->exit_code = SIGKILL;
1682
1683 ptrace_disable(child);
1684 wake_up_process(child);
1685 ret = 0;
1686 goto out_tsk;
1687
1688 case PTRACE_SINGLESTEP:
1689 /* let child execute for one instruction */
1690 case PTRACE_SINGLEBLOCK:
1691 ret = -EIO;
1692 if (!valid_signal(data))
1693 goto out_tsk;
1694
1695 clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
1696 set_tsk_thread_flag(child, TIF_SINGLESTEP);
1697 if (request == PTRACE_SINGLESTEP) {
1698 ia64_psr(pt)->ss = 1;
1699 } else {
1700 ia64_psr(pt)->tb = 1;
1701 }
1702 child->exit_code = data;
1703
1704 /* give it a chance to run. */
1705 wake_up_process(child);
1706 ret = 0;
1707 goto out_tsk;
1708
1709 case PTRACE_DETACH:
1710 /* detach a process that was attached. */
1711 ret = ptrace_detach(child, data);
1712 goto out_tsk;
1713
1714 case PTRACE_GETREGS:
1715 ret = ptrace_getregs(child,
1716 (struct pt_all_user_regs __user *) data);
1717 goto out_tsk;
1718
1719 case PTRACE_SETREGS:
1720 ret = ptrace_setregs(child,
1721 (struct pt_all_user_regs __user *) data);
1722 goto out_tsk;
1723
1724 default:
1725 ret = ptrace_request(child, request, addr, data);
1726 goto out_tsk;
1727 } 1547 }
1728 out_tsk:
1729 put_task_struct(child);
1730 out:
1731 unlock_kernel();
1732 return ret;
1733} 1548}
1734 1549
1735 1550
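
Net effect for this file: ia64 drops its private sys_ptrace() in favour of the generic entry points. arch_ptrace() keeps only the architecture-specific requests (PEEKUSR/POKEUSR, the old-style siginfo calls, GETREGS/SETREGS) and defers everything else to ptrace_request(); PEEKTEXT/PEEKDATA now read through access_process_vm(); and stepping is supplied through the new user_enable_single_step(), user_enable_block_step() and user_disable_single_step() hooks, which the generic code presumably invokes for PTRACE_SINGLESTEP, PTRACE_SINGLEBLOCK and detach. User-visible behaviour should be unchanged; a minimal tracer to sanity-check single-stepping (plain portable userspace code, nothing ia64-specific):

#include <signal.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	pid_t pid = fork();

	if (pid == 0) {				/* child: ask to be traced */
		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
		raise(SIGSTOP);			/* rendezvous with the tracer */
		for (volatile int i = 0; i < 100; i++)
			;
		_exit(0);
	}

	int status, step;

	waitpid(pid, &status, 0);		/* child has stopped itself */
	for (step = 0; step < 5 && !WIFEXITED(status); step++) {
		ptrace(PTRACE_SINGLESTEP, pid, NULL, 0);
		waitpid(pid, &status, 0);
		printf("step %d: child stopped again\n", step);
	}
	kill(pid, SIGKILL);
	waitpid(pid, &status, 0);
	return 0;
}
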
diff --git a/arch/ia64/kernel/sal.c b/arch/ia64/kernel/sal.c
index f44fe8412162..a3022dc48ef8 100644
--- a/arch/ia64/kernel/sal.c
+++ b/arch/ia64/kernel/sal.c
@@ -109,6 +109,13 @@ check_versions (struct ia64_sal_systab *systab)
109 sal_revision = SAL_VERSION_CODE(2, 8); 109 sal_revision = SAL_VERSION_CODE(2, 8);
110 sal_version = SAL_VERSION_CODE(0, 0); 110 sal_version = SAL_VERSION_CODE(0, 0);
111 } 111 }
112
113 if (ia64_platform_is("sn2") && (sal_revision == SAL_VERSION_CODE(2, 9)))
114 /*
115 * SGI Altix has hard-coded version 2.9 in their prom
116 * but they actually implement 3.2, so let's fix it here.
117 */
118 sal_revision = SAL_VERSION_CODE(3, 2);
112} 119}
113 120
114static void __init 121static void __init
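
The added hunk patches up SGI Altix (sn2) firmware that reports SAL revision 2.9 while actually implementing 3.2, so that later feature tests key off the real revision. The straight equality test works because SAL_VERSION_CODE() packs the version into a single integer; the encoding below is an assumption based on my recollection of the ia64 sal.h, not read out of this tree:

#include <stdio.h>

/* Assumed encoding: major in the high byte, minor in the low byte. */
#define SAL_VERSION_CODE(major, minor) (((major) << 8) | (minor))

int main(void)
{
	int sal_revision = SAL_VERSION_CODE(2, 9);	/* what the PROM claims */

	printf("reported: %#x\n", sal_revision);	/* 0x209 */
	if (sal_revision == SAL_VERSION_CODE(2, 9))
		sal_revision = SAL_VERSION_CODE(3, 2);	/* what it implements */
	printf("fixed up: %#x\n", sal_revision);	/* 0x302 */
	return 0;
}
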
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index ebd1a09f3201..4aa9eaea76c3 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -690,7 +690,7 @@ get_model_name(__u8 family, __u8 model)
690 if (overflow++ == 0) 690 if (overflow++ == 0)
691 printk(KERN_ERR 691 printk(KERN_ERR
692 "%s: Table overflow. Some processor model information will be missing\n", 692 "%s: Table overflow. Some processor model information will be missing\n",
693 __FUNCTION__); 693 __func__);
694 return "Unknown"; 694 return "Unknown";
695} 695}
696 696
@@ -785,7 +785,7 @@ get_max_cacheline_size (void)
785 status = ia64_pal_cache_summary(&levels, &unique_caches); 785 status = ia64_pal_cache_summary(&levels, &unique_caches);
786 if (status != 0) { 786 if (status != 0) {
787 printk(KERN_ERR "%s: ia64_pal_cache_summary() failed (status=%ld)\n", 787 printk(KERN_ERR "%s: ia64_pal_cache_summary() failed (status=%ld)\n",
788 __FUNCTION__, status); 788 __func__, status);
789 max = SMP_CACHE_BYTES; 789 max = SMP_CACHE_BYTES;
790 /* Safest setup for "flush_icache_range()" */ 790 /* Safest setup for "flush_icache_range()" */
791 ia64_i_cache_stride_shift = I_CACHE_STRIDE_SHIFT; 791 ia64_i_cache_stride_shift = I_CACHE_STRIDE_SHIFT;
@@ -798,7 +798,7 @@ get_max_cacheline_size (void)
798 if (status != 0) { 798 if (status != 0) {
799 printk(KERN_ERR 799 printk(KERN_ERR
800 "%s: ia64_pal_cache_config_info(l=%lu, 2) failed (status=%ld)\n", 800 "%s: ia64_pal_cache_config_info(l=%lu, 2) failed (status=%ld)\n",
801 __FUNCTION__, l, status); 801 __func__, l, status);
802 max = SMP_CACHE_BYTES; 802 max = SMP_CACHE_BYTES;
803 /* The safest setup for "flush_icache_range()" */ 803 /* The safest setup for "flush_icache_range()" */
804 cci.pcci_stride = I_CACHE_STRIDE_SHIFT; 804 cci.pcci_stride = I_CACHE_STRIDE_SHIFT;
@@ -814,7 +814,7 @@ get_max_cacheline_size (void)
814 if (status != 0) { 814 if (status != 0) {
815 printk(KERN_ERR 815 printk(KERN_ERR
816 "%s: ia64_pal_cache_config_info(l=%lu, 1) failed (status=%ld)\n", 816 "%s: ia64_pal_cache_config_info(l=%lu, 1) failed (status=%ld)\n",
817 __FUNCTION__, l, status); 817 __func__, l, status);
818 /* The safest setup for "flush_icache_range()" */ 818 /* The safest setup for "flush_icache_range()" */
819 cci.pcci_stride = I_CACHE_STRIDE_SHIFT; 819 cci.pcci_stride = I_CACHE_STRIDE_SHIFT;
820 } 820 }
diff --git a/arch/ia64/kernel/signal.c b/arch/ia64/kernel/signal.c
index 309da3567bc8..5740296c35af 100644
--- a/arch/ia64/kernel/signal.c
+++ b/arch/ia64/kernel/signal.c
@@ -342,15 +342,33 @@ setup_frame (int sig, struct k_sigaction *ka, siginfo_t *info, sigset_t *set,
342 342
343 new_sp = scr->pt.r12; 343 new_sp = scr->pt.r12;
344 tramp_addr = (unsigned long) __kernel_sigtramp; 344 tramp_addr = (unsigned long) __kernel_sigtramp;
345 if ((ka->sa.sa_flags & SA_ONSTACK) && sas_ss_flags(new_sp) == 0) { 345 if (ka->sa.sa_flags & SA_ONSTACK) {
346 new_sp = current->sas_ss_sp + current->sas_ss_size; 346 int onstack = sas_ss_flags(new_sp);
347 /* 347
348 * We need to check for the register stack being on the signal stack 348 if (onstack == 0) {
349 * separately, because it's switched separately (memory stack is switched 349 new_sp = current->sas_ss_sp + current->sas_ss_size;
350 * in the kernel, register stack is switched in the signal trampoline). 350 /*
351 */ 351 * We need to check for the register stack being on the
352 if (!rbs_on_sig_stack(scr->pt.ar_bspstore)) 352 * signal stack separately, because it's switched
353 new_rbs = (current->sas_ss_sp + sizeof(long) - 1) & ~(sizeof(long) - 1); 353 * separately (memory stack is switched in the kernel,
354 * register stack is switched in the signal trampoline).
355 */
356 if (!rbs_on_sig_stack(scr->pt.ar_bspstore))
357 new_rbs = ALIGN(current->sas_ss_sp,
358 sizeof(long));
359 } else if (onstack == SS_ONSTACK) {
360 unsigned long check_sp;
361
362 /*
363 * If we are on the alternate signal stack and would
364 * overflow it, don't. Return an always-bogus address
365 * instead so we will die with SIGSEGV.
366 */
367 check_sp = (new_sp - sizeof(*frame)) & -STACK_ALIGN;
368 if (!likely(on_sig_stack(check_sp)))
369 return force_sigsegv_info(sig, (void __user *)
370 check_sp);
371 }
354 } 372 }
355 frame = (void __user *) ((new_sp - sizeof(*frame)) & -STACK_ALIGN); 373 frame = (void __user *) ((new_sp - sizeof(*frame)) & -STACK_ALIGN);
356 374
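
Two changes ride in this hunk. First, the open-coded round-up (sas_ss_sp + sizeof(long) - 1) & ~(sizeof(long) - 1) becomes ALIGN(sas_ss_sp, sizeof(long)), which is the same arithmetic. Second, a new SS_ONSTACK branch refuses to build a signal frame that would run off the bottom of the alternate signal stack: instead of silently scribbling below the sigaltstack region, the task is handed an address guaranteed to fault and dies with a clean SIGSEGV. The userspace setup this protects looks like the following (plain POSIX, nothing ia64-specific):

#include <signal.h>
#include <stdio.h>
#include <stdlib.h>

static void handler(int sig)
{
	int marker;

	/* &marker lands inside the sigaltstack region */
	printf("signal %d handled near %p\n", sig, (void *) &marker);
}

int main(void)
{
	stack_t ss = { .ss_sp = malloc(SIGSTKSZ), .ss_size = SIGSTKSZ };
	struct sigaction sa = { .sa_handler = handler, .sa_flags = SA_ONSTACK };

	sigaltstack(&ss, NULL);
	sigaction(SIGUSR1, &sa, NULL);
	raise(SIGUSR1);
	return 0;
}
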
diff --git a/arch/ia64/kernel/unaligned.c b/arch/ia64/kernel/unaligned.c
index 52f70bbc192a..6903361d11a5 100644
--- a/arch/ia64/kernel/unaligned.c
+++ b/arch/ia64/kernel/unaligned.c
@@ -28,7 +28,7 @@ extern int die_if_kernel(char *str, struct pt_regs *regs, long err);
28#undef DEBUG_UNALIGNED_TRAP 28#undef DEBUG_UNALIGNED_TRAP
29 29
30#ifdef DEBUG_UNALIGNED_TRAP 30#ifdef DEBUG_UNALIGNED_TRAP
31# define DPRINT(a...) do { printk("%s %u: ", __FUNCTION__, __LINE__); printk (a); } while (0) 31# define DPRINT(a...) do { printk("%s %u: ", __func__, __LINE__); printk (a); } while (0)
32# define DDUMP(str,vp,len) dump(str, vp, len) 32# define DDUMP(str,vp,len) dump(str, vp, len)
33 33
34static void 34static void
@@ -674,7 +674,7 @@ emulate_load_updates (update_t type, load_store_t ld, struct pt_regs *regs, unsi
674 * just in case. 674 * just in case.
675 */ 675 */
676 if (ld.x6_op == 1 || ld.x6_op == 3) { 676 if (ld.x6_op == 1 || ld.x6_op == 3) {
677 printk(KERN_ERR "%s: register update on speculative load, error\n", __FUNCTION__); 677 printk(KERN_ERR "%s: register update on speculative load, error\n", __func__);
678 if (die_if_kernel("unaligned reference on speculative load with register update\n", 678 if (die_if_kernel("unaligned reference on speculative load with register update\n",
679 regs, 30)) 679 regs, 30))
680 return; 680 return;
@@ -1104,7 +1104,7 @@ emulate_load_floatpair (unsigned long ifa, load_store_t ld, struct pt_regs *regs
1104 */ 1104 */
1105 if (ld.x6_op == 1 || ld.x6_op == 3) 1105 if (ld.x6_op == 1 || ld.x6_op == 3)
1106 printk(KERN_ERR "%s: register update on speculative load pair, error\n", 1106 printk(KERN_ERR "%s: register update on speculative load pair, error\n",
1107 __FUNCTION__); 1107 __func__);
1108 1108
1109 setreg(ld.r3, ifa, 0, regs); 1109 setreg(ld.r3, ifa, 0, regs);
1110 } 1110 }
diff --git a/arch/ia64/kernel/unwind.c b/arch/ia64/kernel/unwind.c
index c1bdb5131814..67810b77d998 100644
--- a/arch/ia64/kernel/unwind.c
+++ b/arch/ia64/kernel/unwind.c
@@ -257,7 +257,7 @@ pt_regs_off (unsigned long reg)
257 off = unw.pt_regs_offsets[reg]; 257 off = unw.pt_regs_offsets[reg];
258 258
259 if (off < 0) { 259 if (off < 0) {
260 UNW_DPRINT(0, "unwind.%s: bad scratch reg r%lu\n", __FUNCTION__, reg); 260 UNW_DPRINT(0, "unwind.%s: bad scratch reg r%lu\n", __func__, reg);
261 off = 0; 261 off = 0;
262 } 262 }
263 return (unsigned long) off; 263 return (unsigned long) off;
@@ -268,13 +268,13 @@ get_scratch_regs (struct unw_frame_info *info)
268{ 268{
269 if (!info->pt) { 269 if (!info->pt) {
270 /* This should not happen with valid unwind info. */ 270 /* This should not happen with valid unwind info. */
271 UNW_DPRINT(0, "unwind.%s: bad unwind info: resetting info->pt\n", __FUNCTION__); 271 UNW_DPRINT(0, "unwind.%s: bad unwind info: resetting info->pt\n", __func__);
272 if (info->flags & UNW_FLAG_INTERRUPT_FRAME) 272 if (info->flags & UNW_FLAG_INTERRUPT_FRAME)
273 info->pt = (unsigned long) ((struct pt_regs *) info->psp - 1); 273 info->pt = (unsigned long) ((struct pt_regs *) info->psp - 1);
274 else 274 else
275 info->pt = info->sp - 16; 275 info->pt = info->sp - 16;
276 } 276 }
277 UNW_DPRINT(3, "unwind.%s: sp 0x%lx pt 0x%lx\n", __FUNCTION__, info->sp, info->pt); 277 UNW_DPRINT(3, "unwind.%s: sp 0x%lx pt 0x%lx\n", __func__, info->sp, info->pt);
278 return (struct pt_regs *) info->pt; 278 return (struct pt_regs *) info->pt;
279} 279}
280 280
@@ -294,7 +294,7 @@ unw_access_gr (struct unw_frame_info *info, int regnum, unsigned long *val, char
294 return 0; 294 return 0;
295 } 295 }
296 UNW_DPRINT(0, "unwind.%s: trying to access non-existent r%u\n", 296 UNW_DPRINT(0, "unwind.%s: trying to access non-existent r%u\n",
297 __FUNCTION__, regnum); 297 __func__, regnum);
298 return -1; 298 return -1;
299 } 299 }
300 300
@@ -341,7 +341,7 @@ unw_access_gr (struct unw_frame_info *info, int regnum, unsigned long *val, char
341 { 341 {
342 UNW_DPRINT(0, "unwind.%s: %p outside of regstk " 342 UNW_DPRINT(0, "unwind.%s: %p outside of regstk "
343 "[0x%lx-0x%lx)\n", 343 "[0x%lx-0x%lx)\n",
344 __FUNCTION__, (void *) addr, 344 __func__, (void *) addr,
345 info->regstk.limit, 345 info->regstk.limit,
346 info->regstk.top); 346 info->regstk.top);
347 return -1; 347 return -1;
@@ -374,7 +374,7 @@ unw_access_gr (struct unw_frame_info *info, int regnum, unsigned long *val, char
374 || (unsigned long) addr >= info->regstk.top) 374 || (unsigned long) addr >= info->regstk.top)
375 { 375 {
376 UNW_DPRINT(0, "unwind.%s: ignoring attempt to access register outside " 376 UNW_DPRINT(0, "unwind.%s: ignoring attempt to access register outside "
377 "of rbs\n", __FUNCTION__); 377 "of rbs\n", __func__);
378 return -1; 378 return -1;
379 } 379 }
380 if ((unsigned long) nat_addr >= info->regstk.top) 380 if ((unsigned long) nat_addr >= info->regstk.top)
@@ -385,7 +385,7 @@ unw_access_gr (struct unw_frame_info *info, int regnum, unsigned long *val, char
385 if (write) { 385 if (write) {
386 if (read_only(addr)) { 386 if (read_only(addr)) {
387 UNW_DPRINT(0, "unwind.%s: ignoring attempt to write read-only location\n", 387 UNW_DPRINT(0, "unwind.%s: ignoring attempt to write read-only location\n",
388 __FUNCTION__); 388 __func__);
389 } else { 389 } else {
390 *addr = *val; 390 *addr = *val;
391 if (*nat) 391 if (*nat)
@@ -427,13 +427,13 @@ unw_access_br (struct unw_frame_info *info, int regnum, unsigned long *val, int
427 427
428 default: 428 default:
429 UNW_DPRINT(0, "unwind.%s: trying to access non-existent b%u\n", 429 UNW_DPRINT(0, "unwind.%s: trying to access non-existent b%u\n",
430 __FUNCTION__, regnum); 430 __func__, regnum);
431 return -1; 431 return -1;
432 } 432 }
433 if (write) 433 if (write)
434 if (read_only(addr)) { 434 if (read_only(addr)) {
435 UNW_DPRINT(0, "unwind.%s: ignoring attempt to write read-only location\n", 435 UNW_DPRINT(0, "unwind.%s: ignoring attempt to write read-only location\n",
436 __FUNCTION__); 436 __func__);
437 } else 437 } else
438 *addr = *val; 438 *addr = *val;
439 else 439 else
@@ -450,7 +450,7 @@ unw_access_fr (struct unw_frame_info *info, int regnum, struct ia64_fpreg *val,
450 450
451 if ((unsigned) (regnum - 2) >= 126) { 451 if ((unsigned) (regnum - 2) >= 126) {
452 UNW_DPRINT(0, "unwind.%s: trying to access non-existent f%u\n", 452 UNW_DPRINT(0, "unwind.%s: trying to access non-existent f%u\n",
453 __FUNCTION__, regnum); 453 __func__, regnum);
454 return -1; 454 return -1;
455 } 455 }
456 456
@@ -482,7 +482,7 @@ unw_access_fr (struct unw_frame_info *info, int regnum, struct ia64_fpreg *val,
482 if (write) 482 if (write)
483 if (read_only(addr)) { 483 if (read_only(addr)) {
484 UNW_DPRINT(0, "unwind.%s: ignoring attempt to write read-only location\n", 484 UNW_DPRINT(0, "unwind.%s: ignoring attempt to write read-only location\n",
485 __FUNCTION__); 485 __func__);
486 } else 486 } else
487 *addr = *val; 487 *addr = *val;
488 else 488 else
@@ -572,14 +572,14 @@ unw_access_ar (struct unw_frame_info *info, int regnum, unsigned long *val, int
572 572
573 default: 573 default:
574 UNW_DPRINT(0, "unwind.%s: trying to access non-existent ar%u\n", 574 UNW_DPRINT(0, "unwind.%s: trying to access non-existent ar%u\n",
575 __FUNCTION__, regnum); 575 __func__, regnum);
576 return -1; 576 return -1;
577 } 577 }
578 578
579 if (write) { 579 if (write) {
580 if (read_only(addr)) { 580 if (read_only(addr)) {
581 UNW_DPRINT(0, "unwind.%s: ignoring attempt to write read-only location\n", 581 UNW_DPRINT(0, "unwind.%s: ignoring attempt to write read-only location\n",
582 __FUNCTION__); 582 __func__);
583 } else 583 } else
584 *addr = *val; 584 *addr = *val;
585 } else 585 } else
@@ -600,7 +600,7 @@ unw_access_pr (struct unw_frame_info *info, unsigned long *val, int write)
600 if (write) { 600 if (write) {
601 if (read_only(addr)) { 601 if (read_only(addr)) {
602 UNW_DPRINT(0, "unwind.%s: ignoring attempt to write read-only location\n", 602 UNW_DPRINT(0, "unwind.%s: ignoring attempt to write read-only location\n",
603 __FUNCTION__); 603 __func__);
604 } else 604 } else
605 *addr = *val; 605 *addr = *val;
606 } else 606 } else
@@ -699,7 +699,7 @@ decode_abreg (unsigned char abreg, int memory)
699 default: 699 default:
700 break; 700 break;
701 } 701 }
702 UNW_DPRINT(0, "unwind.%s: bad abreg=0x%x\n", __FUNCTION__, abreg); 702 UNW_DPRINT(0, "unwind.%s: bad abreg=0x%x\n", __func__, abreg);
703 return UNW_REG_LC; 703 return UNW_REG_LC;
704} 704}
705 705
@@ -739,7 +739,7 @@ spill_next_when (struct unw_reg_info **regp, struct unw_reg_info *lim, unw_word
739 return; 739 return;
740 } 740 }
741 } 741 }
742 UNW_DPRINT(0, "unwind.%s: excess spill!\n", __FUNCTION__); 742 UNW_DPRINT(0, "unwind.%s: excess spill!\n", __func__);
743} 743}
744 744
745static inline void 745static inline void
@@ -855,11 +855,11 @@ desc_abi (unsigned char abi, unsigned char context, struct unw_state_record *sr)
855{ 855{
856 if (abi == 3 && context == 'i') { 856 if (abi == 3 && context == 'i') {
857 sr->flags |= UNW_FLAG_INTERRUPT_FRAME; 857 sr->flags |= UNW_FLAG_INTERRUPT_FRAME;
858 UNW_DPRINT(3, "unwind.%s: interrupt frame\n", __FUNCTION__); 858 UNW_DPRINT(3, "unwind.%s: interrupt frame\n", __func__);
859 } 859 }
860 else 860 else
861 UNW_DPRINT(0, "unwind%s: ignoring unwabi(abi=0x%x,context=0x%x)\n", 861 UNW_DPRINT(0, "unwind%s: ignoring unwabi(abi=0x%x,context=0x%x)\n",
862 __FUNCTION__, abi, context); 862 __func__, abi, context);
863} 863}
864 864
865static inline void 865static inline void
@@ -1347,7 +1347,7 @@ script_emit (struct unw_script *script, struct unw_insn insn)
1347{ 1347{
1348 if (script->count >= UNW_MAX_SCRIPT_LEN) { 1348 if (script->count >= UNW_MAX_SCRIPT_LEN) {
1349 UNW_DPRINT(0, "unwind.%s: script exceeds maximum size of %u instructions!\n", 1349 UNW_DPRINT(0, "unwind.%s: script exceeds maximum size of %u instructions!\n",
1350 __FUNCTION__, UNW_MAX_SCRIPT_LEN); 1350 __func__, UNW_MAX_SCRIPT_LEN);
1351 return; 1351 return;
1352 } 1352 }
1353 script->insn[script->count++] = insn; 1353 script->insn[script->count++] = insn;
@@ -1389,7 +1389,7 @@ emit_nat_info (struct unw_state_record *sr, int i, struct unw_script *script)
1389 1389
1390 default: 1390 default:
1391 UNW_DPRINT(0, "unwind.%s: don't know how to emit nat info for where = %u\n", 1391 UNW_DPRINT(0, "unwind.%s: don't know how to emit nat info for where = %u\n",
1392 __FUNCTION__, r->where); 1392 __func__, r->where);
1393 return; 1393 return;
1394 } 1394 }
1395 insn.opc = opc; 1395 insn.opc = opc;
@@ -1446,7 +1446,7 @@ compile_reg (struct unw_state_record *sr, int i, struct unw_script *script)
1446 val = offsetof(struct pt_regs, f6) + 16*(rval - 6); 1446 val = offsetof(struct pt_regs, f6) + 16*(rval - 6);
1447 else 1447 else
1448 UNW_DPRINT(0, "unwind.%s: kernel may not touch f%lu\n", 1448 UNW_DPRINT(0, "unwind.%s: kernel may not touch f%lu\n",
1449 __FUNCTION__, rval); 1449 __func__, rval);
1450 } 1450 }
1451 break; 1451 break;
1452 1452
@@ -1474,7 +1474,7 @@ compile_reg (struct unw_state_record *sr, int i, struct unw_script *script)
1474 1474
1475 default: 1475 default:
1476 UNW_DPRINT(0, "unwind%s: register %u has unexpected `where' value of %u\n", 1476 UNW_DPRINT(0, "unwind%s: register %u has unexpected `where' value of %u\n",
1477 __FUNCTION__, i, r->where); 1477 __func__, i, r->where);
1478 break; 1478 break;
1479 } 1479 }
1480 insn.opc = opc; 1480 insn.opc = opc;
@@ -1547,10 +1547,10 @@ build_script (struct unw_frame_info *info)
1547 r->when = UNW_WHEN_NEVER; 1547 r->when = UNW_WHEN_NEVER;
1548 sr.pr_val = info->pr; 1548 sr.pr_val = info->pr;
1549 1549
1550 UNW_DPRINT(3, "unwind.%s: ip 0x%lx\n", __FUNCTION__, ip); 1550 UNW_DPRINT(3, "unwind.%s: ip 0x%lx\n", __func__, ip);
1551 script = script_new(ip); 1551 script = script_new(ip);
1552 if (!script) { 1552 if (!script) {
1553 UNW_DPRINT(0, "unwind.%s: failed to create unwind script\n", __FUNCTION__); 1553 UNW_DPRINT(0, "unwind.%s: failed to create unwind script\n", __func__);
1554 STAT(unw.stat.script.build_time += ia64_get_itc() - start); 1554 STAT(unw.stat.script.build_time += ia64_get_itc() - start);
1555 return NULL; 1555 return NULL;
1556 } 1556 }
@@ -1569,7 +1569,7 @@ build_script (struct unw_frame_info *info)
1569 if (!e) { 1569 if (!e) {
1570 /* no info, return default unwinder (leaf proc, no mem stack, no saved regs) */ 1570 /* no info, return default unwinder (leaf proc, no mem stack, no saved regs) */
1571 UNW_DPRINT(1, "unwind.%s: no unwind info for ip=0x%lx (prev ip=0x%lx)\n", 1571 UNW_DPRINT(1, "unwind.%s: no unwind info for ip=0x%lx (prev ip=0x%lx)\n",
1572 __FUNCTION__, ip, unw.cache[info->prev_script].ip); 1572 __func__, ip, unw.cache[info->prev_script].ip);
1573 sr.curr.reg[UNW_REG_RP].where = UNW_WHERE_BR; 1573 sr.curr.reg[UNW_REG_RP].where = UNW_WHERE_BR;
1574 sr.curr.reg[UNW_REG_RP].when = -1; 1574 sr.curr.reg[UNW_REG_RP].when = -1;
1575 sr.curr.reg[UNW_REG_RP].val = 0; 1575 sr.curr.reg[UNW_REG_RP].val = 0;
@@ -1618,13 +1618,13 @@ build_script (struct unw_frame_info *info)
1618 sr.curr.reg[UNW_REG_RP].when = -1; 1618 sr.curr.reg[UNW_REG_RP].when = -1;
1619 sr.curr.reg[UNW_REG_RP].val = sr.return_link_reg; 1619 sr.curr.reg[UNW_REG_RP].val = sr.return_link_reg;
1620 UNW_DPRINT(1, "unwind.%s: using default for rp at ip=0x%lx where=%d val=0x%lx\n", 1620 UNW_DPRINT(1, "unwind.%s: using default for rp at ip=0x%lx where=%d val=0x%lx\n",
1621 __FUNCTION__, ip, sr.curr.reg[UNW_REG_RP].where, 1621 __func__, ip, sr.curr.reg[UNW_REG_RP].where,
1622 sr.curr.reg[UNW_REG_RP].val); 1622 sr.curr.reg[UNW_REG_RP].val);
1623 } 1623 }
1624 1624
1625#ifdef UNW_DEBUG 1625#ifdef UNW_DEBUG
1626 UNW_DPRINT(1, "unwind.%s: state record for func 0x%lx, t=%u:\n", 1626 UNW_DPRINT(1, "unwind.%s: state record for func 0x%lx, t=%u:\n",
1627 __FUNCTION__, table->segment_base + e->start_offset, sr.when_target); 1627 __func__, table->segment_base + e->start_offset, sr.when_target);
1628 for (r = sr.curr.reg; r < sr.curr.reg + UNW_NUM_REGS; ++r) { 1628 for (r = sr.curr.reg; r < sr.curr.reg + UNW_NUM_REGS; ++r) {
1629 if (r->where != UNW_WHERE_NONE || r->when != UNW_WHEN_NEVER) { 1629 if (r->where != UNW_WHERE_NONE || r->when != UNW_WHEN_NEVER) {
1630 UNW_DPRINT(1, " %s <- ", unw.preg_name[r - sr.curr.reg]); 1630 UNW_DPRINT(1, " %s <- ", unw.preg_name[r - sr.curr.reg]);
@@ -1746,7 +1746,7 @@ run_script (struct unw_script *script, struct unw_frame_info *state)
1746 } else { 1746 } else {
1747 s[dst] = 0; 1747 s[dst] = 0;
1748 UNW_DPRINT(0, "unwind.%s: no state->pt, dst=%ld, val=%ld\n", 1748 UNW_DPRINT(0, "unwind.%s: no state->pt, dst=%ld, val=%ld\n",
1749 __FUNCTION__, dst, val); 1749 __func__, dst, val);
1750 } 1750 }
1751 break; 1751 break;
1752 1752
@@ -1756,7 +1756,7 @@ run_script (struct unw_script *script, struct unw_frame_info *state)
1756 else { 1756 else {
1757 s[dst] = 0; 1757 s[dst] = 0;
1758 UNW_DPRINT(0, "unwind.%s: UNW_INSN_MOVE_CONST bad val=%ld\n", 1758 UNW_DPRINT(0, "unwind.%s: UNW_INSN_MOVE_CONST bad val=%ld\n",
1759 __FUNCTION__, val); 1759 __func__, val);
1760 } 1760 }
1761 break; 1761 break;
1762 1762
@@ -1791,7 +1791,7 @@ run_script (struct unw_script *script, struct unw_frame_info *state)
1791 || s[val] < TASK_SIZE) 1791 || s[val] < TASK_SIZE)
1792 { 1792 {
1793 UNW_DPRINT(0, "unwind.%s: rejecting bad psp=0x%lx\n", 1793 UNW_DPRINT(0, "unwind.%s: rejecting bad psp=0x%lx\n",
1794 __FUNCTION__, s[val]); 1794 __func__, s[val]);
1795 break; 1795 break;
1796 } 1796 }
1797#endif 1797#endif
@@ -1825,7 +1825,7 @@ find_save_locs (struct unw_frame_info *info)
1825 if ((info->ip & (local_cpu_data->unimpl_va_mask | 0xf)) || info->ip < TASK_SIZE) { 1825 if ((info->ip & (local_cpu_data->unimpl_va_mask | 0xf)) || info->ip < TASK_SIZE) {
1826 /* don't let obviously bad addresses pollute the cache */ 1826 /* don't let obviously bad addresses pollute the cache */
1827 /* FIXME: should really be level 0 but it occurs too often. KAO */ 1827 /* FIXME: should really be level 0 but it occurs too often. KAO */
1828 UNW_DPRINT(1, "unwind.%s: rejecting bad ip=0x%lx\n", __FUNCTION__, info->ip); 1828 UNW_DPRINT(1, "unwind.%s: rejecting bad ip=0x%lx\n", __func__, info->ip);
1829 info->rp_loc = NULL; 1829 info->rp_loc = NULL;
1830 return -1; 1830 return -1;
1831 } 1831 }
@@ -1838,7 +1838,7 @@ find_save_locs (struct unw_frame_info *info)
1838 spin_unlock_irqrestore(&unw.lock, flags); 1838 spin_unlock_irqrestore(&unw.lock, flags);
1839 UNW_DPRINT(0, 1839 UNW_DPRINT(0,
1840 "unwind.%s: failed to locate/build unwind script for ip %lx\n", 1840 "unwind.%s: failed to locate/build unwind script for ip %lx\n",
1841 __FUNCTION__, info->ip); 1841 __func__, info->ip);
1842 return -1; 1842 return -1;
1843 } 1843 }
1844 have_write_lock = 1; 1844 have_write_lock = 1;
@@ -1882,21 +1882,21 @@ unw_unwind (struct unw_frame_info *info)
1882 if (!unw_valid(info, info->rp_loc)) { 1882 if (!unw_valid(info, info->rp_loc)) {
1883 /* FIXME: should really be level 0 but it occurs too often. KAO */ 1883 /* FIXME: should really be level 0 but it occurs too often. KAO */
1884 UNW_DPRINT(1, "unwind.%s: failed to locate return link (ip=0x%lx)!\n", 1884 UNW_DPRINT(1, "unwind.%s: failed to locate return link (ip=0x%lx)!\n",
1885 __FUNCTION__, info->ip); 1885 __func__, info->ip);
1886 STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags)); 1886 STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
1887 return -1; 1887 return -1;
1888 } 1888 }
1889 /* restore the ip */ 1889 /* restore the ip */
1890 ip = info->ip = *info->rp_loc; 1890 ip = info->ip = *info->rp_loc;
1891 if (ip < GATE_ADDR) { 1891 if (ip < GATE_ADDR) {
1892 UNW_DPRINT(2, "unwind.%s: reached user-space (ip=0x%lx)\n", __FUNCTION__, ip); 1892 UNW_DPRINT(2, "unwind.%s: reached user-space (ip=0x%lx)\n", __func__, ip);
1893 STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags)); 1893 STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
1894 return -1; 1894 return -1;
1895 } 1895 }
1896 1896
1897 /* validate the previous stack frame pointer */ 1897 /* validate the previous stack frame pointer */
1898 if (!unw_valid(info, info->pfs_loc)) { 1898 if (!unw_valid(info, info->pfs_loc)) {
1899 UNW_DPRINT(0, "unwind.%s: failed to locate ar.pfs!\n", __FUNCTION__); 1899 UNW_DPRINT(0, "unwind.%s: failed to locate ar.pfs!\n", __func__);
1900 STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags)); 1900 STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
1901 return -1; 1901 return -1;
1902 } 1902 }
@@ -1912,13 +1912,13 @@ unw_unwind (struct unw_frame_info *info)
1912 num_regs = *info->cfm_loc & 0x7f; /* size of frame */ 1912 num_regs = *info->cfm_loc & 0x7f; /* size of frame */
1913 info->pfs_loc = 1913 info->pfs_loc =
1914 (unsigned long *) (info->pt + offsetof(struct pt_regs, ar_pfs)); 1914 (unsigned long *) (info->pt + offsetof(struct pt_regs, ar_pfs));
1915 UNW_DPRINT(3, "unwind.%s: interrupt_frame pt 0x%lx\n", __FUNCTION__, info->pt); 1915 UNW_DPRINT(3, "unwind.%s: interrupt_frame pt 0x%lx\n", __func__, info->pt);
1916 } else 1916 } else
1917 num_regs = (*info->cfm_loc >> 7) & 0x7f; /* size of locals */ 1917 num_regs = (*info->cfm_loc >> 7) & 0x7f; /* size of locals */
1918 info->bsp = (unsigned long) ia64_rse_skip_regs((unsigned long *) info->bsp, -num_regs); 1918 info->bsp = (unsigned long) ia64_rse_skip_regs((unsigned long *) info->bsp, -num_regs);
1919 if (info->bsp < info->regstk.limit || info->bsp > info->regstk.top) { 1919 if (info->bsp < info->regstk.limit || info->bsp > info->regstk.top) {
1920 UNW_DPRINT(0, "unwind.%s: bsp (0x%lx) out of range [0x%lx-0x%lx]\n", 1920 UNW_DPRINT(0, "unwind.%s: bsp (0x%lx) out of range [0x%lx-0x%lx]\n",
1921 __FUNCTION__, info->bsp, info->regstk.limit, info->regstk.top); 1921 __func__, info->bsp, info->regstk.limit, info->regstk.top);
1922 STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags)); 1922 STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
1923 return -1; 1923 return -1;
1924 } 1924 }
@@ -1927,14 +1927,14 @@ unw_unwind (struct unw_frame_info *info)
1927 info->sp = info->psp; 1927 info->sp = info->psp;
1928 if (info->sp < info->memstk.top || info->sp > info->memstk.limit) { 1928 if (info->sp < info->memstk.top || info->sp > info->memstk.limit) {
1929 UNW_DPRINT(0, "unwind.%s: sp (0x%lx) out of range [0x%lx-0x%lx]\n", 1929 UNW_DPRINT(0, "unwind.%s: sp (0x%lx) out of range [0x%lx-0x%lx]\n",
1930 __FUNCTION__, info->sp, info->memstk.top, info->memstk.limit); 1930 __func__, info->sp, info->memstk.top, info->memstk.limit);
1931 STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags)); 1931 STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
1932 return -1; 1932 return -1;
1933 } 1933 }
1934 1934
1935 if (info->ip == prev_ip && info->sp == prev_sp && info->bsp == prev_bsp) { 1935 if (info->ip == prev_ip && info->sp == prev_sp && info->bsp == prev_bsp) {
1936 UNW_DPRINT(0, "unwind.%s: ip, sp, bsp unchanged; stopping here (ip=0x%lx)\n", 1936 UNW_DPRINT(0, "unwind.%s: ip, sp, bsp unchanged; stopping here (ip=0x%lx)\n",
1937 __FUNCTION__, ip); 1937 __func__, ip);
1938 STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags)); 1938 STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
1939 return -1; 1939 return -1;
1940 } 1940 }
@@ -1961,7 +1961,7 @@ unw_unwind_to_user (struct unw_frame_info *info)
1961 if ((long)((unsigned long)info->task + IA64_STK_OFFSET - sp) 1961 if ((long)((unsigned long)info->task + IA64_STK_OFFSET - sp)
1962 < IA64_PT_REGS_SIZE) { 1962 < IA64_PT_REGS_SIZE) {
1963 UNW_DPRINT(0, "unwind.%s: ran off the top of the kernel stack\n", 1963 UNW_DPRINT(0, "unwind.%s: ran off the top of the kernel stack\n",
1964 __FUNCTION__); 1964 __func__);
1965 break; 1965 break;
1966 } 1966 }
1967 if (unw_is_intr_frame(info) && 1967 if (unw_is_intr_frame(info) &&
@@ -1971,13 +1971,13 @@ unw_unwind_to_user (struct unw_frame_info *info)
1971 unw_get_rp(info, &ip); 1971 unw_get_rp(info, &ip);
1972 UNW_DPRINT(0, "unwind.%s: failed to read " 1972 UNW_DPRINT(0, "unwind.%s: failed to read "
1973 "predicate register (ip=0x%lx)\n", 1973 "predicate register (ip=0x%lx)\n",
1974 __FUNCTION__, ip); 1974 __func__, ip);
1975 return -1; 1975 return -1;
1976 } 1976 }
1977 } while (unw_unwind(info) >= 0); 1977 } while (unw_unwind(info) >= 0);
1978 unw_get_ip(info, &ip); 1978 unw_get_ip(info, &ip);
1979 UNW_DPRINT(0, "unwind.%s: failed to unwind to user-level (ip=0x%lx)\n", 1979 UNW_DPRINT(0, "unwind.%s: failed to unwind to user-level (ip=0x%lx)\n",
1980 __FUNCTION__, ip); 1980 __func__, ip);
1981 return -1; 1981 return -1;
1982} 1982}
1983EXPORT_SYMBOL(unw_unwind_to_user); 1983EXPORT_SYMBOL(unw_unwind_to_user);
@@ -2028,7 +2028,7 @@ init_frame_info (struct unw_frame_info *info, struct task_struct *t,
2028 " pr 0x%lx\n" 2028 " pr 0x%lx\n"
2029 " sw 0x%lx\n" 2029 " sw 0x%lx\n"
2030 " sp 0x%lx\n", 2030 " sp 0x%lx\n",
2031 __FUNCTION__, (unsigned long) t, rbslimit, rbstop, stktop, stklimit, 2031 __func__, (unsigned long) t, rbslimit, rbstop, stktop, stklimit,
2032 info->pr, (unsigned long) info->sw, info->sp); 2032 info->pr, (unsigned long) info->sw, info->sp);
2033 STAT(unw.stat.api.init_time += ia64_get_itc() - start; local_irq_restore(flags)); 2033 STAT(unw.stat.api.init_time += ia64_get_itc() - start; local_irq_restore(flags));
2034} 2034}
@@ -2047,7 +2047,7 @@ unw_init_frame_info (struct unw_frame_info *info, struct task_struct *t, struct
2047 " bsp 0x%lx\n" 2047 " bsp 0x%lx\n"
2048 " sol 0x%lx\n" 2048 " sol 0x%lx\n"
2049 " ip 0x%lx\n", 2049 " ip 0x%lx\n",
2050 __FUNCTION__, info->bsp, sol, info->ip); 2050 __func__, info->bsp, sol, info->ip);
2051 find_save_locs(info); 2051 find_save_locs(info);
2052} 2052}
2053 2053
@@ -2058,7 +2058,7 @@ unw_init_from_blocked_task (struct unw_frame_info *info, struct task_struct *t)
2058{ 2058{
2059 struct switch_stack *sw = (struct switch_stack *) (t->thread.ksp + 16); 2059 struct switch_stack *sw = (struct switch_stack *) (t->thread.ksp + 16);
2060 2060
2061 UNW_DPRINT(1, "unwind.%s\n", __FUNCTION__); 2061 UNW_DPRINT(1, "unwind.%s\n", __func__);
2062 unw_init_frame_info(info, t, sw); 2062 unw_init_frame_info(info, t, sw);
2063} 2063}
2064EXPORT_SYMBOL(unw_init_from_blocked_task); 2064EXPORT_SYMBOL(unw_init_from_blocked_task);
@@ -2088,7 +2088,7 @@ unw_add_unwind_table (const char *name, unsigned long segment_base, unsigned lon
2088 2088
2089 if (end - start <= 0) { 2089 if (end - start <= 0) {
2090 UNW_DPRINT(0, "unwind.%s: ignoring attempt to insert empty unwind table\n", 2090 UNW_DPRINT(0, "unwind.%s: ignoring attempt to insert empty unwind table\n",
2091 __FUNCTION__); 2091 __func__);
2092 return NULL; 2092 return NULL;
2093 } 2093 }
2094 2094
@@ -2119,14 +2119,14 @@ unw_remove_unwind_table (void *handle)
2119 2119
2120 if (!handle) { 2120 if (!handle) {
2121 UNW_DPRINT(0, "unwind.%s: ignoring attempt to remove non-existent unwind table\n", 2121 UNW_DPRINT(0, "unwind.%s: ignoring attempt to remove non-existent unwind table\n",
2122 __FUNCTION__); 2122 __func__);
2123 return; 2123 return;
2124 } 2124 }
2125 2125
2126 table = handle; 2126 table = handle;
2127 if (table == &unw.kernel_table) { 2127 if (table == &unw.kernel_table) {
2128 UNW_DPRINT(0, "unwind.%s: sorry, freeing the kernel's unwind table is a " 2128 UNW_DPRINT(0, "unwind.%s: sorry, freeing the kernel's unwind table is a "
2129 "no-can-do!\n", __FUNCTION__); 2129 "no-can-do!\n", __func__);
2130 return; 2130 return;
2131 } 2131 }
2132 2132
@@ -2139,7 +2139,7 @@ unw_remove_unwind_table (void *handle)
2139 break; 2139 break;
2140 if (!prev) { 2140 if (!prev) {
2141 UNW_DPRINT(0, "unwind.%s: failed to find unwind table %p\n", 2141 UNW_DPRINT(0, "unwind.%s: failed to find unwind table %p\n",
2142 __FUNCTION__, (void *) table); 2142 __func__, (void *) table);
2143 spin_unlock_irqrestore(&unw.lock, flags); 2143 spin_unlock_irqrestore(&unw.lock, flags);
2144 return; 2144 return;
2145 } 2145 }
@@ -2185,7 +2185,7 @@ create_gate_table (void)
2185 } 2185 }
2186 2186
2187 if (!punw) { 2187 if (!punw) {
2188 printk("%s: failed to find gate DSO's unwind table!\n", __FUNCTION__); 2188 printk("%s: failed to find gate DSO's unwind table!\n", __func__);
2189 return 0; 2189 return 0;
2190 } 2190 }
2191 2191
@@ -2202,7 +2202,7 @@ create_gate_table (void)
2202 unw.gate_table = kmalloc(size, GFP_KERNEL); 2202 unw.gate_table = kmalloc(size, GFP_KERNEL);
2203 if (!unw.gate_table) { 2203 if (!unw.gate_table) {
2204 unw.gate_table_size = 0; 2204 unw.gate_table_size = 0;
2205 printk(KERN_ERR "%s: unable to create unwind data for gate page!\n", __FUNCTION__); 2205 printk(KERN_ERR "%s: unable to create unwind data for gate page!\n", __func__);
2206 return 0; 2206 return 0;
2207 } 2207 }
2208 unw.gate_table_size = size; 2208 unw.gate_table_size = size;
diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
index 3e69881648a3..23088bed111e 100644
--- a/arch/ia64/mm/fault.c
+++ b/arch/ia64/mm/fault.c
@@ -26,7 +26,7 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap)
26 if (!user_mode(regs)) { 26 if (!user_mode(regs)) {
27 /* kprobe_running() needs smp_processor_id() */ 27 /* kprobe_running() needs smp_processor_id() */
28 preempt_disable(); 28 preempt_disable();
29 if (kprobe_running() && kprobes_fault_handler(regs, trap)) 29 if (kprobe_running() && kprobe_fault_handler(regs, trap))
30 ret = 1; 30 ret = 1;
31 preempt_enable(); 31 preempt_enable();
32 } 32 }
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index 25aef6211a54..a4ca657c72c6 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -714,7 +714,7 @@ int arch_add_memory(int nid, u64 start, u64 size)
714 714
715 if (ret) 715 if (ret)
716 printk("%s: Problem encountered in __add_pages() as ret=%d\n", 716 printk("%s: Problem encountered in __add_pages() as ret=%d\n",
717 __FUNCTION__, ret); 717 __func__, ret);
718 718
719 return ret; 719 return ret;
720} 720}
diff --git a/arch/ia64/pci/fixup.c b/arch/ia64/pci/fixup.c
index 245dc1fedc24..f5959c0c1810 100644
--- a/arch/ia64/pci/fixup.c
+++ b/arch/ia64/pci/fixup.c
@@ -63,7 +63,7 @@ static void __devinit pci_fixup_video(struct pci_dev *pdev)
63 pci_read_config_word(pdev, PCI_COMMAND, &config); 63 pci_read_config_word(pdev, PCI_COMMAND, &config);
64 if (config & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) { 64 if (config & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) {
65 pdev->resource[PCI_ROM_RESOURCE].flags |= IORESOURCE_ROM_SHADOW; 65 pdev->resource[PCI_ROM_RESOURCE].flags |= IORESOURCE_ROM_SHADOW;
66 printk(KERN_DEBUG "Boot video device is %s\n", pci_name(pdev)); 66 dev_printk(KERN_DEBUG, &pdev->dev, "Boot video device\n");
67 } 67 }
68} 68}
69DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pci_fixup_video); 69DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pci_fixup_video);
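
The message here moves from a bare printk() with a hand-interpolated pci_name() to dev_printk(), which prefixes the text with the device's identity automatically, so every line from the same device carries one consistent prefix. A userspace sketch of the convention (the struct and macro are illustrative stand-ins, not the kernel's definitions):

#include <stdio.h>

struct device {
	const char *bus_id;
};

#define dev_dbg(dev, fmt, ...) \
	printf("%s: " fmt, (dev)->bus_id, ##__VA_ARGS__)

int main(void)
{
	struct device vga = { .bus_id = "0000:00:02.0" };

	dev_dbg(&vga, "Boot video device\n");
	/* prints: 0000:00:02.0: Boot video device */
	return 0;
}
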
diff --git a/arch/ia64/pci/pci.c b/arch/ia64/pci/pci.c
index 8fd7e825192b..e282c348dcde 100644
--- a/arch/ia64/pci/pci.c
+++ b/arch/ia64/pci/pci.c
@@ -765,7 +765,7 @@ static void __init set_pci_cacheline_size(void)
765 status = ia64_pal_cache_summary(&levels, &unique_caches); 765 status = ia64_pal_cache_summary(&levels, &unique_caches);
766 if (status != 0) { 766 if (status != 0) {
767 printk(KERN_ERR "%s: ia64_pal_cache_summary() failed " 767 printk(KERN_ERR "%s: ia64_pal_cache_summary() failed "
768 "(status=%ld)\n", __FUNCTION__, status); 768 "(status=%ld)\n", __func__, status);
769 return; 769 return;
770 } 770 }
771 771
@@ -773,7 +773,7 @@ static void __init set_pci_cacheline_size(void)
773 /* cache_type (data_or_unified)= */ 2, &cci); 773 /* cache_type (data_or_unified)= */ 2, &cci);
774 if (status != 0) { 774 if (status != 0) {
775 printk(KERN_ERR "%s: ia64_pal_cache_config_info() failed " 775 printk(KERN_ERR "%s: ia64_pal_cache_config_info() failed "
776 "(status=%ld)\n", __FUNCTION__, status); 776 "(status=%ld)\n", __func__, status);
777 return; 777 return;
778 } 778 }
779 pci_cache_line_size = (1 << cci.pcci_line_size) / 4; 779 pci_cache_line_size = (1 << cci.pcci_line_size) / 4;
diff --git a/arch/ia64/sn/kernel/huberror.c b/arch/ia64/sn/kernel/huberror.c
index b663168da55c..0101c7924a4d 100644
--- a/arch/ia64/sn/kernel/huberror.c
+++ b/arch/ia64/sn/kernel/huberror.c
@@ -37,7 +37,7 @@ static irqreturn_t hub_eint_handler(int irq, void *arg)
37 (u64) nasid, 0, 0, 0, 0, 0, 0); 37 (u64) nasid, 0, 0, 0, 0, 0, 0);
38 38
39 if ((int)ret_stuff.v0) 39 if ((int)ret_stuff.v0)
40 panic("%s: Fatal %s Error", __FUNCTION__, 40 panic("%s: Fatal %s Error", __func__,
41 ((nasid & 1) ? "TIO" : "HUBII")); 41 ((nasid & 1) ? "TIO" : "HUBII"));
42 42
43 if (!(nasid & 1)) /* Not a TIO, handle CRB errors */ 43 if (!(nasid & 1)) /* Not a TIO, handle CRB errors */
@@ -48,7 +48,7 @@ static irqreturn_t hub_eint_handler(int irq, void *arg)
48 (u64) nasid, 0, 0, 0, 0, 0, 0); 48 (u64) nasid, 0, 0, 0, 0, 0, 0);
49 49
50 if ((int)ret_stuff.v0) 50 if ((int)ret_stuff.v0)
51 panic("%s: Fatal TIO Error", __FUNCTION__); 51 panic("%s: Fatal TIO Error", __func__);
52 } else 52 } else
53 bte_error_handler((unsigned long)NODEPDA(nasid_to_cnodeid(nasid))); 53 bte_error_handler((unsigned long)NODEPDA(nasid_to_cnodeid(nasid)));
54 54
diff --git a/arch/ia64/sn/kernel/io_acpi_init.c b/arch/ia64/sn/kernel/io_acpi_init.c
index 3c7178f5dce8..6568942a95f0 100644
--- a/arch/ia64/sn/kernel/io_acpi_init.c
+++ b/arch/ia64/sn/kernel/io_acpi_init.c
@@ -133,7 +133,7 @@ sn_get_bussoft_ptr(struct pci_bus *bus)
133 if (ACPI_FAILURE(status)) { 133 if (ACPI_FAILURE(status)) {
134 printk(KERN_ERR "%s: " 134 printk(KERN_ERR "%s: "
135 "acpi_get_vendor_resource() failed (0x%x) for: ", 135 "acpi_get_vendor_resource() failed (0x%x) for: ",
136 __FUNCTION__, status); 136 __func__, status);
137 acpi_ns_print_node_pathname(handle, NULL); 137 acpi_ns_print_node_pathname(handle, NULL);
138 printk("\n"); 138 printk("\n");
139 return NULL; 139 return NULL;
@@ -145,7 +145,7 @@ sn_get_bussoft_ptr(struct pci_bus *bus)
145 sizeof(struct pcibus_bussoft *)) { 145 sizeof(struct pcibus_bussoft *)) {
146 printk(KERN_ERR 146 printk(KERN_ERR
147 "%s: Invalid vendor data length %d\n", 147 "%s: Invalid vendor data length %d\n",
148 __FUNCTION__, vendor->byte_length); 148 __func__, vendor->byte_length);
149 kfree(buffer.pointer); 149 kfree(buffer.pointer);
150 return NULL; 150 return NULL;
151 } 151 }
@@ -184,7 +184,7 @@ sn_extract_device_info(acpi_handle handle, struct pcidev_info **pcidev_info,
184 if (ACPI_FAILURE(status)) { 184 if (ACPI_FAILURE(status)) {
185 printk(KERN_ERR 185 printk(KERN_ERR
186 "%s: acpi_get_vendor_resource() failed (0x%x) for: ", 186 "%s: acpi_get_vendor_resource() failed (0x%x) for: ",
187 __FUNCTION__, status); 187 __func__, status);
188 acpi_ns_print_node_pathname(handle, NULL); 188 acpi_ns_print_node_pathname(handle, NULL);
189 printk("\n"); 189 printk("\n");
190 return 1; 190 return 1;
@@ -196,7 +196,7 @@ sn_extract_device_info(acpi_handle handle, struct pcidev_info **pcidev_info,
196 sizeof(struct pci_devdev_info *)) { 196 sizeof(struct pci_devdev_info *)) {
197 printk(KERN_ERR 197 printk(KERN_ERR
198 "%s: Invalid vendor data length: %d for: ", 198 "%s: Invalid vendor data length: %d for: ",
199 __FUNCTION__, vendor->byte_length); 199 __func__, vendor->byte_length);
 		acpi_ns_print_node_pathname(handle, NULL);
 		printk("\n");
 		ret = 1;
@@ -205,7 +205,7 @@ sn_extract_device_info(acpi_handle handle, struct pcidev_info **pcidev_info,
 
 	pcidev_ptr = kzalloc(sizeof(struct pcidev_info), GFP_KERNEL);
 	if (!pcidev_ptr)
-		panic("%s: Unable to alloc memory for pcidev_info", __FUNCTION__);
+		panic("%s: Unable to alloc memory for pcidev_info", __func__);
 
 	memcpy(&addr, vendor->byte_data, sizeof(struct pcidev_info *));
 	pcidev_prom_ptr = __va(addr);
@@ -214,7 +214,7 @@ sn_extract_device_info(acpi_handle handle, struct pcidev_info **pcidev_info,
 	/* Get the IRQ info */
 	irq_info = kzalloc(sizeof(struct sn_irq_info), GFP_KERNEL);
 	if (!irq_info)
-		panic("%s: Unable to alloc memory for sn_irq_info", __FUNCTION__);
+		panic("%s: Unable to alloc memory for sn_irq_info", __func__);
 
 	if (pcidev_ptr->pdi_sn_irq_info) {
 		irq_info_prom = __va(pcidev_ptr->pdi_sn_irq_info);
@@ -249,10 +249,10 @@ get_host_devfn(acpi_handle device_handle, acpi_handle rootbus_handle)
 		status = acpi_get_parent(child, &parent);
 		if (ACPI_FAILURE(status)) {
 			printk(KERN_ERR "%s: acpi_get_parent() failed "
-			       "(0x%x) for: ", __FUNCTION__, status);
+			       "(0x%x) for: ", __func__, status);
 			acpi_ns_print_node_pathname(child, NULL);
 			printk("\n");
-			panic("%s: Unable to find host devfn\n", __FUNCTION__);
+			panic("%s: Unable to find host devfn\n", __func__);
 		}
 		if (parent == rootbus_handle)
 			break;
@@ -260,7 +260,7 @@ get_host_devfn(acpi_handle device_handle, acpi_handle rootbus_handle)
 	}
 	if (!child) {
 		printk(KERN_ERR "%s: Unable to find root bus for: ",
-		       __FUNCTION__);
+		       __func__);
 		acpi_ns_print_node_pathname(device_handle, NULL);
 		printk("\n");
 		BUG();
@@ -269,10 +269,10 @@ get_host_devfn(acpi_handle device_handle, acpi_handle rootbus_handle)
 	status = acpi_evaluate_integer(child, METHOD_NAME__ADR, NULL, &adr);
 	if (ACPI_FAILURE(status)) {
 		printk(KERN_ERR "%s: Unable to get _ADR (0x%x) for: ",
-		       __FUNCTION__, status);
+		       __func__, status);
 		acpi_ns_print_node_pathname(child, NULL);
 		printk("\n");
-		panic("%s: Unable to find host devfn\n", __FUNCTION__);
+		panic("%s: Unable to find host devfn\n", __func__);
 	}
 
 	slot = (adr >> 16) & 0xffff;
@@ -308,7 +308,7 @@ find_matching_device(acpi_handle handle, u32 lvl, void *context, void **rv)
 		if (ACPI_FAILURE(status)) {
 			printk(KERN_ERR
 			       "%s: acpi_get_parent() failed (0x%x) for: ",
-			       __FUNCTION__, status);
+			       __func__, status);
 			acpi_ns_print_node_pathname(handle, NULL);
 			printk("\n");
 			return AE_OK;
@@ -318,7 +318,7 @@ find_matching_device(acpi_handle handle, u32 lvl, void *context, void **rv)
 		if (ACPI_FAILURE(status)) {
 			printk(KERN_ERR
 			       "%s: Failed to find _BBN in parent of: ",
-			       __FUNCTION__);
+			       __func__);
 			acpi_ns_print_node_pathname(handle, NULL);
 			printk("\n");
 			return AE_OK;
@@ -358,14 +358,14 @@ sn_acpi_get_pcidev_info(struct pci_dev *dev, struct pcidev_info **pcidev_info,
 		if (segment != pci_domain_nr(dev)) {
 			printk(KERN_ERR
 			       "%s: Segment number mismatch, 0x%lx vs 0x%x for: ",
-			       __FUNCTION__, segment, pci_domain_nr(dev));
+			       __func__, segment, pci_domain_nr(dev));
 			acpi_ns_print_node_pathname(rootbus_handle, NULL);
 			printk("\n");
 			return 1;
 		}
 	} else {
 		printk(KERN_ERR "%s: Unable to get __SEG from: ",
-		       __FUNCTION__);
+		       __func__);
 		acpi_ns_print_node_pathname(rootbus_handle, NULL);
 		printk("\n");
 		return 1;
@@ -386,7 +386,7 @@ sn_acpi_get_pcidev_info(struct pci_dev *dev, struct pcidev_info **pcidev_info,
 	if (!pcidev_match.handle) {
 		printk(KERN_ERR
 		       "%s: Could not find matching ACPI device for %s.\n",
-		       __FUNCTION__, pci_name(dev));
+		       __func__, pci_name(dev));
 		return 1;
 	}
 
@@ -422,7 +422,7 @@ sn_acpi_slot_fixup(struct pci_dev *dev)
 
 	if (sn_acpi_get_pcidev_info(dev, &pcidev_info, &sn_irq_info)) {
 		panic("%s: Failure obtaining pcidev_info for %s\n",
-		      __FUNCTION__, pci_name(dev));
+		      __func__, pci_name(dev));
 	}
 
 	if (pcidev_info->pdi_pio_mapped_addr[PCI_ROM_RESOURCE]) {
@@ -463,7 +463,7 @@ sn_acpi_bus_fixup(struct pci_bus *bus)
 			printk(KERN_ERR
 			       "%s: 0x%04x:0x%02x Unable to "
 			       "obtain prom_bussoft_ptr\n",
-			       __FUNCTION__, pci_domain_nr(bus), bus->number);
+			       __func__, pci_domain_nr(bus), bus->number);
 			return;
 		}
 		sn_common_bus_fixup(bus, prom_bussoft_ptr);
diff --git a/arch/ia64/sn/kernel/io_common.c b/arch/ia64/sn/kernel/io_common.c
index c4eb84f9e781..8a924a5661dd 100644
--- a/arch/ia64/sn/kernel/io_common.c
+++ b/arch/ia64/sn/kernel/io_common.c
@@ -364,7 +364,7 @@ void sn_bus_store_sysdata(struct pci_dev *dev)
 
 	element = kzalloc(sizeof(struct sysdata_el), GFP_KERNEL);
 	if (!element) {
-		dev_dbg(&dev->dev, "%s: out of memory!\n", __FUNCTION__);
+		dev_dbg(&dev->dev, "%s: out of memory!\n", __func__);
 		return;
 	}
 	element->sysdata = SN_PCIDEV_INFO(dev);
diff --git a/arch/ia64/sn/kernel/io_init.c b/arch/ia64/sn/kernel/io_init.c
index 906b93674b76..c3aa851d1ca6 100644
--- a/arch/ia64/sn/kernel/io_init.c
+++ b/arch/ia64/sn/kernel/io_init.c
@@ -209,11 +209,11 @@ sn_io_slot_fixup(struct pci_dev *dev)
 
 	pcidev_info = kzalloc(sizeof(struct pcidev_info), GFP_KERNEL);
 	if (!pcidev_info)
-		panic("%s: Unable to alloc memory for pcidev_info", __FUNCTION__);
+		panic("%s: Unable to alloc memory for pcidev_info", __func__);
 
 	sn_irq_info = kzalloc(sizeof(struct sn_irq_info), GFP_KERNEL);
 	if (!sn_irq_info)
-		panic("%s: Unable to alloc memory for sn_irq_info", __FUNCTION__);
+		panic("%s: Unable to alloc memory for sn_irq_info", __func__);
 
 	/* Call to retrieve pci device information needed by kernel. */
 	status = sal_get_pcidev_info((u64) pci_domain_nr(dev),
diff --git a/arch/ia64/sn/kernel/mca.c b/arch/ia64/sn/kernel/mca.c
index 868c9aa64fe2..27793f7aa99c 100644
--- a/arch/ia64/sn/kernel/mca.c
+++ b/arch/ia64/sn/kernel/mca.c
@@ -100,7 +100,7 @@ sn_platform_plat_specific_err_print(const u8 * sect_header, u8 ** oemdata,
 	if (!newbuf) {
 		mutex_unlock(&sn_oemdata_mutex);
 		printk(KERN_ERR "%s: unable to extend sn_oemdata\n",
-		       __FUNCTION__);
+		       __func__);
 		return 1;
 	}
 	vfree(*sn_oemdata);
diff --git a/arch/ia64/sn/pci/pci_dma.c b/arch/ia64/sn/pci/pci_dma.c
index 511db2fd7bff..18b94b792d54 100644
--- a/arch/ia64/sn/pci/pci_dma.c
+++ b/arch/ia64/sn/pci/pci_dma.c
@@ -116,7 +116,7 @@ void *sn_dma_alloc_coherent(struct device *dev, size_t size,
116 *dma_handle = provider->dma_map_consistent(pdev, phys_addr, size, 116 *dma_handle = provider->dma_map_consistent(pdev, phys_addr, size,
117 SN_DMA_ADDR_PHYS); 117 SN_DMA_ADDR_PHYS);
118 if (!*dma_handle) { 118 if (!*dma_handle) {
119 printk(KERN_ERR "%s: out of ATEs\n", __FUNCTION__); 119 printk(KERN_ERR "%s: out of ATEs\n", __func__);
120 free_pages((unsigned long)cpuaddr, get_order(size)); 120 free_pages((unsigned long)cpuaddr, get_order(size));
121 return NULL; 121 return NULL;
122 } 122 }
@@ -179,7 +179,7 @@ dma_addr_t sn_dma_map_single(struct device *dev, void *cpu_addr, size_t size,
179 phys_addr = __pa(cpu_addr); 179 phys_addr = __pa(cpu_addr);
180 dma_addr = provider->dma_map(pdev, phys_addr, size, SN_DMA_ADDR_PHYS); 180 dma_addr = provider->dma_map(pdev, phys_addr, size, SN_DMA_ADDR_PHYS);
181 if (!dma_addr) { 181 if (!dma_addr) {
182 printk(KERN_ERR "%s: out of ATEs\n", __FUNCTION__); 182 printk(KERN_ERR "%s: out of ATEs\n", __func__);
183 return 0; 183 return 0;
184 } 184 }
185 return dma_addr; 185 return dma_addr;
@@ -266,7 +266,7 @@ int sn_dma_map_sg(struct device *dev, struct scatterlist *sgl, int nhwentries,
266 SN_DMA_ADDR_PHYS); 266 SN_DMA_ADDR_PHYS);
267 267
268 if (!sg->dma_address) { 268 if (!sg->dma_address) {
269 printk(KERN_ERR "%s: out of ATEs\n", __FUNCTION__); 269 printk(KERN_ERR "%s: out of ATEs\n", __func__);
270 270
271 /* 271 /*
272 * Free any successfully allocated entries. 272 * Free any successfully allocated entries.
diff --git a/arch/ia64/sn/pci/tioca_provider.c b/arch/ia64/sn/pci/tioca_provider.c
index ef048a674772..529462c01570 100644
--- a/arch/ia64/sn/pci/tioca_provider.c
+++ b/arch/ia64/sn/pci/tioca_provider.c
@@ -88,7 +88,7 @@ tioca_gart_init(struct tioca_kernel *tioca_kern)
88 break; 88 break;
89 default: 89 default:
90 printk(KERN_ERR "%s: Invalid CA_APERATURE_SIZE " 90 printk(KERN_ERR "%s: Invalid CA_APERATURE_SIZE "
91 "0x%lx\n", __FUNCTION__, (ulong) CA_APERATURE_SIZE); 91 "0x%lx\n", __func__, (ulong) CA_APERATURE_SIZE);
92 return -1; 92 return -1;
93 } 93 }
94 94
@@ -124,7 +124,7 @@ tioca_gart_init(struct tioca_kernel *tioca_kern)
124 if (!tmp) { 124 if (!tmp) {
125 printk(KERN_ERR "%s: Could not allocate " 125 printk(KERN_ERR "%s: Could not allocate "
126 "%lu bytes (order %d) for GART\n", 126 "%lu bytes (order %d) for GART\n",
127 __FUNCTION__, 127 __func__,
128 tioca_kern->ca_gart_size, 128 tioca_kern->ca_gart_size,
129 get_order(tioca_kern->ca_gart_size)); 129 get_order(tioca_kern->ca_gart_size));
130 return -ENOMEM; 130 return -ENOMEM;
@@ -341,7 +341,7 @@ tioca_dma_d48(struct pci_dev *pdev, u64 paddr)
341 341
342 if (node_upper > 64) { 342 if (node_upper > 64) {
343 printk(KERN_ERR "%s: coretalk addr 0x%p node id out " 343 printk(KERN_ERR "%s: coretalk addr 0x%p node id out "
344 "of range\n", __FUNCTION__, (void *)ct_addr); 344 "of range\n", __func__, (void *)ct_addr);
345 return 0; 345 return 0;
346 } 346 }
347 347
@@ -349,7 +349,7 @@ tioca_dma_d48(struct pci_dev *pdev, u64 paddr)
349 if (node_upper != (agp_dma_extn >> CA_AGP_DMA_NODE_ID_SHFT)) { 349 if (node_upper != (agp_dma_extn >> CA_AGP_DMA_NODE_ID_SHFT)) {
350 printk(KERN_ERR "%s: coretalk upper node (%u) " 350 printk(KERN_ERR "%s: coretalk upper node (%u) "
351 "mismatch with ca_agp_dma_addr_extn (%lu)\n", 351 "mismatch with ca_agp_dma_addr_extn (%lu)\n",
352 __FUNCTION__, 352 __func__,
353 node_upper, (agp_dma_extn >> CA_AGP_DMA_NODE_ID_SHFT)); 353 node_upper, (agp_dma_extn >> CA_AGP_DMA_NODE_ID_SHFT));
354 return 0; 354 return 0;
355 } 355 }
@@ -597,7 +597,7 @@ tioca_bus_fixup(struct pcibus_bussoft *prom_bussoft, struct pci_controller *cont
597 if (is_shub1() && sn_sal_rev() < 0x0406) { 597 if (is_shub1() && sn_sal_rev() < 0x0406) {
598 printk 598 printk
599 (KERN_ERR "%s: SGI prom rev 4.06 or greater required " 599 (KERN_ERR "%s: SGI prom rev 4.06 or greater required "
600 "for tioca support\n", __FUNCTION__); 600 "for tioca support\n", __func__);
601 return NULL; 601 return NULL;
602 } 602 }
603 603
@@ -651,7 +651,7 @@ tioca_bus_fixup(struct pcibus_bussoft *prom_bussoft, struct pci_controller *cont
651 printk(KERN_WARNING 651 printk(KERN_WARNING
652 "%s: Unable to get irq %d. " 652 "%s: Unable to get irq %d. "
653 "Error interrupts won't be routed for TIOCA bus %d\n", 653 "Error interrupts won't be routed for TIOCA bus %d\n",
654 __FUNCTION__, SGI_TIOCA_ERROR, 654 __func__, SGI_TIOCA_ERROR,
655 (int)tioca_common->ca_common.bs_persist_busnum); 655 (int)tioca_common->ca_common.bs_persist_busnum);
656 656
657 sn_set_err_irq_affinity(SGI_TIOCA_ERROR); 657 sn_set_err_irq_affinity(SGI_TIOCA_ERROR);
diff --git a/arch/ia64/sn/pci/tioce_provider.c b/arch/ia64/sn/pci/tioce_provider.c
index 999f14f986e2..9b3c11373022 100644
--- a/arch/ia64/sn/pci/tioce_provider.c
+++ b/arch/ia64/sn/pci/tioce_provider.c
@@ -494,7 +494,7 @@ tioce_dma_unmap(struct pci_dev *pdev, dma_addr_t bus_addr, int dir)
 	if (&map->ce_dmamap_list == &ce_kern->ce_dmamap_list) {
 		printk(KERN_WARNING
 		       "%s: %s - no map found for bus_addr 0x%lx\n",
-		       __FUNCTION__, pci_name(pdev), bus_addr);
+		       __func__, pci_name(pdev), bus_addr);
 	} else if (--map->refcnt == 0) {
 		for (i = 0; i < map->ate_count; i++) {
 			map->ate_shadow[i] = 0;
@@ -1030,7 +1030,7 @@ tioce_bus_fixup(struct pcibus_bussoft *prom_bussoft, struct pci_controller *cont
 		       "%s: Unable to get irq %d. "
 		       "Error interrupts won't be routed for "
 		       "TIOCE bus %04x:%02x\n",
-		       __FUNCTION__, SGI_PCIASIC_ERROR,
+		       __func__, SGI_PCIASIC_ERROR,
 		       tioce_common->ce_pcibus.bs_persist_segment,
 		       tioce_common->ce_pcibus.bs_persist_busnum);
 