Diffstat (limited to 'arch/ia64')
 arch/ia64/Kconfig                        |  23
 arch/ia64/configs/sn2_defconfig          |   1
 arch/ia64/ia32/sys_ia32.c                |   6
 arch/ia64/kernel/efi.c                   |   4
 arch/ia64/kernel/perfmon.c               | 161
 arch/ia64/kernel/perfmon_default_smpl.c  |   8
 arch/ia64/kernel/process.c               |   3
 arch/ia64/kernel/setup.c                 |  88
 arch/ia64/kernel/signal.c                |   4
 arch/ia64/kernel/time.c                  |   5
 arch/ia64/kernel/traps.c                 |   6
 arch/ia64/kernel/unaligned.c             |   5
 arch/ia64/mm/fault.c                     |   2
 arch/ia64/mm/init.c                      |   2
 arch/ia64/oprofile/Kconfig               |  20
 arch/ia64/sn/kernel/xpnet.c              |  13
16 files changed, 168 insertions, 183 deletions
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index c60532d93c54..bef47725d4ad 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -452,9 +452,9 @@ config IA64_PALINFO
 config IA64_MC_ERR_INJECT
 	tristate "MC error injection support"
 	help
-	  Selets whether support for MC error injection. By enabling the
-	  support, kernel provide sysfs interface for user application to
-	  call MC error injection PAL procedure to inject various errors.
+	  Adds support for MC error injection. If enabled, the kernel
+	  will provide a sysfs interface for user applications to
+	  call MC error injection PAL procedures to inject various errors.
 	  This is a useful tool for MCA testing.
 
 	  If you're unsure, do not select this option.
@@ -491,7 +491,7 @@ config KEXEC
 	  but it is independent of the system firmware. And like a reboot
 	  you can start any kernel with it, not just Linux.
 
-	  The name comes from the similiarity to the exec system call.
+	  The name comes from the similarity to the exec system call.
 
 	  It is an ongoing process to be certain the hardware in a machine
 	  is properly shutdown, so do not be surprised if this code does not
@@ -592,20 +592,7 @@ config IRQ_PER_CPU
 
 source "arch/ia64/hp/sim/Kconfig"
 
-menu "Instrumentation Support"
-
-source "arch/ia64/oprofile/Kconfig"
-
-config KPROBES
-	bool "Kprobes"
-	depends on KALLSYMS && MODULES
-	help
-	  Kprobes allows you to trap at almost any kernel address and
-	  execute a callback function. register_kprobe() establishes
-	  a probepoint and specifies the callback. Kprobes is useful
-	  for kernel debugging, non-intrusive instrumentation and testing.
-	  If in doubt, say "N".
-endmenu
+source "kernel/Kconfig.instrumentation"
 
 source "arch/ia64/Kconfig.debug"
 
diff --git a/arch/ia64/configs/sn2_defconfig b/arch/ia64/configs/sn2_defconfig
index 449d3e75bfc2..75fd90dc76a3 100644
--- a/arch/ia64/configs/sn2_defconfig
+++ b/arch/ia64/configs/sn2_defconfig
@@ -26,6 +26,7 @@ CONFIG_TASK_IO_ACCOUNTING=y
 # CONFIG_AUDIT is not set
 # CONFIG_IKCONFIG is not set
 CONFIG_LOG_BUF_SHIFT=20
+CONFIG_CGROUPS=y
 CONFIG_CPUSETS=y
 CONFIG_SYSFS_DEPRECATED=y
 CONFIG_RELAY=y
diff --git a/arch/ia64/ia32/sys_ia32.c b/arch/ia64/ia32/sys_ia32.c
index a3405b3c1eef..d025a22eb225 100644
--- a/arch/ia64/ia32/sys_ia32.c
+++ b/arch/ia64/ia32/sys_ia32.c
@@ -773,7 +773,7 @@ emulate_mmap (struct file *file, unsigned long start, unsigned long len, int pro
 		if (flags & MAP_SHARED)
 			printk(KERN_INFO
 			       "%s(%d): emulate_mmap() can't share head (addr=0x%lx)\n",
-			       current->comm, current->pid, start);
+			       current->comm, task_pid_nr(current), start);
 		ret = mmap_subpage(file, start, min(PAGE_ALIGN(start), end), prot, flags,
 				   off);
 		if (IS_ERR((void *) ret))
@@ -786,7 +786,7 @@ emulate_mmap (struct file *file, unsigned long start, unsigned long len, int pro
 		if (flags & MAP_SHARED)
 			printk(KERN_INFO
 			       "%s(%d): emulate_mmap() can't share tail (end=0x%lx)\n",
-			       current->comm, current->pid, end);
+			       current->comm, task_pid_nr(current), end);
 		ret = mmap_subpage(file, max(start, PAGE_START(end)), end, prot, flags,
 				   (off + len) - offset_in_page(end));
 		if (IS_ERR((void *) ret))
@@ -816,7 +816,7 @@ emulate_mmap (struct file *file, unsigned long start, unsigned long len, int pro
 
 	if ((flags & MAP_SHARED) && !is_congruent)
 		printk(KERN_INFO "%s(%d): emulate_mmap() can't share contents of incongruent mmap "
-		       "(addr=0x%lx,off=0x%llx)\n", current->comm, current->pid, start, off);
+		       "(addr=0x%lx,off=0x%llx)\n", current->comm, task_pid_nr(current), start, off);
 
 	DBG("mmap_body: mapping [0x%lx-0x%lx) %s with poff 0x%llx\n", pstart, pend,
 	    is_congruent ? "congruent" : "not congruent", poff);
diff --git a/arch/ia64/kernel/efi.c b/arch/ia64/kernel/efi.c
index 73ca86d03810..8e4894b205e2 100644
--- a/arch/ia64/kernel/efi.c
+++ b/arch/ia64/kernel/efi.c
@@ -967,7 +967,7 @@ find_memmap_space (void)
  * to use. We can allocate partial granules only if the unavailable
  * parts exist, and are WB.
  */
-void
+unsigned long
 efi_memmap_init(unsigned long *s, unsigned long *e)
 {
 	struct kern_memdesc *k, *prev = NULL;
@@ -1084,6 +1084,8 @@ efi_memmap_init(unsigned long *s, unsigned long *e)
 	/* reserve the memory we are using for kern_memmap */
 	*s = (u64)kern_memmap;
 	*e = (u64)++k;
+
+	return total_mem;
 }
 
 void
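
Note: the hunk above changes efi_memmap_init() so that, besides handing back the reserved range through its out-parameters, it reports the total amount of usable (write-back) memory it accepted; the setup.c hunks further down feed that total into the crashkernel reservation. A minimal stand-alone sketch of the new contract, using a toy descriptor type and illustrative names rather than the kernel's real EFI structures:

#include <stdio.h>

struct mem_desc {            /* illustrative stand-in for an EFI descriptor */
	unsigned long start;
	unsigned long len;
	int wb;              /* 1 if write-back cacheable (usable) */
};

static unsigned long
memmap_init_model(const struct mem_desc *map, int n,
		  unsigned long *s, unsigned long *e)
{
	unsigned long total_mem = 0;
	int i;

	for (i = 0; i < n; i++)
		if (map[i].wb)
			total_mem += map[i].len;   /* count only usable WB memory */

	*s = map[0].start;                         /* placeholder reserved range */
	*e = map[n - 1].start + map[n - 1].len;
	return total_mem;                          /* new: report total to callers */
}

int main(void)
{
	struct mem_desc map[] = { { 0x0, 0x100000, 1 }, { 0x100000, 0x200000, 0 } };
	unsigned long s, e;
	unsigned long total = memmap_init_model(map, 2, &s, &e);

	printf("total WB memory: 0x%lx, reserved [0x%lx-0x%lx)\n", total, s, e);
	return 0;
}

The design point is simply that the walk over the memory map already visits every usable region, so summing sizes there avoids a second pass in reserve_memory().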
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index f55fa07849c4..59169bf7145f 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -158,14 +158,14 @@
  */
 #define PROTECT_CTX(c, f) \
 	do {  \
-		DPRINT(("spinlock_irq_save ctx %p by [%d]\n", c, current->pid)); \
+		DPRINT(("spinlock_irq_save ctx %p by [%d]\n", c, task_pid_nr(current))); \
 		spin_lock_irqsave(&(c)->ctx_lock, f); \
-		DPRINT(("spinlocked ctx %p by [%d]\n", c, current->pid)); \
+		DPRINT(("spinlocked ctx %p by [%d]\n", c, task_pid_nr(current))); \
 	} while(0)
 
 #define UNPROTECT_CTX(c, f) \
 	do { \
-		DPRINT(("spinlock_irq_restore ctx %p by [%d]\n", c, current->pid)); \
+		DPRINT(("spinlock_irq_restore ctx %p by [%d]\n", c, task_pid_nr(current))); \
 		spin_unlock_irqrestore(&(c)->ctx_lock, f); \
 	} while(0)
 
@@ -227,12 +227,12 @@
 #ifdef PFM_DEBUGGING
 #define DPRINT(a) \
 	do { \
-		if (unlikely(pfm_sysctl.debug >0)) { printk("%s.%d: CPU%d [%d] ", __FUNCTION__, __LINE__, smp_processor_id(), current->pid); printk a; } \
+		if (unlikely(pfm_sysctl.debug >0)) { printk("%s.%d: CPU%d [%d] ", __FUNCTION__, __LINE__, smp_processor_id(), task_pid_nr(current)); printk a; } \
 	} while (0)
 
 #define DPRINT_ovfl(a) \
 	do { \
-		if (unlikely(pfm_sysctl.debug > 0 && pfm_sysctl.debug_ovfl >0)) { printk("%s.%d: CPU%d [%d] ", __FUNCTION__, __LINE__, smp_processor_id(), current->pid); printk a; } \
+		if (unlikely(pfm_sysctl.debug > 0 && pfm_sysctl.debug_ovfl >0)) { printk("%s.%d: CPU%d [%d] ", __FUNCTION__, __LINE__, smp_processor_id(), task_pid_nr(current)); printk a; } \
 	} while (0)
 #endif
 
@@ -913,7 +913,7 @@ pfm_mask_monitoring(struct task_struct *task)
 	unsigned long mask, val, ovfl_mask;
 	int i;
 
-	DPRINT_ovfl(("masking monitoring for [%d]\n", task->pid));
+	DPRINT_ovfl(("masking monitoring for [%d]\n", task_pid_nr(task)));
 
 	ovfl_mask = pmu_conf->ovfl_val;
 	/*
@@ -992,12 +992,12 @@ pfm_restore_monitoring(struct task_struct *task)
 	ovfl_mask = pmu_conf->ovfl_val;
 
 	if (task != current) {
-		printk(KERN_ERR "perfmon.%d: invalid task[%d] current[%d]\n", __LINE__, task->pid, current->pid);
+		printk(KERN_ERR "perfmon.%d: invalid task[%d] current[%d]\n", __LINE__, task_pid_nr(task), task_pid_nr(current));
 		return;
 	}
 	if (ctx->ctx_state != PFM_CTX_MASKED) {
 		printk(KERN_ERR "perfmon.%d: task[%d] current[%d] invalid state=%d\n", __LINE__,
-			task->pid, current->pid, ctx->ctx_state);
+			task_pid_nr(task), task_pid_nr(current), ctx->ctx_state);
 		return;
 	}
 	psr = pfm_get_psr();
@@ -1051,7 +1051,8 @@ pfm_restore_monitoring(struct task_struct *task)
 		if ((mask & 0x1) == 0UL) continue;
 		ctx->th_pmcs[i] = ctx->ctx_pmcs[i];
 		ia64_set_pmc(i, ctx->th_pmcs[i]);
-		DPRINT(("[%d] pmc[%d]=0x%lx\n", task->pid, i, ctx->th_pmcs[i]));
+		DPRINT(("[%d] pmc[%d]=0x%lx\n",
+					task_pid_nr(task), i, ctx->th_pmcs[i]));
 	}
 	ia64_srlz_d();
 
@@ -1370,7 +1371,7 @@ pfm_reserve_session(struct task_struct *task, int is_syswide, unsigned int cpu)
 
 error_conflict:
 	DPRINT(("system wide not possible, conflicting session [%d] on CPU%d\n",
-		pfm_sessions.pfs_sys_session[cpu]->pid,
+		task_pid_nr(pfm_sessions.pfs_sys_session[cpu]),
 		cpu));
 abort:
 	UNLOCK_PFS(flags);
@@ -1442,7 +1443,7 @@ pfm_remove_smpl_mapping(struct task_struct *task, void *vaddr, unsigned long siz
 
 	/* sanity checks */
 	if (task->mm == NULL || size == 0UL || vaddr == NULL) {
-		printk(KERN_ERR "perfmon: pfm_remove_smpl_mapping [%d] invalid context mm=%p\n", task->pid, task->mm);
+		printk(KERN_ERR "perfmon: pfm_remove_smpl_mapping [%d] invalid context mm=%p\n", task_pid_nr(task), task->mm);
 		return -EINVAL;
 	}
 
@@ -1459,7 +1460,7 @@ pfm_remove_smpl_mapping(struct task_struct *task, void *vaddr, unsigned long siz
 
 	up_write(&task->mm->mmap_sem);
 	if (r !=0) {
-		printk(KERN_ERR "perfmon: [%d] unable to unmap sampling buffer @%p size=%lu\n", task->pid, vaddr, size);
+		printk(KERN_ERR "perfmon: [%d] unable to unmap sampling buffer @%p size=%lu\n", task_pid_nr(task), vaddr, size);
 	}
 
 	DPRINT(("do_unmap(%p, %lu)=%d\n", vaddr, size, r));
@@ -1501,7 +1502,7 @@ pfm_free_smpl_buffer(pfm_context_t *ctx)
 	return 0;
 
 invalid_free:
-	printk(KERN_ERR "perfmon: pfm_free_smpl_buffer [%d] no buffer\n", current->pid);
+	printk(KERN_ERR "perfmon: pfm_free_smpl_buffer [%d] no buffer\n", task_pid_nr(current));
 	return -EINVAL;
 }
 #endif
@@ -1547,13 +1548,13 @@ pfm_read(struct file *filp, char __user *buf, size_t size, loff_t *ppos)
 	unsigned long flags;
 	DECLARE_WAITQUEUE(wait, current);
 	if (PFM_IS_FILE(filp) == 0) {
-		printk(KERN_ERR "perfmon: pfm_poll: bad magic [%d]\n", current->pid);
+		printk(KERN_ERR "perfmon: pfm_poll: bad magic [%d]\n", task_pid_nr(current));
 		return -EINVAL;
 	}
 
 	ctx = (pfm_context_t *)filp->private_data;
 	if (ctx == NULL) {
-		printk(KERN_ERR "perfmon: pfm_read: NULL ctx [%d]\n", current->pid);
+		printk(KERN_ERR "perfmon: pfm_read: NULL ctx [%d]\n", task_pid_nr(current));
 		return -EINVAL;
 	}
 
@@ -1607,7 +1608,7 @@ pfm_read(struct file *filp, char __user *buf, size_t size, loff_t *ppos)
 
 		PROTECT_CTX(ctx, flags);
 	}
-	DPRINT(("[%d] back to running ret=%ld\n", current->pid, ret));
+	DPRINT(("[%d] back to running ret=%ld\n", task_pid_nr(current), ret));
 	set_current_state(TASK_RUNNING);
 	remove_wait_queue(&ctx->ctx_msgq_wait, &wait);
 
@@ -1616,7 +1617,7 @@ pfm_read(struct file *filp, char __user *buf, size_t size, loff_t *ppos)
 	ret = -EINVAL;
 	msg = pfm_get_next_msg(ctx);
 	if (msg == NULL) {
-		printk(KERN_ERR "perfmon: pfm_read no msg for ctx=%p [%d]\n", ctx, current->pid);
+		printk(KERN_ERR "perfmon: pfm_read no msg for ctx=%p [%d]\n", ctx, task_pid_nr(current));
 		goto abort_locked;
 	}
 
@@ -1647,13 +1648,13 @@ pfm_poll(struct file *filp, poll_table * wait)
 	unsigned int mask = 0;
 
 	if (PFM_IS_FILE(filp) == 0) {
-		printk(KERN_ERR "perfmon: pfm_poll: bad magic [%d]\n", current->pid);
+		printk(KERN_ERR "perfmon: pfm_poll: bad magic [%d]\n", task_pid_nr(current));
 		return 0;
 	}
 
 	ctx = (pfm_context_t *)filp->private_data;
 	if (ctx == NULL) {
-		printk(KERN_ERR "perfmon: pfm_poll: NULL ctx [%d]\n", current->pid);
+		printk(KERN_ERR "perfmon: pfm_poll: NULL ctx [%d]\n", task_pid_nr(current));
 		return 0;
 	}
 
@@ -1692,7 +1693,7 @@ pfm_do_fasync(int fd, struct file *filp, pfm_context_t *ctx, int on)
 	ret = fasync_helper (fd, filp, on, &ctx->ctx_async_queue);
 
 	DPRINT(("pfm_fasync called by [%d] on ctx_fd=%d on=%d async_queue=%p ret=%d\n",
-		current->pid,
+		task_pid_nr(current),
 		fd,
 		on,
 		ctx->ctx_async_queue, ret));
@@ -1707,13 +1708,13 @@ pfm_fasync(int fd, struct file *filp, int on)
 	int ret;
 
 	if (PFM_IS_FILE(filp) == 0) {
-		printk(KERN_ERR "perfmon: pfm_fasync bad magic [%d]\n", current->pid);
+		printk(KERN_ERR "perfmon: pfm_fasync bad magic [%d]\n", task_pid_nr(current));
 		return -EBADF;
 	}
 
 	ctx = (pfm_context_t *)filp->private_data;
 	if (ctx == NULL) {
-		printk(KERN_ERR "perfmon: pfm_fasync NULL ctx [%d]\n", current->pid);
+		printk(KERN_ERR "perfmon: pfm_fasync NULL ctx [%d]\n", task_pid_nr(current));
 		return -EBADF;
 	}
 	/*
@@ -1759,7 +1760,7 @@ pfm_syswide_force_stop(void *info)
 	if (owner != ctx->ctx_task) {
 		printk(KERN_ERR "perfmon: pfm_syswide_force_stop CPU%d unexpected owner [%d] instead of [%d]\n",
 			smp_processor_id(),
-			owner->pid, ctx->ctx_task->pid);
+			task_pid_nr(owner), task_pid_nr(ctx->ctx_task));
 		return;
 	}
 	if (GET_PMU_CTX() != ctx) {
@@ -1769,7 +1770,7 @@ pfm_syswide_force_stop(void *info)
 		return;
 	}
 
-	DPRINT(("on CPU%d forcing system wide stop for [%d]\n", smp_processor_id(), ctx->ctx_task->pid));
+	DPRINT(("on CPU%d forcing system wide stop for [%d]\n", smp_processor_id(), task_pid_nr(ctx->ctx_task)));
 	/*
 	 * the context is already protected in pfm_close(), we simply
 	 * need to mask interrupts to avoid a PMU interrupt race on
@@ -1821,7 +1822,7 @@ pfm_flush(struct file *filp, fl_owner_t id)
 
 	ctx = (pfm_context_t *)filp->private_data;
 	if (ctx == NULL) {
-		printk(KERN_ERR "perfmon: pfm_flush: NULL ctx [%d]\n", current->pid);
+		printk(KERN_ERR "perfmon: pfm_flush: NULL ctx [%d]\n", task_pid_nr(current));
 		return -EBADF;
 	}
 
@@ -1969,7 +1970,7 @@ pfm_close(struct inode *inode, struct file *filp)
 
 	ctx = (pfm_context_t *)filp->private_data;
 	if (ctx == NULL) {
-		printk(KERN_ERR "perfmon: pfm_close: NULL ctx [%d]\n", current->pid);
+		printk(KERN_ERR "perfmon: pfm_close: NULL ctx [%d]\n", task_pid_nr(current));
 		return -EBADF;
 	}
 
@@ -2066,7 +2067,7 @@ pfm_close(struct inode *inode, struct file *filp)
 	 */
 	ctx->ctx_state = PFM_CTX_ZOMBIE;
 
-	DPRINT(("zombie ctx for [%d]\n", task->pid));
+	DPRINT(("zombie ctx for [%d]\n", task_pid_nr(task)));
 	/*
 	 * cannot free the context on the spot. deferred until
 	 * the task notices the ZOMBIE state
@@ -2472,7 +2473,7 @@ pfm_setup_buffer_fmt(struct task_struct *task, struct file *filp, pfm_context_t
 	/* invoke and lock buffer format, if found */
 	fmt = pfm_find_buffer_fmt(arg->ctx_smpl_buf_id);
 	if (fmt == NULL) {
-		DPRINT(("[%d] cannot find buffer format\n", task->pid));
+		DPRINT(("[%d] cannot find buffer format\n", task_pid_nr(task)));
 		return -EINVAL;
 	}
 
@@ -2483,7 +2484,7 @@ pfm_setup_buffer_fmt(struct task_struct *task, struct file *filp, pfm_context_t
 
 	ret = pfm_buf_fmt_validate(fmt, task, ctx_flags, cpu, fmt_arg);
 
-	DPRINT(("[%d] after validate(0x%x,%d,%p)=%d\n", task->pid, ctx_flags, cpu, fmt_arg, ret));
+	DPRINT(("[%d] after validate(0x%x,%d,%p)=%d\n", task_pid_nr(task), ctx_flags, cpu, fmt_arg, ret));
 
 	if (ret) goto error;
 
@@ -2605,23 +2606,23 @@ pfm_task_incompatible(pfm_context_t *ctx, struct task_struct *task)
 	 * no kernel task or task not owner by caller
 	 */
 	if (task->mm == NULL) {
-		DPRINT(("task [%d] has not memory context (kernel thread)\n", task->pid));
+		DPRINT(("task [%d] has not memory context (kernel thread)\n", task_pid_nr(task)));
 		return -EPERM;
 	}
 	if (pfm_bad_permissions(task)) {
-		DPRINT(("no permission to attach to [%d]\n", task->pid));
+		DPRINT(("no permission to attach to [%d]\n", task_pid_nr(task)));
 		return -EPERM;
 	}
 	/*
 	 * cannot block in self-monitoring mode
 	 */
 	if (CTX_OVFL_NOBLOCK(ctx) == 0 && task == current) {
-		DPRINT(("cannot load a blocking context on self for [%d]\n", task->pid));
+		DPRINT(("cannot load a blocking context on self for [%d]\n", task_pid_nr(task)));
 		return -EINVAL;
 	}
 
 	if (task->exit_state == EXIT_ZOMBIE) {
-		DPRINT(("cannot attach to zombie task [%d]\n", task->pid));
+		DPRINT(("cannot attach to zombie task [%d]\n", task_pid_nr(task)));
 		return -EBUSY;
 	}
 
@@ -2631,7 +2632,7 @@ pfm_task_incompatible(pfm_context_t *ctx, struct task_struct *task)
 	if (task == current) return 0;
 
 	if ((task->state != TASK_STOPPED) && (task->state != TASK_TRACED)) {
-		DPRINT(("cannot attach to non-stopped task [%d] state=%ld\n", task->pid, task->state));
+		DPRINT(("cannot attach to non-stopped task [%d] state=%ld\n", task_pid_nr(task), task->state));
 		return -EBUSY;
 	}
 	/*
@@ -3512,7 +3513,7 @@ pfm_use_debug_registers(struct task_struct *task)
 
 	if (pmu_conf->use_rr_dbregs == 0) return 0;
 
-	DPRINT(("called for [%d]\n", task->pid));
+	DPRINT(("called for [%d]\n", task_pid_nr(task)));
 
 	/*
 	 * do it only once
@@ -3543,7 +3544,7 @@ pfm_use_debug_registers(struct task_struct *task)
 	DPRINT(("ptrace_use_dbregs=%u sys_use_dbregs=%u by [%d] ret = %d\n",
 		pfm_sessions.pfs_ptrace_use_dbregs,
 		pfm_sessions.pfs_sys_use_dbregs,
-		task->pid, ret));
+		task_pid_nr(task), ret));
 
 	UNLOCK_PFS(flags);
 
@@ -3568,7 +3569,7 @@ pfm_release_debug_registers(struct task_struct *task)
 
 	LOCK_PFS(flags);
 	if (pfm_sessions.pfs_ptrace_use_dbregs == 0) {
-		printk(KERN_ERR "perfmon: invalid release for [%d] ptrace_use_dbregs=0\n", task->pid);
+		printk(KERN_ERR "perfmon: invalid release for [%d] ptrace_use_dbregs=0\n", task_pid_nr(task));
 		ret = -1;
 	} else {
 		pfm_sessions.pfs_ptrace_use_dbregs--;
@@ -3620,7 +3621,7 @@ pfm_restart(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
 
 	/* sanity check */
 	if (unlikely(task == NULL)) {
-		printk(KERN_ERR "perfmon: [%d] pfm_restart no task\n", current->pid);
+		printk(KERN_ERR "perfmon: [%d] pfm_restart no task\n", task_pid_nr(current));
 		return -EINVAL;
 	}
 
@@ -3629,7 +3630,7 @@ pfm_restart(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
 	fmt = ctx->ctx_buf_fmt;
 
 	DPRINT(("restarting self %d ovfl=0x%lx\n",
-		task->pid,
+		task_pid_nr(task),
 		ctx->ctx_ovfl_regs[0]));
 
 	if (CTX_HAS_SMPL(ctx)) {
@@ -3653,11 +3654,11 @@ pfm_restart(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
 		pfm_reset_regs(ctx, ctx->ctx_ovfl_regs, PFM_PMD_LONG_RESET);
 
 		if (rst_ctrl.bits.mask_monitoring == 0) {
-			DPRINT(("resuming monitoring for [%d]\n", task->pid));
+			DPRINT(("resuming monitoring for [%d]\n", task_pid_nr(task)));
 
 			if (state == PFM_CTX_MASKED) pfm_restore_monitoring(task);
 		} else {
-			DPRINT(("keeping monitoring stopped for [%d]\n", task->pid));
+			DPRINT(("keeping monitoring stopped for [%d]\n", task_pid_nr(task)));
 
 			// cannot use pfm_stop_monitoring(task, regs);
 		}
@@ -3714,10 +3715,10 @@ pfm_restart(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3714 * "self-monitoring". 3715 * "self-monitoring".
3715 */ 3716 */
3716 if (CTX_OVFL_NOBLOCK(ctx) == 0 && state == PFM_CTX_MASKED) { 3717 if (CTX_OVFL_NOBLOCK(ctx) == 0 && state == PFM_CTX_MASKED) {
3717 DPRINT(("unblocking [%d] \n", task->pid)); 3718 DPRINT(("unblocking [%d] \n", task_pid_nr(task)));
3718 complete(&ctx->ctx_restart_done); 3719 complete(&ctx->ctx_restart_done);
3719 } else { 3720 } else {
3720 DPRINT(("[%d] armed exit trap\n", task->pid)); 3721 DPRINT(("[%d] armed exit trap\n", task_pid_nr(task)));
3721 3722
3722 ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_RESET; 3723 ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_RESET;
3723 3724
@@ -3805,7 +3806,7 @@ pfm_write_ibr_dbr(int mode, pfm_context_t *ctx, void *arg, int count, struct pt_
 	 * don't bother if we are loaded and task is being debugged
 	 */
 	if (is_loaded && (thread->flags & IA64_THREAD_DBG_VALID) != 0) {
-		DPRINT(("debug registers already in use for [%d]\n", task->pid));
+		DPRINT(("debug registers already in use for [%d]\n", task_pid_nr(task)));
 		return -EBUSY;
 	}
 
@@ -3846,7 +3847,7 @@ pfm_write_ibr_dbr(int mode, pfm_context_t *ctx, void *arg, int count, struct pt_
 	 * is shared by all processes running on it
 	 */
 	if (first_time && can_access_pmu) {
-		DPRINT(("[%d] clearing ibrs, dbrs\n", task->pid));
+		DPRINT(("[%d] clearing ibrs, dbrs\n", task_pid_nr(task)));
 		for (i=0; i < pmu_conf->num_ibrs; i++) {
 			ia64_set_ibr(i, 0UL);
 			ia64_dv_serialize_instruction();
@@ -4035,7 +4036,7 @@ pfm_stop(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
 		return -EBUSY;
 	}
 	DPRINT(("task [%d] ctx_state=%d is_system=%d\n",
-		PFM_CTX_TASK(ctx)->pid,
+		task_pid_nr(PFM_CTX_TASK(ctx)),
 		state,
 		is_system));
 	/*
@@ -4093,7 +4094,7 @@ pfm_stop(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
 		 * monitoring disabled in kernel at next reschedule
 		 */
 		ctx->ctx_saved_psr_up = 0;
-		DPRINT(("task=[%d]\n", task->pid));
+		DPRINT(("task=[%d]\n", task_pid_nr(task)));
 	}
 	return 0;
 }
@@ -4298,11 +4299,12 @@ pfm_context_load(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
 
 	if (is_system) {
 		if (pfm_sessions.pfs_ptrace_use_dbregs) {
-			DPRINT(("cannot load [%d] dbregs in use\n", task->pid));
+			DPRINT(("cannot load [%d] dbregs in use\n",
+							task_pid_nr(task)));
 			ret = -EBUSY;
 		} else {
 			pfm_sessions.pfs_sys_use_dbregs++;
-			DPRINT(("load [%d] increased sys_use_dbreg=%u\n", task->pid, pfm_sessions.pfs_sys_use_dbregs));
+			DPRINT(("load [%d] increased sys_use_dbreg=%u\n", task_pid_nr(task), pfm_sessions.pfs_sys_use_dbregs));
 			set_dbregs = 1;
 		}
 	}
@@ -4394,7 +4396,7 @@ pfm_context_load(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
 
 		/* allow user level control */
 		ia64_psr(regs)->sp = 0;
-		DPRINT(("clearing psr.sp for [%d]\n", task->pid));
+		DPRINT(("clearing psr.sp for [%d]\n", task_pid_nr(task)));
 
 		SET_LAST_CPU(ctx, smp_processor_id());
 		INC_ACTIVATION();
@@ -4429,7 +4431,7 @@ pfm_context_load(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
 		 */
 		SET_PMU_OWNER(task, ctx);
 
-		DPRINT(("context loaded on PMU for [%d]\n", task->pid));
+		DPRINT(("context loaded on PMU for [%d]\n", task_pid_nr(task)));
 	} else {
 		/*
 		 * when not current, task MUST be stopped, so this is safe
@@ -4493,7 +4495,7 @@ pfm_context_unload(pfm_context_t *ctx, void *arg, int count, struct pt_regs *reg
 	int prev_state, is_system;
 	int ret;
 
-	DPRINT(("ctx_state=%d task [%d]\n", ctx->ctx_state, task ? task->pid : -1));
+	DPRINT(("ctx_state=%d task [%d]\n", ctx->ctx_state, task ? task_pid_nr(task) : -1));
 
 	prev_state = ctx->ctx_state;
 	is_system = ctx->ctx_fl_system;
@@ -4568,7 +4570,7 @@ pfm_context_unload(pfm_context_t *ctx, void *arg, int count, struct pt_regs *reg
 		 */
 		ia64_psr(regs)->sp = 1;
 
-		DPRINT(("setting psr.sp for [%d]\n", task->pid));
+		DPRINT(("setting psr.sp for [%d]\n", task_pid_nr(task)));
 	}
 	/*
 	 * save PMDs to context
@@ -4608,7 +4610,7 @@ pfm_context_unload(pfm_context_t *ctx, void *arg, int count, struct pt_regs *reg
 	ctx->ctx_fl_can_restart = 0;
 	ctx->ctx_fl_going_zombie = 0;
 
-	DPRINT(("disconnected [%d] from context\n", task->pid));
+	DPRINT(("disconnected [%d] from context\n", task_pid_nr(task)));
 
 	return 0;
 }
@@ -4631,7 +4633,7 @@ pfm_exit_thread(struct task_struct *task)
 
 	PROTECT_CTX(ctx, flags);
 
-	DPRINT(("state=%d task [%d]\n", ctx->ctx_state, task->pid));
+	DPRINT(("state=%d task [%d]\n", ctx->ctx_state, task_pid_nr(task)));
 
 	state = ctx->ctx_state;
 	switch(state) {
@@ -4640,13 +4642,13 @@ pfm_exit_thread(struct task_struct *task)
 		 * only comes to this function if pfm_context is not NULL, i.e., cannot
 		 * be in unloaded state
 		 */
-		printk(KERN_ERR "perfmon: pfm_exit_thread [%d] ctx unloaded\n", task->pid);
+		printk(KERN_ERR "perfmon: pfm_exit_thread [%d] ctx unloaded\n", task_pid_nr(task));
 		break;
 	case PFM_CTX_LOADED:
 	case PFM_CTX_MASKED:
 		ret = pfm_context_unload(ctx, NULL, 0, regs);
 		if (ret) {
-			printk(KERN_ERR "perfmon: pfm_exit_thread [%d] state=%d unload failed %d\n", task->pid, state, ret);
+			printk(KERN_ERR "perfmon: pfm_exit_thread [%d] state=%d unload failed %d\n", task_pid_nr(task), state, ret);
 		}
 		DPRINT(("ctx unloaded for current state was %d\n", state));
 
@@ -4655,12 +4657,12 @@ pfm_exit_thread(struct task_struct *task)
 	case PFM_CTX_ZOMBIE:
 		ret = pfm_context_unload(ctx, NULL, 0, regs);
 		if (ret) {
-			printk(KERN_ERR "perfmon: pfm_exit_thread [%d] state=%d unload failed %d\n", task->pid, state, ret);
+			printk(KERN_ERR "perfmon: pfm_exit_thread [%d] state=%d unload failed %d\n", task_pid_nr(task), state, ret);
 		}
 		free_ok = 1;
 		break;
 	default:
-		printk(KERN_ERR "perfmon: pfm_exit_thread [%d] unexpected state=%d\n", task->pid, state);
+		printk(KERN_ERR "perfmon: pfm_exit_thread [%d] unexpected state=%d\n", task_pid_nr(task), state);
 		break;
 	}
 	UNPROTECT_CTX(ctx, flags);
@@ -4744,7 +4746,7 @@ recheck:
 	DPRINT(("context %d state=%d [%d] task_state=%ld must_stop=%d\n",
 		ctx->ctx_fd,
 		state,
-		task->pid,
+		task_pid_nr(task),
 		task->state, PFM_CMD_STOPPED(cmd)));
 
 	/*
@@ -4791,7 +4793,7 @@ recheck:
 	 */
 	if (PFM_CMD_STOPPED(cmd)) {
 		if ((task->state != TASK_STOPPED) && (task->state != TASK_TRACED)) {
-			DPRINT(("[%d] task not in stopped state\n", task->pid));
+			DPRINT(("[%d] task not in stopped state\n", task_pid_nr(task)));
 			return -EBUSY;
 		}
 		/*
@@ -4884,7 +4886,7 @@ restart_args:
 	 * limit abuse to min page size
 	 */
 	if (unlikely(sz > PFM_MAX_ARGSIZE)) {
-		printk(KERN_ERR "perfmon: [%d] argument too big %lu\n", current->pid, sz);
+		printk(KERN_ERR "perfmon: [%d] argument too big %lu\n", task_pid_nr(current), sz);
 		return -E2BIG;
 	}
 
@@ -5031,11 +5033,11 @@ pfm_context_force_terminate(pfm_context_t *ctx, struct pt_regs *regs)
 {
 	int ret;
 
-	DPRINT(("entering for [%d]\n", current->pid));
+	DPRINT(("entering for [%d]\n", task_pid_nr(current)));
 
 	ret = pfm_context_unload(ctx, NULL, 0, regs);
 	if (ret) {
-		printk(KERN_ERR "pfm_context_force_terminate: [%d] unloaded failed with %d\n", current->pid, ret);
+		printk(KERN_ERR "pfm_context_force_terminate: [%d] unloaded failed with %d\n", task_pid_nr(current), ret);
 	}
 
 	/*
@@ -5072,7 +5074,7 @@ pfm_handle_work(void)
 
 	ctx = PFM_GET_CTX(current);
 	if (ctx == NULL) {
-		printk(KERN_ERR "perfmon: [%d] has no PFM context\n", current->pid);
+		printk(KERN_ERR "perfmon: [%d] has no PFM context\n", task_pid_nr(current));
 		return;
 	}
 
@@ -5269,7 +5271,7 @@ pfm_overflow_handler(struct task_struct *task, pfm_context_t *ctx, u64 pmc0, str
 	DPRINT_ovfl(("pmc0=0x%lx pid=%d iip=0x%lx, %s "
 		     "used_pmds=0x%lx\n",
 		     pmc0,
-		     task ? task->pid: -1,
+		     task ? task_pid_nr(task): -1,
 		     (regs ? regs->cr_iip : 0),
 		     CTX_OVFL_NOBLOCK(ctx) ? "nonblocking" : "blocking",
 		     ctx->ctx_used_pmds[0]));
@@ -5458,7 +5460,7 @@ pfm_overflow_handler(struct task_struct *task, pfm_context_t *ctx, u64 pmc0, str
 	}
 
 	DPRINT_ovfl(("owner [%d] pending=%ld reason=%u ovfl_pmds=0x%lx ovfl_notify=0x%lx masked=%d\n",
-			GET_PMU_OWNER() ? GET_PMU_OWNER()->pid : -1,
+			GET_PMU_OWNER() ? task_pid_nr(GET_PMU_OWNER()) : -1,
 			PFM_GET_WORK_PENDING(task),
 			ctx->ctx_fl_trap_reason,
 			ovfl_pmds,
@@ -5483,7 +5485,7 @@ pfm_overflow_handler(struct task_struct *task, pfm_context_t *ctx, u64 pmc0, str
 sanity_check:
 	printk(KERN_ERR "perfmon: CPU%d overflow handler [%d] pmc0=0x%lx\n",
 		smp_processor_id(),
-		task ? task->pid : -1,
+		task ? task_pid_nr(task) : -1,
 		pmc0);
 	return;
 
@@ -5516,7 +5518,7 @@ stop_monitoring:
 	 *
 	 * Overall pretty hairy stuff....
 	 */
-	DPRINT(("ctx is zombie for [%d], converted to spurious\n", task ? task->pid: -1));
+	DPRINT(("ctx is zombie for [%d], converted to spurious\n", task ? task_pid_nr(task): -1));
 	pfm_clear_psr_up();
 	ia64_psr(regs)->up = 0;
 	ia64_psr(regs)->sp = 1;
@@ -5577,13 +5579,13 @@ pfm_do_interrupt_handler(int irq, void *arg, struct pt_regs *regs)
 
 report_spurious1:
 	printk(KERN_INFO "perfmon: spurious overflow interrupt on CPU%d: process %d has no PFM context\n",
-		this_cpu, task->pid);
+		this_cpu, task_pid_nr(task));
 	pfm_unfreeze_pmu();
 	return -1;
 report_spurious2:
 	printk(KERN_INFO "perfmon: spurious overflow interrupt on CPU%d: process %d, invalid flag\n",
 		this_cpu,
-		task->pid);
+		task_pid_nr(task));
 	pfm_unfreeze_pmu();
 	return -1;
 }
@@ -5870,7 +5872,8 @@ pfm_force_cleanup(pfm_context_t *ctx, struct pt_regs *regs)
 	ia64_psr(regs)->sp = 1;
 
 	if (GET_PMU_OWNER() == task) {
-		DPRINT(("cleared ownership for [%d]\n", ctx->ctx_task->pid));
+		DPRINT(("cleared ownership for [%d]\n",
+					task_pid_nr(ctx->ctx_task)));
 		SET_PMU_OWNER(NULL, NULL);
 	}
 
@@ -5882,7 +5885,7 @@ pfm_force_cleanup(pfm_context_t *ctx, struct pt_regs *regs)
 	task->thread.pfm_context = NULL;
 	task->thread.flags &= ~IA64_THREAD_PM_VALID;
 
-	DPRINT(("force cleanup for [%d]\n", task->pid));
+	DPRINT(("force cleanup for [%d]\n", task_pid_nr(task)));
 }
 
 
@@ -6426,7 +6429,7 @@ pfm_flush_pmds(struct task_struct *task, pfm_context_t *ctx)
 
 		if (PMD_IS_COUNTING(i)) {
 			DPRINT(("[%d] pmd[%d] ctx_pmd=0x%lx hw_pmd=0x%lx\n",
-				task->pid,
+				task_pid_nr(task),
 				i,
 				ctx->ctx_pmds[i].val,
 				val & ovfl_val));
@@ -6448,11 +6451,11 @@ pfm_flush_pmds(struct task_struct *task, pfm_context_t *ctx)
 			 */
 			if (pmc0 & (1UL << i)) {
 				val += 1 + ovfl_val;
-				DPRINT(("[%d] pmd[%d] overflowed\n", task->pid, i));
+				DPRINT(("[%d] pmd[%d] overflowed\n", task_pid_nr(task), i));
 			}
 		}
 
-		DPRINT(("[%d] ctx_pmd[%d]=0x%lx pmd_val=0x%lx\n", task->pid, i, val, pmd_val));
+		DPRINT(("[%d] ctx_pmd[%d]=0x%lx pmd_val=0x%lx\n", task_pid_nr(task), i, val, pmd_val));
 
 		if (is_self) ctx->th_pmds[i] = pmd_val;
 
@@ -6793,14 +6796,14 @@ dump_pmu_state(const char *from)
 	printk("CPU%d from %s() current [%d] iip=0x%lx %s\n",
 		this_cpu,
 		from,
-		current->pid,
+		task_pid_nr(current),
 		regs->cr_iip,
 		current->comm);
 
 	task = GET_PMU_OWNER();
 	ctx = GET_PMU_CTX();
 
-	printk("->CPU%d owner [%d] ctx=%p\n", this_cpu, task ? task->pid : -1, ctx);
+	printk("->CPU%d owner [%d] ctx=%p\n", this_cpu, task ? task_pid_nr(task) : -1, ctx);
 
 	psr = pfm_get_psr();
 
@@ -6848,7 +6851,7 @@ pfm_inherit(struct task_struct *task, struct pt_regs *regs)
 {
 	struct thread_struct *thread;
 
-	DPRINT(("perfmon: pfm_inherit clearing state for [%d]\n", task->pid));
+	DPRINT(("perfmon: pfm_inherit clearing state for [%d]\n", task_pid_nr(task)));
 
 	thread = &task->thread;
 
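
Note: nearly every hunk in this file is one mechanical substitution: open-coded task->pid reads in diagnostics become task_pid_nr(task). With pid namespaces a task no longer has a single unambiguous pid, and task_pid_nr() names the global (init-namespace) number explicitly, which is the right one for kernel log messages. A toy stand-alone model of the idea; the struct below is an illustrative stand-in, not the kernel's task_struct:

#include <stdio.h>

/* illustrative stand-in for struct task_struct */
struct toy_task { int pid; };

/* models task_pid_nr(): the global (init-namespace) pid number */
static inline int toy_task_pid_nr(const struct toy_task *t)
{
	return t->pid;
}

int main(void)
{
	struct toy_task t = { .pid = 1234 };

	/* what a DPRINT(("... [%d]", task_pid_nr(task))) line would print */
	printf("masking monitoring for [%d]\n", toy_task_pid_nr(&t));
	return 0;
}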
diff --git a/arch/ia64/kernel/perfmon_default_smpl.c b/arch/ia64/kernel/perfmon_default_smpl.c
index ff80eab83b38..a7af1cb419f9 100644
--- a/arch/ia64/kernel/perfmon_default_smpl.c
+++ b/arch/ia64/kernel/perfmon_default_smpl.c
@@ -44,11 +44,11 @@ default_validate(struct task_struct *task, unsigned int flags, int cpu, void *da
 	int ret = 0;
 
 	if (data == NULL) {
-		DPRINT(("[%d] no argument passed\n", task->pid));
+		DPRINT(("[%d] no argument passed\n", task_pid_nr(task)));
 		return -EINVAL;
 	}
 
-	DPRINT(("[%d] validate flags=0x%x CPU%d\n", task->pid, flags, cpu));
+	DPRINT(("[%d] validate flags=0x%x CPU%d\n", task_pid_nr(task), flags, cpu));
 
 	/*
 	 * must hold at least the buffer header + one minimally sized entry
@@ -88,7 +88,7 @@ default_init(struct task_struct *task, void *buf, unsigned int flags, int cpu, v
 	hdr->hdr_count = 0UL;
 
 	DPRINT(("[%d] buffer=%p buf_size=%lu hdr_size=%lu hdr_version=%u cur_offs=%lu\n",
-		task->pid,
+		task_pid_nr(task),
 		buf,
 		hdr->hdr_buf_size,
 		sizeof(*hdr),
@@ -245,7 +245,7 @@ default_restart(struct task_struct *task, pfm_ovfl_ctrl_t *ctrl, void *buf, stru
 static int
 default_exit(struct task_struct *task, void *buf, struct pt_regs *regs)
 {
-	DPRINT(("[%d] exit(%p)\n", task->pid, buf));
+	DPRINT(("[%d] exit(%p)\n", task_pid_nr(task), buf));
 	return 0;
 }
 
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c
index c613fc0e91cc..2418289ee5ca 100644
--- a/arch/ia64/kernel/process.c
+++ b/arch/ia64/kernel/process.c
@@ -105,7 +105,8 @@ show_regs (struct pt_regs *regs)
 	unsigned long ip = regs->cr_iip + ia64_psr(regs)->ri;
 
 	print_modules();
-	printk("\nPid: %d, CPU %d, comm: %20s\n", current->pid, smp_processor_id(), current->comm);
+	printk("\nPid: %d, CPU %d, comm: %20s\n", task_pid_nr(current),
+			smp_processor_id(), current->comm);
 	printk("psr : %016lx ifs : %016lx ip : [<%016lx>] %s\n",
 		regs->cr_ipsr, regs->cr_ifs, ip, print_tainted());
 	print_symbol("ip is at %s\n", ip);
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index c5cfcfa4c87c..cbf67f1aa291 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -208,6 +208,48 @@ static int __init register_memory(void)
 
 __initcall(register_memory);
 
+
+#ifdef CONFIG_KEXEC
+static void __init setup_crashkernel(unsigned long total, int *n)
+{
+	unsigned long long base = 0, size = 0;
+	int ret;
+
+	ret = parse_crashkernel(boot_command_line, total,
+			&size, &base);
+	if (ret == 0 && size > 0) {
+		if (!base) {
+			sort_regions(rsvd_region, *n);
+			base = kdump_find_rsvd_region(size,
+					rsvd_region, *n);
+		}
+		if (base != ~0UL) {
+			printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
+					"for crashkernel (System RAM: %ldMB)\n",
+					(unsigned long)(size >> 20),
+					(unsigned long)(base >> 20),
+					(unsigned long)(total >> 20));
+			rsvd_region[*n].start =
+				(unsigned long)__va(base);
+			rsvd_region[*n].end =
+				(unsigned long)__va(base + size);
+			(*n)++;
+			crashk_res.start = base;
+			crashk_res.end = base + size - 1;
+		}
+	}
+	efi_memmap_res.start = ia64_boot_param->efi_memmap;
+	efi_memmap_res.end = efi_memmap_res.start +
+		ia64_boot_param->efi_memmap_size;
+	boot_param_res.start = __pa(ia64_boot_param);
+	boot_param_res.end = boot_param_res.start +
+		sizeof(*ia64_boot_param);
+}
+#else
+static inline void __init setup_crashkernel(unsigned long total, int *n)
+{}
+#endif
+
 /**
  * reserve_memory - setup reserved memory areas
  *
@@ -219,6 +261,7 @@ void __init
 reserve_memory (void)
 {
 	int n = 0;
+	unsigned long total_memory;
 
 	/*
 	 * none of the entries in this table overlap
@@ -254,50 +297,11 @@ reserve_memory (void)
 	n++;
 #endif
 
-	efi_memmap_init(&rsvd_region[n].start, &rsvd_region[n].end);
+	total_memory = efi_memmap_init(&rsvd_region[n].start, &rsvd_region[n].end);
 	n++;
 
-#ifdef CONFIG_KEXEC
-	/* crashkernel=size@offset specifies the size to reserve for a crash
-	 * kernel. If offset is 0, then it is determined automatically.
-	 * By reserving this memory we guarantee that linux never set's it
-	 * up as a DMA target. Useful for holding code to do something
-	 * appropriate after a kernel panic.
-	 */
-	{
-		char *from = strstr(boot_command_line, "crashkernel=");
-		unsigned long base, size;
-		if (from) {
-			size = memparse(from + 12, &from);
-			if (*from == '@')
-				base = memparse(from+1, &from);
-			else
-				base = 0;
-			if (size) {
-				if (!base) {
-					sort_regions(rsvd_region, n);
-					base = kdump_find_rsvd_region(size,
-							rsvd_region, n);
-				}
-				if (base != ~0UL) {
-					rsvd_region[n].start =
-						(unsigned long)__va(base);
-					rsvd_region[n].end =
-						(unsigned long)__va(base + size);
-					n++;
-					crashk_res.start = base;
-					crashk_res.end = base + size - 1;
-				}
-			}
-		}
-		efi_memmap_res.start = ia64_boot_param->efi_memmap;
-		efi_memmap_res.end = efi_memmap_res.start +
-			ia64_boot_param->efi_memmap_size;
-		boot_param_res.start = __pa(ia64_boot_param);
-		boot_param_res.end = boot_param_res.start +
-			sizeof(*ia64_boot_param);
-	}
-#endif
+	setup_crashkernel(total_memory, &n);
+
 	/* end of memory marker */
 	rsvd_region[n].start = ~0UL;
 	rsvd_region[n].end = ~0UL;
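
Note: the refactor above replaces the open-coded memparse() loop with the generic parse_crashkernel() helper, keeping the "crashkernel=size[@offset]" syntax described in the removed comment. A hedged userspace sketch of that syntax; parse_suffixed() is a toy re-implementation of memparse()'s K/M/G suffix handling, not kernel code:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static unsigned long long parse_suffixed(const char *s, char **end)
{
	unsigned long long v = strtoull(s, end, 0);

	switch (**end) {
	case 'G': case 'g': v <<= 10; /* fall through */
	case 'M': case 'm': v <<= 10; /* fall through */
	case 'K': case 'k': v <<= 10; (*end)++;
	}
	return v;
}

/* returns 0 and fills size/base on success, mirroring parse_crashkernel() */
static int parse_crashkernel_model(const char *cmdline,
				   unsigned long long *size,
				   unsigned long long *base)
{
	const char *from = strstr(cmdline, "crashkernel=");
	char *end;

	if (!from)
		return -1;
	*size = parse_suffixed(from + strlen("crashkernel="), &end);
	*base = (*end == '@') ? parse_suffixed(end + 1, &end) : 0;
	return 0;
}

int main(void)
{
	unsigned long long size, base;

	if (parse_crashkernel_model("root=/dev/sda1 crashkernel=64M@16M",
				    &size, &base) == 0)
		printf("reserve %lluMB at %lluMB\n", size >> 20, base >> 20);
	return 0;
}

Factoring the parsing into setup_crashkernel() also lets the architecture print the reservation it made, which the old inline block never did.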
diff --git a/arch/ia64/kernel/signal.c b/arch/ia64/kernel/signal.c
index aeec8184e862..cdb64cc4d9c8 100644
--- a/arch/ia64/kernel/signal.c
+++ b/arch/ia64/kernel/signal.c
@@ -227,7 +227,7 @@ ia64_rt_sigreturn (struct sigscratch *scr)
 	si.si_signo = SIGSEGV;
 	si.si_errno = 0;
 	si.si_code = SI_KERNEL;
-	si.si_pid = current->pid;
+	si.si_pid = task_pid_vnr(current);
 	si.si_uid = current->uid;
 	si.si_addr = sc;
 	force_sig_info(SIGSEGV, &si, current);
@@ -332,7 +332,7 @@ force_sigsegv_info (int sig, void __user *addr)
 	si.si_signo = SIGSEGV;
 	si.si_errno = 0;
 	si.si_code = SI_KERNEL;
-	si.si_pid = current->pid;
+	si.si_pid = task_pid_vnr(current);
 	si.si_uid = current->uid;
 	si.si_addr = addr;
 	force_sig_info(SIGSEGV, &si, current);
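
Note: unlike the logging changes elsewhere in this series, si_pid is handed back to userspace, so these hunks switch to task_pid_vnr(), the pid as seen from the receiving task's own pid namespace rather than the global number. A toy sketch of the distinction; the struct and helper names are illustrative stand-ins:

#include <stdio.h>

struct toy_task {
	int global_pid;   /* init-namespace view, cf. task_pid_nr() */
	int ns_pid;       /* in-namespace view, cf. task_pid_vnr() */
};

static int toy_task_pid_vnr(const struct toy_task *t)
{
	return t->ns_pid;
}

int main(void)
{
	struct toy_task sender = { .global_pid = 31337, .ns_pid = 5 };
	int si_pid = toy_task_pid_vnr(&sender);   /* what userspace should see */

	printf("si_pid delivered inside the namespace: %d\n", si_pid);
	return 0;
}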
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c
index 98cfc90cab1d..2bb84214e5f1 100644
--- a/arch/ia64/kernel/time.c
+++ b/arch/ia64/kernel/time.c
@@ -371,6 +371,11 @@ ia64_setup_printk_clock(void)
 	ia64_printk_clock = ia64_itc_printk_clock;
 }
 
+/* IA64 doesn't cache the timezone */
+void update_vsyscall_tz(void)
+{
+}
+
 void update_vsyscall(struct timespec *wall, struct clocksource *c)
 {
 	unsigned long flags;
diff --git a/arch/ia64/kernel/traps.c b/arch/ia64/kernel/traps.c
index 3aeaf15e468b..78d65cb947d2 100644
--- a/arch/ia64/kernel/traps.c
+++ b/arch/ia64/kernel/traps.c
@@ -61,7 +61,7 @@ die (const char *str, struct pt_regs *regs, long err)
 
 	if (++die.lock_owner_depth < 3) {
 		printk("%s[%d]: %s %ld [%d]\n",
-			current->comm, current->pid, str, err, ++die_counter);
+			current->comm, task_pid_nr(current), str, err, ++die_counter);
 		(void) notify_die(DIE_OOPS, (char *)str, regs, err, 255, SIGSEGV);
 		show_regs(regs);
 	} else
@@ -315,7 +315,7 @@ handle_fpu_swa (int fp_fault, struct pt_regs *regs, unsigned long isr)
 			last.time = current_jiffies + 5 * HZ;
 			printk(KERN_WARNING
 				"%s(%d): floating-point assist fault at ip %016lx, isr %016lx\n",
-				current->comm, current->pid, regs->cr_iip + ia64_psr(regs)->ri, isr);
+				current->comm, task_pid_nr(current), regs->cr_iip + ia64_psr(regs)->ri, isr);
 		}
 	}
 }
@@ -453,7 +453,7 @@ ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa,
 		if (code == 8) {
 # ifdef CONFIG_IA64_PRINT_HAZARDS
 			printk("%s[%d]: possible hazard @ ip=%016lx (pr = %016lx)\n",
-				current->comm, current->pid,
+				current->comm, task_pid_nr(current),
 				regs.cr_iip + ia64_psr(&regs)->ri, regs.pr);
 # endif
 			return;
diff --git a/arch/ia64/kernel/unaligned.c b/arch/ia64/kernel/unaligned.c
index fe6aa5a9f8fa..2173de9fe917 100644
--- a/arch/ia64/kernel/unaligned.c
+++ b/arch/ia64/kernel/unaligned.c
@@ -1340,7 +1340,8 @@ ia64_handle_unaligned (unsigned long ifa, struct pt_regs *regs)
 			size_t len;
 
 			len = sprintf(buf, "%s(%d): unaligned access to 0x%016lx, "
-				      "ip=0x%016lx\n\r", current->comm, current->pid,
+				      "ip=0x%016lx\n\r", current->comm,
+				      task_pid_nr(current),
 				      ifa, regs->cr_iip + ipsr->ri);
 			/*
 			 * Don't call tty_write_message() if we're in the kernel; we might
@@ -1363,7 +1364,7 @@ ia64_handle_unaligned (unsigned long ifa, struct pt_regs *regs)
1363 "administrator\n" 1364 "administrator\n"
1364 "echo 0 > /proc/sys/kernel/ignore-" 1365 "echo 0 > /proc/sys/kernel/ignore-"
1365 "unaligned-usertrap to re-enable\n", 1366 "unaligned-usertrap to re-enable\n",
1366 current->comm, current->pid); 1367 current->comm, task_pid_nr(current));
1367 } 1368 }
1368 } 1369 }
1369 } else { 1370 } else {
diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
index 32f26253c4e8..7571076a16a1 100644
--- a/arch/ia64/mm/fault.c
+++ b/arch/ia64/mm/fault.c
@@ -274,7 +274,7 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
 
   out_of_memory:
 	up_read(&mm->mmap_sem);
-	if (is_init(current)) {
+	if (is_global_init(current)) {
 		yield();
 		down_read(&mm->mmap_sem);
 		goto survive;
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index 3e10152abbf0..c6c19bf11bec 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -127,8 +127,8 @@ ia64_init_addr_space (void)
 		vma->vm_mm = current->mm;
 		vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
 		vma->vm_end = vma->vm_start + PAGE_SIZE;
-		vma->vm_page_prot = protection_map[VM_DATA_DEFAULT_FLAGS & 0x7];
 		vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
+		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
 		down_write(&current->mm->mmap_sem);
 		if (insert_vm_struct(current->mm, vma)) {
 			up_write(&current->mm->mmap_sem);
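
Note: the hunk above is an ordering fix as much as an API cleanup: vm_page_prot is now derived from the vma's final vm_flags via vm_get_page_prot() (roughly protection_map indexed by the READ/WRITE/EXEC/SHARED bits in kernels of this era) instead of indexing protection_map[] with a constant before vm_flags is even assigned. A simplified stand-alone model, with toy flag values and a string table in place of real pgprot_t entries:

#include <stdio.h>

#define TOY_READ   0x1
#define TOY_WRITE  0x2
#define TOY_EXEC   0x4
#define TOY_SHARED 0x8

/* toy protection table indexed by the low permission bits */
static const char *toy_protection_map[16] = {
	[TOY_READ]                        = "r--",
	[TOY_READ | TOY_WRITE]            = "rw-",
	[TOY_READ | TOY_WRITE | TOY_EXEC] = "rwx",
	/* remaining slots left NULL for brevity */
};

/* models vm_get_page_prot(): derive protections from the flags */
static const char *toy_vm_get_page_prot(unsigned long vm_flags)
{
	return toy_protection_map[vm_flags & 0xf];
}

int main(void)
{
	unsigned long vm_flags = TOY_READ | TOY_WRITE;      /* set flags first... */
	const char *prot = toy_vm_get_page_prot(vm_flags);  /* ...then derive prot */

	printf("page prot for flags 0x%lx: %s\n", vm_flags, prot ? prot : "???");
	return 0;
}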
diff --git a/arch/ia64/oprofile/Kconfig b/arch/ia64/oprofile/Kconfig
deleted file mode 100644
index 97271ab484dc..000000000000
--- a/arch/ia64/oprofile/Kconfig
+++ /dev/null
@@ -1,20 +0,0 @@
-config PROFILING
-	bool "Profiling support (EXPERIMENTAL)"
-	help
-	  Say Y here to enable the extended profiling support mechanisms used
-	  by profilers such as OProfile.
-
-config OPROFILE
-	tristate "OProfile system profiling (EXPERIMENTAL)"
-	depends on PROFILING
-	help
-	  OProfile is a profiling system capable of profiling the
-	  whole system, include the kernel, kernel modules, libraries,
-	  and applications.
-
-	  Due to firmware bugs, you may need to use the "nohalt" boot
-	  option if you're using OProfile with the hardware performance
-	  counters.
-
-	  If unsure, say N.
-
diff --git a/arch/ia64/sn/kernel/xpnet.c b/arch/ia64/sn/kernel/xpnet.c
index e58fcadff2e9..a5df672d8392 100644
--- a/arch/ia64/sn/kernel/xpnet.c
+++ b/arch/ia64/sn/kernel/xpnet.c
@@ -269,8 +269,9 @@ xpnet_receive(partid_t partid, int channel, struct xpnet_message *msg)
 	skb->protocol = eth_type_trans(skb, xpnet_device);
 	skb->ip_summed = CHECKSUM_UNNECESSARY;
 
-	dev_dbg(xpnet, "passing skb to network layer; \n\tskb->head=0x%p "
-		"skb->data=0x%p skb->tail=0x%p skb->end=0x%p skb->len=%d\n",
+	dev_dbg(xpnet, "passing skb to network layer\n"
+		KERN_DEBUG "\tskb->head=0x%p skb->data=0x%p skb->tail=0x%p "
+		"skb->end=0x%p skb->len=%d\n",
 		(void *)skb->head, (void *)skb->data, skb_tail_pointer(skb),
 		skb_end_pointer(skb), skb->len);
 
@@ -576,10 +577,10 @@ xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	msg->tailout_ignore = end_addr - (u64)skb_tail_pointer(skb);
 	msg->buf_pa = __pa(start_addr);
 
-	dev_dbg(xpnet, "sending XPC message to %d:%d\nmsg->buf_pa="
-		"0x%lx, msg->size=%u, msg->leadin_ignore=%u, "
-		"msg->tailout_ignore=%u\n", dest_partid,
-		XPC_NET_CHANNEL, msg->buf_pa, msg->size,
+	dev_dbg(xpnet, "sending XPC message to %d:%d\n"
+		KERN_DEBUG "msg->buf_pa=0x%lx, msg->size=%u, "
+		"msg->leadin_ignore=%u, msg->tailout_ignore=%u\n",
+		dest_partid, XPC_NET_CHANNEL, msg->buf_pa, msg->size,
 		msg->leadin_ignore, msg->tailout_ignore);
 
 
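
Note: both xpnet hunks fix multi-line debug messages. A '\n' inside a printk format starts a new log line, and in kernels of this era each line needed its own level prefix, so the reworked formats restate KERN_DEBUG (the string literal "<7>" at the time) after every embedded newline; adjacent string literals simply paste together. A userspace mock of the string pasting only, not the kernel's printk:

#include <stdio.h>

#define KERN_DEBUG "<7>"   /* the kernel's value in this era */

int main(void)
{
	/* adjacent literals concatenate, exactly as in the dev_dbg() calls */
	printf(KERN_DEBUG "sending XPC message to %d:%d\n"
	       KERN_DEBUG "msg->buf_pa=0x%lx, msg->size=%u\n",
	       1, 2, 0x1000UL, 512u);
	return 0;
}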