author    Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-05-17 00:15:18 -0400
committer Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-05-17 00:15:18 -0400
commit    5b58e21a27028a9f0399449d8bc8494fd9d9ff70 (patch)
tree      fba49cc745f791755a7e86d518376a740fd39f0a /arch
parent    52ade9b3b97fd3bea42842a056fe0786c28d0555 (diff)
parent    576fe0bd7e52dce7afb6b9b2450744555b2eb53a (diff)
Merge branch 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6
* 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6:
  [IA64] optimize pagefaults a little
  [IA64] Fix section conflict of ia64_mlogbuf_finish
  [IA64] s/scalibility/scalability/
  [IA64] kdump on INIT needs multi-nodes sync-up (v.2)
  [IA64] wire up {signal,timer,event}fd syscalls
  [IA64] spelling fixes: arch/ia64/
Diffstat (limited to 'arch')
-rw-r--r--  arch/ia64/kernel/acpi.c              |  2
-rw-r--r--  arch/ia64/kernel/crash.c             | 24
-rw-r--r--  arch/ia64/kernel/entry.S             |  3
-rw-r--r--  arch/ia64/kernel/irq.c               |  6
-rw-r--r--  arch/ia64/kernel/irq_lsapic.c        |  2
-rw-r--r--  arch/ia64/kernel/kprobes.c           | 15
-rw-r--r--  arch/ia64/kernel/mca.c               |  5
-rw-r--r--  arch/ia64/kernel/mca_drv.c           |  4
-rw-r--r--  arch/ia64/kernel/module.c            |  2
-rw-r--r--  arch/ia64/kernel/perfmon.c           | 18
-rw-r--r--  arch/ia64/kernel/perfmon_mckinley.h  |  2
-rw-r--r--  arch/ia64/kernel/sal.c               |  2
-rw-r--r--  arch/ia64/kernel/salinfo.c           |  2
-rw-r--r--  arch/ia64/kernel/setup.c             |  6
-rw-r--r--  arch/ia64/kernel/smp.c               | 12
-rw-r--r--  arch/ia64/kernel/smpboot.c           |  6
-rw-r--r--  arch/ia64/kernel/traps.c             |  2
-rw-r--r--  arch/ia64/kernel/unwind.c            |  2
-rw-r--r--  arch/ia64/mm/discontig.c             |  2
-rw-r--r--  arch/ia64/mm/fault.c                 | 41
-rw-r--r--  arch/ia64/sn/kernel/bte.c            | 12
-rw-r--r--  arch/ia64/sn/kernel/bte_error.c      |  4
-rw-r--r--  arch/ia64/sn/kernel/io_common.c      |  2
-rw-r--r--  arch/ia64/sn/kernel/setup.c          |  2
-rw-r--r--  arch/ia64/sn/kernel/sn2/sn2_smp.c    |  2
-rw-r--r--  arch/ia64/sn/kernel/xpc_channel.c    |  8
-rw-r--r--  arch/ia64/sn/kernel/xpnet.c          |  2
-rw-r--r--  arch/ia64/sn/pci/pci_dma.c           |  8
-rw-r--r--  arch/ia64/sn/pci/pcibr/pcibr_ate.c   |  6
-rw-r--r--  arch/ia64/sn/pci/pcibr/pcibr_dma.c   |  2
-rw-r--r--  arch/ia64/sn/pci/tioca_provider.c    |  6
-rw-r--r--  arch/ia64/sn/pci/tioce_provider.c    | 16
32 files changed, 111 insertions, 117 deletions
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index 3549c94467b8..c4784494970e 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -791,7 +791,7 @@ static __init int setup_additional_cpus(char *s)
 early_param("additional_cpus", setup_additional_cpus);
 
 /*
- * cpu_possible_map should be static, it cannot change as cpu's
+ * cpu_possible_map should be static, it cannot change as CPUs
  * are onlined, or offlined. The reason is per-cpu data-structures
  * are allocated by some modules at init time, and dont expect to
  * do this dynamically on cpu arrival/departure.
diff --git a/arch/ia64/kernel/crash.c b/arch/ia64/kernel/crash.c
index aeb79fb28f0b..1d64ef478dde 100644
--- a/arch/ia64/kernel/crash.c
+++ b/arch/ia64/kernel/crash.c
@@ -156,24 +156,30 @@ kdump_init_notifier(struct notifier_block *self, unsigned long val, void *data)
 	if (!kdump_on_init)
 		return NOTIFY_DONE;
 
-	if (val != DIE_INIT_MONARCH_ENTER &&
-	    val != DIE_INIT_SLAVE_ENTER &&
+	if (val != DIE_INIT_MONARCH_LEAVE &&
+	    val != DIE_INIT_SLAVE_LEAVE &&
+	    val != DIE_INIT_MONARCH_PROCESS &&
 	    val != DIE_MCA_RENDZVOUS_LEAVE &&
 	    val != DIE_MCA_MONARCH_LEAVE)
 		return NOTIFY_DONE;
 
 	nd = (struct ia64_mca_notify_die *)args->err;
-	/* Reason code 1 means machine check rendezous*/
-	if ((val == DIE_INIT_MONARCH_ENTER || val == DIE_INIT_SLAVE_ENTER) &&
-	     nd->sos->rv_rc == 1)
+	/* Reason code 1 means machine check rendezvous*/
+	if ((val == DIE_INIT_MONARCH_LEAVE || val == DIE_INIT_SLAVE_LEAVE
+	    || val == DIE_INIT_MONARCH_PROCESS) && nd->sos->rv_rc == 1)
 		return NOTIFY_DONE;
 
 	switch (val) {
-	case DIE_INIT_MONARCH_ENTER:
+	case DIE_INIT_MONARCH_PROCESS:
+		atomic_set(&kdump_in_progress, 1);
+		*(nd->monarch_cpu) = -1;
+		break;
+	case DIE_INIT_MONARCH_LEAVE:
 		machine_kdump_on_init();
 		break;
-	case DIE_INIT_SLAVE_ENTER:
-		unw_init_running(kdump_cpu_freeze, NULL);
+	case DIE_INIT_SLAVE_LEAVE:
+		if (atomic_read(&kdump_in_progress))
+			unw_init_running(kdump_cpu_freeze, NULL);
 		break;
 	case DIE_MCA_RENDZVOUS_LEAVE:
 		if (atomic_read(&kdump_in_progress))
@@ -215,8 +221,10 @@ static ctl_table sys_table[] = {
 static int
 machine_crash_setup(void)
 {
+	/* be notified before default_monarch_init_process */
 	static struct notifier_block kdump_init_notifier_nb = {
 		.notifier_call = kdump_init_notifier,
+		.priority = 1,
 	};
 	int ret;
 	if((ret = register_die_notifier(&kdump_init_notifier_nb)) != 0)
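
The `.priority = 1` added here is what the new comment relies on: die notifiers on an atomic notifier chain run in descending priority order, and mca.c's default_monarch_init_process is registered at the default priority of 0, so the kdump notifier is now guaranteed to see DIE_INIT_MONARCH_PROCESS first. A minimal sketch of that ordering on a private chain (the chain and callback names are hypothetical, purely for illustration):

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/notifier.h>

static ATOMIC_NOTIFIER_HEAD(demo_chain);	/* hypothetical chain */

static int high_prio_cb(struct notifier_block *nb, unsigned long val, void *v)
{
	printk(KERN_INFO "called first: priority 1\n");
	return NOTIFY_DONE;
}

static int default_prio_cb(struct notifier_block *nb, unsigned long val, void *v)
{
	printk(KERN_INFO "called second: default priority 0\n");
	return NOTIFY_DONE;
}

static struct notifier_block high_nb = {
	.notifier_call	= high_prio_cb,
	.priority	= 1,		/* higher priority runs earlier */
};

static struct notifier_block default_nb = {
	.notifier_call	= default_prio_cb,	/* .priority defaults to 0 */
};

static int __init demo_init(void)
{
	/* registration order does not matter; the chain is kept sorted */
	atomic_notifier_chain_register(&demo_chain, &default_nb);
	atomic_notifier_chain_register(&demo_chain, &high_nb);
	atomic_notifier_call_chain(&demo_chain, 0, NULL);
	return 0;
}
module_init(demo_init);

This mirrors why kdump_init_notifier can set kdump_in_progress and clear *(nd->monarch_cpu) before default_monarch_init_process sees the same event.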
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index 144b056282af..95f517515235 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -1585,5 +1585,8 @@ sys_call_table:
 	data8 sys_getcpu
 	data8 sys_epoll_pwait			// 1305
 	data8 sys_utimensat
+	data8 sys_signalfd
+	data8 sys_timerfd
+	data8 sys_eventfd
 
 	.org sys_call_table + 8*NR_syscalls	// guard against failures to increase NR_syscalls
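
With these three data8 slots in place the new system calls become reachable from ia64 userspace, and the trailing .org directive still guards the table length. Going by the `// 1305` marker on sys_epoll_pwait, the new slots fall at 1307 (signalfd), 1308 (timerfd) and 1309 (eventfd). A hedged userspace smoke test for the eventfd slot, with the syscall number inferred from the table above rather than taken from a header (glibc of this era has no wrapper yet):

/* Hedged sketch: exercise the newly wired eventfd syscall on ia64.
 * 1309 is an inference: epoll_pwait = 1305, utimensat = 1306, then
 * signalfd/timerfd/eventfd in table order. Verify against unistd.h.
 */
#include <stdio.h>
#include <stdint.h>
#include <unistd.h>
#include <sys/syscall.h>

#define NR_eventfd_guess 1309	/* assumption, see note above */

int main(void)
{
	uint64_t val = 1;
	int efd = syscall(NR_eventfd_guess, 0);	/* initial counter = 0 */

	if (efd < 0) {
		perror("eventfd");
		return 1;
	}
	write(efd, &val, sizeof(val));	/* add 1 to the counter */
	read(efd, &val, sizeof(val));	/* fetch the counter and reset it */
	printf("eventfd counter: %llu\n", (unsigned long long)val);
	return 0;
}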
diff --git a/arch/ia64/kernel/irq.c b/arch/ia64/kernel/irq.c
index b4c239685d2e..407b45870489 100644
--- a/arch/ia64/kernel/irq.c
+++ b/arch/ia64/kernel/irq.c
@@ -4,7 +4,7 @@
  * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
  *
  * This file contains the code used by various IRQ handling routines:
- * asking for different IRQ's should be done through these routines
+ * asking for different IRQs should be done through these routines
  * instead of just grabbing them. Thus setups with different IRQ numbers
  * shouldn't result in any weird surprises, and installing new handlers
  * should be easier.
@@ -12,7 +12,7 @@
  * Copyright (C) Ashok Raj<ashok.raj@intel.com>, Intel Corporation 2004
  *
  * 4/14/2004: Added code to handle cpu migration and do safe irq
- *			migration without lossing interrupts for iosapic
+ *			migration without losing interrupts for iosapic
  *			architecture.
  */
 
@@ -190,7 +190,7 @@ void fixup_irqs(void)
 	}
 
 	/*
-	 * Phase 1: Locate irq's bound to this cpu and
+	 * Phase 1: Locate IRQs bound to this cpu and
 	 *	relocate them for cpu removal.
 	 */
 	migrate_irqs();
diff --git a/arch/ia64/kernel/irq_lsapic.c b/arch/ia64/kernel/irq_lsapic.c
index c2f07beb1759..e56a7a36aca3 100644
--- a/arch/ia64/kernel/irq_lsapic.c
+++ b/arch/ia64/kernel/irq_lsapic.c
@@ -23,7 +23,7 @@ lsapic_noop_startup (unsigned int irq)
 static void
 lsapic_noop (unsigned int irq)
 {
-	/* nuthing to do... */
+	/* nothing to do... */
 }
 
 static int lsapic_retrigger(unsigned int irq)
diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c
index 72e593e94053..5bc46f151344 100644
--- a/arch/ia64/kernel/kprobes.c
+++ b/arch/ia64/kernel/kprobes.c
@@ -151,12 +151,12 @@ static uint __kprobes is_cmp_ctype_unc_inst(uint template, uint slot,
 
 	cmp_inst.l = kprobe_inst;
 	if ((cmp_inst.f.x2 == 0) || (cmp_inst.f.x2 == 1)) {
-		/* Integere compare - Register Register (A6 type)*/
+		/* Integer compare - Register Register (A6 type)*/
 		if ((cmp_inst.f.tb == 0) && (cmp_inst.f.ta == 0)
 				&&(cmp_inst.f.c == 1))
 			ctype_unc = 1;
 	} else if ((cmp_inst.f.x2 == 2)||(cmp_inst.f.x2 == 3)) {
-		/* Integere compare - Immediate Register (A8 type)*/
+		/* Integer compare - Immediate Register (A8 type)*/
 		if ((cmp_inst.f.ta == 0) &&(cmp_inst.f.c == 1))
 			ctype_unc = 1;
 	}
@@ -820,7 +820,7 @@ out:
 	return 1;
 }
 
-static int __kprobes kprobes_fault_handler(struct pt_regs *regs, int trapnr)
+int __kprobes kprobes_fault_handler(struct pt_regs *regs, int trapnr)
 {
 	struct kprobe *cur = kprobe_running();
 	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
@@ -904,13 +904,6 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
 		if (post_kprobes_handler(args->regs))
 			ret = NOTIFY_STOP;
 		break;
-	case DIE_PAGE_FAULT:
-		/* kprobe_running() needs smp_processor_id() */
-		preempt_disable();
-		if (kprobe_running() &&
-			kprobes_fault_handler(args->regs, args->trapnr))
-			ret = NOTIFY_STOP;
-		preempt_enable();
 	default:
 		break;
 	}
@@ -954,7 +947,7 @@ int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
 	/*
 	 * Callee owns the argument space and could overwrite it, eg
 	 * tail call optimization. So to be absolutely safe
-	 * we save the argument space before transfering the control
+	 * we save the argument space before transferring the control
 	 * to instrumented jprobe function which runs in
 	 * the process context
 	 */
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 26814de6c29a..1ead5ea6c5ce 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -273,7 +273,6 @@ static void ia64_mlogbuf_finish(int wait)
 
 	mlogbuf_finished = 1;
 }
-EXPORT_SYMBOL(ia64_mlogbuf_finish);
 
 /*
  * Print buffered messages from INIT context.
@@ -1477,6 +1476,10 @@ default_monarch_init_process(struct notifier_block *self, unsigned long val, voi
 	struct task_struct *g, *t;
 	if (val != DIE_INIT_MONARCH_PROCESS)
 		return NOTIFY_DONE;
+#ifdef CONFIG_KEXEC
+	if (atomic_read(&kdump_in_progress))
+		return NOTIFY_DONE;
+#endif
 
 	/*
 	 * FIXME: mlogbuf will brim over with INIT stack dumps.
diff --git a/arch/ia64/kernel/mca_drv.c b/arch/ia64/kernel/mca_drv.c
index 70b8bdbb7e6f..aba813c2c150 100644
--- a/arch/ia64/kernel/mca_drv.c
+++ b/arch/ia64/kernel/mca_drv.c
@@ -438,7 +438,7 @@ is_mca_global(peidx_table_t *peidx, pal_bus_check_info_t *pbci,
  * @peidx:	pointer of index of processor error section
  *
  * Return value:
- *	target address on Success / 0 on Failue
+ *	target address on Success / 0 on Failure
  */
 static u64
 get_target_identifier(peidx_table_t *peidx)
@@ -701,7 +701,7 @@ recover_from_processor_error(int platform, slidx_table_t *slidx,
 		return fatal_mca("External bus check fatal status");
 
 	/*
-	 * This is a local MCA and estimated as a recoverble error.
+	 * This is a local MCA and estimated as a recoverable error.
 	 */
 	if (platform)
 		return recover_from_platform_error(slidx, peidx, pbci, sos);
diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
index 158e3c51bb77..196287928bae 100644
--- a/arch/ia64/kernel/module.c
+++ b/arch/ia64/kernel/module.c
@@ -861,7 +861,7 @@ apply_relocate (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symindex,
 /*
  * Modules contain a single unwind table which covers both the core and the init text
  * sections but since the two are not contiguous, we need to split this table up such that
- * we can register (and unregister) each "segment" seperately. Fortunately, this sounds
+ * we can register (and unregister) each "segment" separately. Fortunately, this sounds
  * more complicated than it really is.
  */
 static void
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index e7191ca30b16..b7133cabdbea 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -1318,7 +1318,7 @@ pfm_reserve_session(struct task_struct *task, int is_syswide, unsigned int cpu)
 {
 	unsigned long flags;
 	/*
-	 * validy checks on cpu_mask have been done upstream
+	 * validity checks on cpu_mask have been done upstream
 	 */
 	LOCK_PFS(flags);
 
@@ -1384,7 +1384,7 @@ pfm_unreserve_session(pfm_context_t *ctx, int is_syswide, unsigned int cpu)
 {
 	unsigned long flags;
 	/*
-	 * validy checks on cpu_mask have been done upstream
+	 * validity checks on cpu_mask have been done upstream
 	 */
 	LOCK_PFS(flags);
 
@@ -1835,7 +1835,7 @@ pfm_flush(struct file *filp, fl_owner_t id)
 	/*
 	 * remove our file from the async queue, if we use this mode.
 	 * This can be done without the context being protected. We come
-	 * here when the context has become unreacheable by other tasks.
+	 * here when the context has become unreachable by other tasks.
 	 *
 	 * We may still have active monitoring at this point and we may
 	 * end up in pfm_overflow_handler(). However, fasync_helper()
@@ -2132,7 +2132,7 @@ doit:
 	filp->private_data = NULL;
 
 	/*
-	 * if we free on the spot, the context is now completely unreacheable
+	 * if we free on the spot, the context is now completely unreachable
 	 * from the callers side. The monitored task side is also cut, so we
 	 * can freely cut.
 	 *
@@ -2562,7 +2562,7 @@ pfm_reset_pmu_state(pfm_context_t *ctx)
 	ctx->ctx_all_pmcs[0] = pmu_conf->impl_pmcs[0] & ~0x1;
 
 	/*
-	 * bitmask of all PMDs that are accesible to this context
+	 * bitmask of all PMDs that are accessible to this context
 	 */
 	ctx->ctx_all_pmds[0] = pmu_conf->impl_pmds[0];
 
@@ -3395,7 +3395,7 @@ pfm_read_pmds(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
 		if (unlikely(!PMD_IS_IMPL(cnum))) goto error;
 		/*
 		 * we can only read the register that we use. That includes
-		 * the one we explicitely initialize AND the one we want included
+		 * the one we explicitly initialize AND the one we want included
 		 * in the sampling buffer (smpl_regs).
 		 *
 		 * Having this restriction allows optimization in the ctxsw routine
@@ -3715,7 +3715,7 @@ pfm_restart(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
 	 * if non-blocking, then we ensure that the task will go into
 	 * pfm_handle_work() before returning to user mode.
 	 *
-	 * We cannot explicitely reset another task, it MUST always
+	 * We cannot explicitly reset another task, it MUST always
 	 * be done by the task itself. This works for system wide because
 	 * the tool that is controlling the session is logically doing
 	 * "self-monitoring".
@@ -4644,7 +4644,7 @@ pfm_exit_thread(struct task_struct *task)
 	switch(state) {
 		case PFM_CTX_UNLOADED:
 			/*
-			 * only comes to thios function if pfm_context is not NULL, i.e., cannot
+			 * only comes to this function if pfm_context is not NULL, i.e., cannot
 			 * be in unloaded state
 			 */
 			printk(KERN_ERR "perfmon: pfm_exit_thread [%d] ctx unloaded\n", task->pid);
@@ -5247,7 +5247,7 @@ pfm_end_notify_user(pfm_context_t *ctx)
 
 /*
  * main overflow processing routine.
- * it can be called from the interrupt path or explicitely during the context switch code
+ * it can be called from the interrupt path or explicitly during the context switch code
  */
 static void
 pfm_overflow_handler(struct task_struct *task, pfm_context_t *ctx, u64 pmc0, struct pt_regs *regs)
diff --git a/arch/ia64/kernel/perfmon_mckinley.h b/arch/ia64/kernel/perfmon_mckinley.h
index 9becccda2897..c4bec7a9d18f 100644
--- a/arch/ia64/kernel/perfmon_mckinley.h
+++ b/arch/ia64/kernel/perfmon_mckinley.h
@@ -181,7 +181,7 @@ static pmu_config_t pmu_conf_mck={
 	.pmc_desc = pfm_mck_pmc_desc,
 	.num_ibrs = 8,
 	.num_dbrs = 8,
-	.use_rr_dbregs = 1 /* debug register are use for range retrictions */
+	.use_rr_dbregs = 1 /* debug register are use for range restrictions */
 };
 
 
diff --git a/arch/ia64/kernel/sal.c b/arch/ia64/kernel/sal.c
index 37c876f95dba..27c2ef445a56 100644
--- a/arch/ia64/kernel/sal.c
+++ b/arch/ia64/kernel/sal.c
@@ -134,7 +134,7 @@ set_smp_redirect (int flag)
 	 * interrupt redirection. The reason is this would require that
 	 * All interrupts be stopped and hard bind the irq to a cpu.
 	 * Later when the interrupt is fired we need to set the redir hint
-	 * on again in the vector. This is combersome for something that the
+	 * on again in the vector. This is cumbersome for something that the
 	 * user mode irq balancer will solve anyways.
 	 */
 	no_int_routing=1;
diff --git a/arch/ia64/kernel/salinfo.c b/arch/ia64/kernel/salinfo.c
index 89f6b138a62c..25cd75f50ab1 100644
--- a/arch/ia64/kernel/salinfo.c
+++ b/arch/ia64/kernel/salinfo.c
@@ -162,7 +162,7 @@ static DEFINE_SPINLOCK(data_saved_lock);
 /** salinfo_platform_oemdata - optional callback to decode oemdata from an error
  * record.
  * @sect_header: pointer to the start of the section to decode.
- * @oemdata: returns vmalloc area containing the decded output.
+ * @oemdata: returns vmalloc area containing the decoded output.
  * @oemdata_size: returns length of decoded output (strlen).
  *
  * Description: If user space asks for oem data to be decoded by the kernel
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index 9df1efe7487d..eaa6a24bc0b6 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -576,7 +576,7 @@ setup_arch (char **cmdline_p)
 }
 
 /*
- * Display cpu info for all cpu's.
+ * Display cpu info for all CPUs.
  */
 static int
 show_cpuinfo (struct seq_file *m, void *v)
@@ -761,7 +761,7 @@ identify_cpu (struct cpuinfo_ia64 *c)
 	c->cpu = smp_processor_id();
 
 	/* below default values will be overwritten by identify_siblings()
-	 * for Multi-Threading/Multi-Core capable cpu's
+	 * for Multi-Threading/Multi-Core capable CPUs
 	 */
 	c->threads_per_core = c->cores_per_socket = c->num_log = 1;
 	c->socket_id = -1;
@@ -947,7 +947,7 @@ cpu_init (void)
 	ia32_cpu_init();
 #endif
 
-	/* Clear ITC to eliminiate sched_clock() overflows in human time. */
+	/* Clear ITC to eliminate sched_clock() overflows in human time. */
 	ia64_set_itc(0);
 
 	/* disable all local interrupt sources: */
diff --git a/arch/ia64/kernel/smp.c b/arch/ia64/kernel/smp.c
index 221de3804560..b3a47f986e1e 100644
--- a/arch/ia64/kernel/smp.c
+++ b/arch/ia64/kernel/smp.c
@@ -186,7 +186,7 @@ handle_IPI (int irq, void *dev_id)
 }
 
 /*
- * Called with preeemption disabled.
+ * Called with preemption disabled.
  */
 static inline void
 send_IPI_single (int dest_cpu, int op)
@@ -196,7 +196,7 @@ send_IPI_single (int dest_cpu, int op)
 }
 
 /*
- * Called with preeemption disabled.
+ * Called with preemption disabled.
  */
 static inline void
 send_IPI_allbutself (int op)
@@ -210,7 +210,7 @@ send_IPI_allbutself (int op)
 }
 
 /*
- * Called with preeemption disabled.
+ * Called with preemption disabled.
  */
 static inline void
 send_IPI_all (int op)
@@ -223,7 +223,7 @@ send_IPI_all (int op)
 }
 
 /*
- * Called with preeemption disabled.
+ * Called with preemption disabled.
  */
 static inline void
 send_IPI_self (int op)
@@ -252,7 +252,7 @@ kdump_smp_send_init(void)
 }
 #endif
 /*
- * Called with preeemption disabled.
+ * Called with preemption disabled.
  */
 void
 smp_send_reschedule (int cpu)
@@ -261,7 +261,7 @@ smp_send_reschedule (int cpu)
 }
 
 /*
- * Called with preeemption disabled.
+ * Called with preemption disabled.
  */
 static void
 smp_send_local_flush_tlb (int cpu)
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index a44792d0f3a9..542958079f1b 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -694,7 +694,7 @@ int migrate_platform_irqs(unsigned int cpu)
 		set_cpei_target_cpu(new_cpei_cpu);
 		desc = irq_desc + ia64_cpe_irq;
 		/*
-		 * Switch for now, immediatly, we need to do fake intr
+		 * Switch for now, immediately, we need to do fake intr
 		 * as other interrupts, but need to study CPEI behaviour with
 		 * polling before making changes.
 		 */
@@ -840,7 +840,7 @@ __cpu_up (unsigned int cpu)
 }
 
 /*
- * Assume that CPU's have been discovered by some platform-dependent interface. For
+ * Assume that CPUs have been discovered by some platform-dependent interface. For
  * SoftSDV/Lion, that would be ACPI.
  *
  * Setup of the IPI irq handler is done in irq.c:init_IRQ_SMP().
@@ -854,7 +854,7 @@ init_smp_config(void)
 	} *ap_startup;
 	long sal_ret;
 
-	/* Tell SAL where to drop the AP's. */
+	/* Tell SAL where to drop the APs. */
 	ap_startup = (struct fptr *) start_ap;
 	sal_ret = ia64_sal_set_vectors(SAL_VECTOR_OS_BOOT_RENDEZ,
 				       ia64_tpa(ap_startup->fp), ia64_tpa(ap_startup->gp), 0, 0, 0, 0);
diff --git a/arch/ia64/kernel/traps.c b/arch/ia64/kernel/traps.c
index b8e0d70bf989..15ad85da15a9 100644
--- a/arch/ia64/kernel/traps.c
+++ b/arch/ia64/kernel/traps.c
@@ -304,7 +304,7 @@ handle_fpu_swa (int fp_fault, struct pt_regs *regs, unsigned long isr)
 	 * Lower 4 bits are used as a count. Upper bits are a sequence
 	 * number that is updated when count is reset. The cmpxchg will
 	 * fail is seqno has changed. This minimizes mutiple cpus
-	 * reseting the count.
+	 * resetting the count.
 	 */
 	if (current_jiffies > last.time)
 		(void) cmpxchg_acq(&last.count, count, 16 + (count & ~15));
diff --git a/arch/ia64/kernel/unwind.c b/arch/ia64/kernel/unwind.c
index fe1426266b9b..7d3dd6cdafa4 100644
--- a/arch/ia64/kernel/unwind.c
+++ b/arch/ia64/kernel/unwind.c
@@ -2,7 +2,7 @@
  * Copyright (C) 1999-2004 Hewlett-Packard Co
  *	David Mosberger-Tang <davidm@hpl.hp.com>
  * Copyright (C) 2003 Fenghua Yu <fenghua.yu@intel.com>
- *	- Change pt_regs_off() to make it less dependant on pt_regs structure.
+ *	- Change pt_regs_off() to make it less dependent on pt_regs structure.
  */
 /*
  * This file implements call frame unwind support for the Linux
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index 38085ac18338..0dbf0e81f8c0 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -317,7 +317,7 @@ static void __meminit scatter_node_data(void)
 	 * node_online_map is not set for hot-added nodes at this time,
 	 * because we are halfway through initialization of the new node's
 	 * structures. If for_each_online_node() is used, a new node's
-	 * pg_data_ptrs will be not initialized. Insted of using it,
+	 * pg_data_ptrs will be not initialized. Instead of using it,
 	 * pgdat_list[] is checked.
 	 */
 	for_each_node(node) {
diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
index 21658e02116c..b87f785c2416 100644
--- a/arch/ia64/mm/fault.c
+++ b/arch/ia64/mm/fault.c
@@ -19,36 +19,24 @@
 extern void die (char *, struct pt_regs *, long);
 
 #ifdef CONFIG_KPROBES
-ATOMIC_NOTIFIER_HEAD(notify_page_fault_chain);
-
-/* Hook to register for page fault notifications */
-int register_page_fault_notifier(struct notifier_block *nb)
-{
-	return atomic_notifier_chain_register(&notify_page_fault_chain, nb);
-}
-
-int unregister_page_fault_notifier(struct notifier_block *nb)
+static inline int notify_page_fault(struct pt_regs *regs, int trap)
 {
-	return atomic_notifier_chain_unregister(&notify_page_fault_chain, nb);
-}
+	int ret = 0;
+
+	if (!user_mode(regs)) {
+		/* kprobe_running() needs smp_processor_id() */
+		preempt_disable();
+		if (kprobe_running() && kprobes_fault_handler(regs, trap))
+			ret = 1;
+		preempt_enable();
+	}
 
-static inline int notify_page_fault(enum die_val val, const char *str,
-			struct pt_regs *regs, long err, int trap, int sig)
-{
-	struct die_args args = {
-		.regs = regs,
-		.str = str,
-		.err = err,
-		.trapnr = trap,
-		.signr = sig
-	};
-	return atomic_notifier_call_chain(&notify_page_fault_chain, val, &args);
+	return ret;
 }
 #else
-static inline int notify_page_fault(enum die_val val, const char *str,
-			struct pt_regs *regs, long err, int trap, int sig)
+static inline int notify_page_fault(struct pt_regs *regs, int trap)
 {
-	return NOTIFY_DONE;
+	return 0;
 }
 #endif
 
@@ -117,8 +105,7 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
 	/*
 	 * This is to handle the kprobes on user space access instructions
 	 */
-	if (notify_page_fault(DIE_PAGE_FAULT, "page fault", regs, code, TRAP_BRKPT,
-		SIGSEGV) == NOTIFY_STOP)
+	if (notify_page_fault(regs, TRAP_BRKPT))
 		return;
 
 	down_read(&mm->mmap_sem);
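
Together with the kprobes.c hunk above, this is the whole "optimize pagefaults a little" change: the fault path no longer walks an atomic notifier chain on every fault, but makes one inline check that compiles away entirely without CONFIG_KPROBES and, with it, calls kprobes_fault_handler() directly for kernel-mode faults only. Nothing changes for kprobe users; a minimal probe module (the probed symbol is an arbitrary example, not prescribed by this patch) still looks like:

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/kprobes.h>

/* Hypothetical example: probe entry to do_fork just to show the API. */
static int pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	printk(KERN_INFO "kprobe hit at %p\n", p->addr);
	return 0;
}

static struct kprobe kp = {
	.symbol_name	= "do_fork",	/* assumption: any probeable symbol works */
	.pre_handler	= pre_handler,
};

static int __init probe_init(void)
{
	/* register_kprobe() plants a break at the probe point; faults taken
	 * while handling it now reach kprobes_fault_handler() through the
	 * direct notify_page_fault() call above, not a notifier chain. */
	return register_kprobe(&kp);
}

static void __exit probe_exit(void)
{
	unregister_kprobe(&kp);
}

module_init(probe_init);
module_exit(probe_exit);
MODULE_LICENSE("GPL");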
diff --git a/arch/ia64/sn/kernel/bte.c b/arch/ia64/sn/kernel/bte.c
index ff1c55601178..b362d6d6a8c8 100644
--- a/arch/ia64/sn/kernel/bte.c
+++ b/arch/ia64/sn/kernel/bte.c
@@ -63,7 +63,7 @@ static inline void bte_start_transfer(struct bteinfo_s *bte, u64 len, u64 mode)
  * Use the block transfer engine to move kernel memory from src to dest
  * using the assigned mode.
  *
- * Paramaters:
+ * Parameters:
  *   src - physical address of the transfer source.
  *   dest - physical address of the transfer destination.
  *   len - number of bytes to transfer from source to dest.
@@ -247,7 +247,7 @@ EXPORT_SYMBOL(bte_copy);
  * use the block transfer engine to move kernel
  * memory from src to dest using the assigned mode.
  *
- * Paramaters:
+ * Parameters:
  *   src - physical address of the transfer source.
  *   dest - physical address of the transfer destination.
  *   len - number of bytes to transfer from source to dest.
@@ -255,7 +255,7 @@ EXPORT_SYMBOL(bte_copy);
  *       for IBCT0/1 in the SGI documentation.
  *
  * NOTE: If the source, dest, and len are all cache line aligned,
- *       then it would be _FAR_ preferrable to use bte_copy instead.
+ *       then it would be _FAR_ preferable to use bte_copy instead.
  */
 bte_result_t bte_unaligned_copy(u64 src, u64 dest, u64 len, u64 mode)
 {
@@ -300,7 +300,7 @@ bte_result_t bte_unaligned_copy(u64 src, u64 dest, u64 len, u64 mode)
 		 * a standard bte copy.
 		 *
 		 * One nasty exception to the above rule is when the
-		 * source and destination are not symetrically
+		 * source and destination are not symmetrically
 		 * mis-aligned. If the source offset from the first
 		 * cache line is different from the destination offset,
 		 * we make the first section be the entire transfer
@@ -337,7 +337,7 @@ bte_result_t bte_unaligned_copy(u64 src, u64 dest, u64 len, u64 mode)
 
 		if (footBcopyDest == (headBcopyDest + headBcopyLen)) {
 			/*
-			 * We have two contigous bcopy
+			 * We have two contiguous bcopy
 			 * blocks. Merge them.
 			 */
 			headBcopyLen += footBcopyLen;
@@ -375,7 +375,7 @@ bte_result_t bte_unaligned_copy(u64 src, u64 dest, u64 len, u64 mode)
 	} else {
 
 		/*
-		 * The transfer is not symetric, we will
+		 * The transfer is not symmetric, we will
 		 * allocate a buffer large enough for all the
 		 * data, bte_copy into that buffer and then
 		 * bcopy to the destination.
diff --git a/arch/ia64/sn/kernel/bte_error.c b/arch/ia64/sn/kernel/bte_error.c
index b6fcf8164f2b..27c5936ccfe9 100644
--- a/arch/ia64/sn/kernel/bte_error.c
+++ b/arch/ia64/sn/kernel/bte_error.c
@@ -105,7 +105,7 @@ int shub1_bte_error_handler(unsigned long _nodepda)
 	}
 
 	BTE_PRINTK(("eh:%p:%d Cleaning up\n", err_nodepda, smp_processor_id()));
-	/* Reenable both bte interfaces */
+	/* Re-enable both bte interfaces */
 	imem.ii_imem_regval = REMOTE_HUB_L(nasid, IIO_IMEM);
 	imem.ii_imem_fld_s.i_b0_esd = imem.ii_imem_fld_s.i_b1_esd = 1;
 	REMOTE_HUB_S(nasid, IIO_IMEM, imem.ii_imem_regval);
@@ -243,7 +243,7 @@ bte_crb_error_handler(cnodeid_t cnode, int btenum,
 
 	/*
 	 * The caller has already figured out the error type, we save that
-	 * in the bte handle structure for the thread excercising the
+	 * in the bte handle structure for the thread exercising the
 	 * interface to consume.
 	 */
 	bte->bh_error = ioe->ie_errortype + BTEFAIL_OFFSET;
diff --git a/arch/ia64/sn/kernel/io_common.c b/arch/ia64/sn/kernel/io_common.c
index 7ed72d3faf73..787ed642dd49 100644
--- a/arch/ia64/sn/kernel/io_common.c
+++ b/arch/ia64/sn/kernel/io_common.c
@@ -479,7 +479,7 @@ sn_io_early_init(void)
 	}
 
 	/*
-	 * prime sn_pci_provider[]. Individial provider init routines will
+	 * prime sn_pci_provider[]. Individual provider init routines will
 	 * override their respective default entries.
 	 */
 
diff --git a/arch/ia64/sn/kernel/setup.c b/arch/ia64/sn/kernel/setup.c
index a9bed5ca2ed8..a574fcd163dd 100644
--- a/arch/ia64/sn/kernel/setup.c
+++ b/arch/ia64/sn/kernel/setup.c
@@ -167,7 +167,7 @@ void __init early_sn_setup(void)
 	 * IO on SN2 is done via SAL calls, early_printk won't work without this.
 	 *
 	 * This code duplicates some of the ACPI table parsing that is in efi.c & sal.c.
-	 * Any changes to those file may have to be made hereas well.
+	 * Any changes to those file may have to be made here as well.
 	 */
 	efi_systab = (efi_system_table_t *) __va(ia64_boot_param->efi_systab);
 	config_tables = __va(efi_systab->tables);
diff --git a/arch/ia64/sn/kernel/sn2/sn2_smp.c b/arch/ia64/sn/kernel/sn2/sn2_smp.c
index 5d318b579fb1..033c8a9f000e 100644
--- a/arch/ia64/sn/kernel/sn2/sn2_smp.c
+++ b/arch/ia64/sn/kernel/sn2/sn2_smp.c
@@ -104,7 +104,7 @@ static inline unsigned long wait_piowc(void)
  *
  * SN2 PIO writes from separate CPUs are not guaranteed to arrive in order.
  * Context switching user threads which have memory-mapped MMIO may cause
- * PIOs to issue from seperate CPUs, thus the PIO writes must be drained
+ * PIOs to issue from separate CPUs, thus the PIO writes must be drained
  * from the previous CPU's Shub before execution resumes on the new CPU.
  */
 void sn_migrate(struct task_struct *task)
diff --git a/arch/ia64/sn/kernel/xpc_channel.c b/arch/ia64/sn/kernel/xpc_channel.c
index c08db9c2375d..44ccc0d789c9 100644
--- a/arch/ia64/sn/kernel/xpc_channel.c
+++ b/arch/ia64/sn/kernel/xpc_channel.c
@@ -293,7 +293,7 @@ xpc_pull_remote_cachelines(struct xpc_partition *part, void *dst,
 
 
 /*
- * Pull the remote per partititon specific variables from the specified
+ * Pull the remote per partition specific variables from the specified
  * partition.
  */
 enum xpc_retval
@@ -461,7 +461,7 @@ xpc_allocate_local_msgqueue(struct xpc_channel *ch)
 	// >>> may want to check for ch->flags & XPC_C_DISCONNECTING between
 	// >>> iterations of the for-loop, bail if set?
 
-	// >>> should we impose a minumum #of entries? like 4 or 8?
+	// >>> should we impose a minimum #of entries? like 4 or 8?
 	for (nentries = ch->local_nentries; nentries > 0; nentries--) {
 
 		nbytes = nentries * ch->msg_size;
@@ -514,7 +514,7 @@ xpc_allocate_remote_msgqueue(struct xpc_channel *ch)
 	// >>> may want to check for ch->flags & XPC_C_DISCONNECTING between
 	// >>> iterations of the for-loop, bail if set?
 
-	// >>> should we impose a minumum #of entries? like 4 or 8?
+	// >>> should we impose a minimum #of entries? like 4 or 8?
 	for (nentries = ch->remote_nentries; nentries > 0; nentries--) {
 
 		nbytes = nentries * ch->msg_size;
@@ -1478,7 +1478,7 @@ xpc_teardown_infrastructure(struct xpc_partition *part)
 
 
 	/*
-	 * Before proceding with the teardown we have to wait until all
+	 * Before proceeding with the teardown we have to wait until all
 	 * existing references cease.
 	 */
 	wait_event(part->teardown_wq, (atomic_read(&part->references) == 0));
diff --git a/arch/ia64/sn/kernel/xpnet.c b/arch/ia64/sn/kernel/xpnet.c
index da7213530972..e58fcadff2e9 100644
--- a/arch/ia64/sn/kernel/xpnet.c
+++ b/arch/ia64/sn/kernel/xpnet.c
@@ -531,7 +531,7 @@ xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	dev_dbg(xpnet, "destination Partitions mask (dp) = 0x%lx\n", dp);
 
 	/*
-	 * If we wanted to allow promiscous mode to work like an
+	 * If we wanted to allow promiscuous mode to work like an
 	 * unswitched network, this would be a good point to OR in a
 	 * mask of partitions which should be receiving all packets.
 	 */
diff --git a/arch/ia64/sn/pci/pci_dma.c b/arch/ia64/sn/pci/pci_dma.c
index 7a291a271511..d79ddacfba2d 100644
--- a/arch/ia64/sn/pci/pci_dma.c
+++ b/arch/ia64/sn/pci/pci_dma.c
@@ -333,7 +333,7 @@ int sn_pci_legacy_read(struct pci_bus *bus, u16 port, u32 *val, u8 size)
 	/*
 	 * First, try the SN_SAL_IOIF_PCI_SAFE SAL call which can work
 	 * around hw issues at the pci bus level. SGI proms older than
-	 * 4.10 don't implment this.
+	 * 4.10 don't implement this.
 	 */
 
 	SAL_CALL(isrv, SN_SAL_IOIF_PCI_SAFE,
@@ -348,7 +348,7 @@ int sn_pci_legacy_read(struct pci_bus *bus, u16 port, u32 *val, u8 size)
 	/*
 	 * If the above failed, retry using the SAL_PROBE call which should
 	 * be present in all proms (but which cannot work round PCI chipset
-	 * bugs). This code is retained for compatability with old
+	 * bugs). This code is retained for compatibility with old
 	 * pre-4.10 proms, and should be removed at some point in the future.
 	 */
 
@@ -379,7 +379,7 @@ int sn_pci_legacy_write(struct pci_bus *bus, u16 port, u32 val, u8 size)
 	/*
 	 * First, try the SN_SAL_IOIF_PCI_SAFE SAL call which can work
 	 * around hw issues at the pci bus level. SGI proms older than
-	 * 4.10 don't implment this.
+	 * 4.10 don't implement this.
 	 */
 
 	SAL_CALL(isrv, SN_SAL_IOIF_PCI_SAFE,
@@ -394,7 +394,7 @@ int sn_pci_legacy_write(struct pci_bus *bus, u16 port, u32 val, u8 size)
 	/*
 	 * If the above failed, retry using the SAL_PROBE call which should
 	 * be present in all proms (but which cannot work round PCI chipset
-	 * bugs). This code is retained for compatability with old
+	 * bugs). This code is retained for compatibility with old
 	 * pre-4.10 proms, and should be removed at some point in the future.
 	 */
 
diff --git a/arch/ia64/sn/pci/pcibr/pcibr_ate.c b/arch/ia64/sn/pci/pcibr/pcibr_ate.c
index 935029fc400d..239b3cedcf2b 100644
--- a/arch/ia64/sn/pci/pcibr/pcibr_ate.c
+++ b/arch/ia64/sn/pci/pcibr/pcibr_ate.c
@@ -30,7 +30,7 @@ static void mark_ate(struct ate_resource *ate_resource, int start, int number,
 
 /*
  * find_free_ate:  Find the first free ate index starting from the given
- *		    index for the desired consequtive count.
+ *		    index for the desired consecutive count.
  */
 static int find_free_ate(struct ate_resource *ate_resource, int start,
 			 int count)
@@ -88,7 +88,7 @@ static inline int alloc_ate_resource(struct ate_resource *ate_resource,
 		return -1;
 
 	/*
-	 * Find the required number of free consequtive ates.
+	 * Find the required number of free consecutive ates.
 	 */
 	start_index =
 	    find_free_ate(ate_resource, ate_resource->lowest_free_index,
@@ -105,7 +105,7 @@ static inline int alloc_ate_resource(struct ate_resource *ate_resource,
 /*
  * Allocate "count" contiguous Bridge Address Translation Entries
  * on the specified bridge to be used for PCI to XTALK mappings.
- * Indices in rm map range from 1..num_entries. Indicies returned
+ * Indices in rm map range from 1..num_entries. Indices returned
  * to caller range from 0..num_entries-1.
  *
  * Return the start index on success, -1 on failure.
diff --git a/arch/ia64/sn/pci/pcibr/pcibr_dma.c b/arch/ia64/sn/pci/pcibr/pcibr_dma.c
index 95af40cb22f2..e626e50a938a 100644
--- a/arch/ia64/sn/pci/pcibr/pcibr_dma.c
+++ b/arch/ia64/sn/pci/pcibr/pcibr_dma.c
@@ -201,7 +201,7 @@ pcibr_dmatrans_direct32(struct pcidev_info * info,
 }
 
 /*
- * Wrapper routine for free'ing DMA maps
+ * Wrapper routine for freeing DMA maps
  * DMA mappings for Direct 64 and 32 do not have any DMA maps.
  */
 void
diff --git a/arch/ia64/sn/pci/tioca_provider.c b/arch/ia64/sn/pci/tioca_provider.c
index 8a2cb4e691fd..b9bedbd6e1d6 100644
--- a/arch/ia64/sn/pci/tioca_provider.c
+++ b/arch/ia64/sn/pci/tioca_provider.c
@@ -223,7 +223,7 @@ tioca_fastwrite_enable(struct tioca_kernel *tioca_kern)
 
 	/*
 	 * Scan all vga controllers on this bus making sure they all
-	 * suport FW. If not, return.
+	 * support FW. If not, return.
 	 */
 
 	list_for_each_entry(pdev, tioca_kern->ca_devices, bus_list) {
@@ -364,7 +364,7 @@ tioca_dma_d48(struct pci_dev *pdev, u64 paddr)
  * @req_size: len (bytes) to map
  *
  * Map @paddr into CA address space using the GART mechanism. The mapped
- * dma_addr_t is guarenteed to be contiguous in CA bus space.
+ * dma_addr_t is guaranteed to be contiguous in CA bus space.
  */
 static dma_addr_t
 tioca_dma_mapped(struct pci_dev *pdev, u64 paddr, size_t req_size)
@@ -526,7 +526,7 @@ tioca_dma_map(struct pci_dev *pdev, u64 paddr, size_t byte_count, int dma_flags)
 		return 0;
 
 	/*
-	 * If card is 64 or 48 bit addresable, use a direct mapping. 32
+	 * If card is 64 or 48 bit addressable, use a direct mapping. 32
 	 * bit direct is so restrictive w.r.t. where the memory resides that
 	 * we don't use it even though CA has some support.
 	 */
diff --git a/arch/ia64/sn/pci/tioce_provider.c b/arch/ia64/sn/pci/tioce_provider.c
index 35f854fb6120..f4c0b961a939 100644
--- a/arch/ia64/sn/pci/tioce_provider.c
+++ b/arch/ia64/sn/pci/tioce_provider.c
@@ -256,9 +256,9 @@ pcidev_to_tioce(struct pci_dev *pdev, struct tioce __iomem **base,
  * @ct_addr: the coretalk address to map
  * @len: number of bytes to map
  *
- * Given the addressing type, set up various paramaters that define the
+ * Given the addressing type, set up various parameters that define the
  * ATE pool to use. Search for a contiguous block of entries to cover the
- * length, and if enough resources exist, fill in the ATE's and construct a
+ * length, and if enough resources exist, fill in the ATEs and construct a
  * tioce_dmamap struct to track the mapping.
  */
 static u64
@@ -581,8 +581,8 @@ tioce_do_dma_map(struct pci_dev *pdev, u64 paddr, size_t byte_count,
 	 */
 	if (!mapaddr && !barrier && dma_mask >= 0xffffffffffUL) {
 		/*
-		 * We have two options for 40-bit mappings: 16GB "super" ATE's
-		 * and 64MB "regular" ATE's. We'll try both if needed for a
+		 * We have two options for 40-bit mappings: 16GB "super" ATEs
+		 * and 64MB "regular" ATEs. We'll try both if needed for a
 		 * given mapping but which one we try first depends on the
 		 * size. For requests >64MB, prefer to use a super page with
 		 * regular as the fallback. Otherwise, try in the reverse order.
@@ -687,8 +687,8 @@ tioce_error_intr_handler(int irq, void *arg)
 }
 
 /**
- * tioce_reserve_m32 - reserve M32 ate's for the indicated address range
- * @tioce_kernel: TIOCE context to reserve ate's for
+ * tioce_reserve_m32 - reserve M32 ATEs for the indicated address range
+ * @tioce_kernel: TIOCE context to reserve ATEs for
  * @base: starting bus address to reserve
  * @limit: last bus address to reserve
  *
@@ -763,7 +763,7 @@ tioce_kern_init(struct tioce_common *tioce_common)
 
 	/*
 	 * Set PMU pagesize to the largest size available, and zero out
-	 * the ate's.
+	 * the ATEs.
 	 */
 
 	tioce_mmr = (struct tioce __iomem *)tioce_common->ce_pcibus.bs_base;
@@ -784,7 +784,7 @@ tioce_kern_init(struct tioce_common *tioce_common)
 	}
 
 	/*
-	 * Reserve ATE's corresponding to reserved address ranges. These
+	 * Reserve ATEs corresponding to reserved address ranges. These
 	 * include:
 	 *
 	 * Memory space covered by each PPB mem base/limit register