Diffstat (limited to 'arch/sparc')
-rw-r--r--  arch/sparc/Kconfig                       |  3
-rw-r--r--  arch/sparc/Kconfig.debug                 |  3
-rw-r--r--  arch/sparc/include/asm/mmu_context_64.h  |  8
-rw-r--r--  arch/sparc/include/asm/smp_64.h          |  3
-rw-r--r--  arch/sparc/include/asm/system_32.h       |  2
-rw-r--r--  arch/sparc/include/asm/topology_64.h     | 12
-rw-r--r--  arch/sparc/kernel/ds.c                   |  2
-rw-r--r--  arch/sparc/kernel/irq_32.c               |  2
-rw-r--r--  arch/sparc/kernel/irq_64.c               |  4
-rw-r--r--  arch/sparc/kernel/led.c                  |  1
-rw-r--r--  arch/sparc/kernel/mdesc.c                |  2
-rw-r--r--  arch/sparc/kernel/nmi.c                  | 23
-rw-r--r--  arch/sparc/kernel/prom_64.c              |  4
-rw-r--r--  arch/sparc/kernel/smp_32.c               | 36
-rw-r--r--  arch/sparc/kernel/smp_64.c               | 14
-rw-r--r--  arch/sparc/kernel/sun4d_irq.c            |  1
-rw-r--r--  arch/sparc/kernel/sun4d_smp.c            | 11
-rw-r--r--  arch/sparc/kernel/sun4m_smp.c            | 10
-rw-r--r--  arch/sparc/mm/highmem.c                  |  1
-rw-r--r--  arch/sparc/mm/init_64.c                  |  2
-rw-r--r--  arch/sparc/mm/srmmu.c                    |  2
21 files changed, 78 insertions(+), 68 deletions(-)
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index c3ea215334f6..cc12cd48bbc5 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -124,6 +124,9 @@ config ARCH_NO_VIRT_TO_BUS
 config OF
 	def_bool y
 
+config ARCH_SUPPORTS_DEBUG_PAGEALLOC
+	def_bool y if SPARC64
+
 source "init/Kconfig"
 
 source "kernel/Kconfig.freezer"
diff --git a/arch/sparc/Kconfig.debug b/arch/sparc/Kconfig.debug
index b8a15e271bfa..d001b42041a5 100644
--- a/arch/sparc/Kconfig.debug
+++ b/arch/sparc/Kconfig.debug
@@ -24,7 +24,8 @@ config STACK_DEBUG
 
 config DEBUG_PAGEALLOC
 	bool "Debug page memory allocations"
-	depends on SPARC64 && DEBUG_KERNEL && !HIBERNATION
+	depends on DEBUG_KERNEL && !HIBERNATION
+	depends on ARCH_SUPPORTS_DEBUG_PAGEALLOC
 	help
 	  Unmap pages from the kernel linear mapping after free_pages().
 	  This results in a large slowdown, but helps to find certain types
diff --git a/arch/sparc/include/asm/mmu_context_64.h b/arch/sparc/include/asm/mmu_context_64.h
index 5693ab482606..666a73fef28d 100644
--- a/arch/sparc/include/asm/mmu_context_64.h
+++ b/arch/sparc/include/asm/mmu_context_64.h
@@ -121,8 +121,8 @@ static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, str
 	 * local TLB.
 	 */
 	cpu = smp_processor_id();
-	if (!ctx_valid || !cpu_isset(cpu, mm->cpu_vm_mask)) {
-		cpu_set(cpu, mm->cpu_vm_mask);
+	if (!ctx_valid || !cpumask_test_cpu(cpu, mm_cpumask(mm))) {
+		cpumask_set_cpu(cpu, mm_cpumask(mm));
 		__flush_tlb_mm(CTX_HWBITS(mm->context),
 			       SECONDARY_CONTEXT);
 	}
@@ -141,8 +141,8 @@ static inline void activate_mm(struct mm_struct *active_mm, struct mm_struct *mm
 	if (!CTX_VALID(mm->context))
 		get_new_mmu_context(mm);
 	cpu = smp_processor_id();
-	if (!cpu_isset(cpu, mm->cpu_vm_mask))
-		cpu_set(cpu, mm->cpu_vm_mask);
+	if (!cpumask_test_cpu(cpu, mm_cpumask(mm)))
+		cpumask_set_cpu(cpu, mm_cpumask(mm));
 
 	load_secondary_context(mm);
 	__flush_tlb_mm(CTX_HWBITS(mm->context), SECONDARY_CONTEXT);
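Both hunks above follow the pattern used throughout this series: the per-mm CPU mask is reached through the mm_cpumask() accessor and updated with the cpumask_*() helpers instead of poking mm->cpu_vm_mask directly. A minimal sketch of that accessor style, with a hypothetical helper name that is not part of this diff:

#include <linux/cpumask.h>
#include <linux/sched.h>
#include <linux/smp.h>

/* Hypothetical helper: record that the current CPU is using @mm.
 * Illustrates the accessor-based style the hunks above convert to. */
static inline void mark_mm_active_here(struct mm_struct *mm)
{
	int cpu = smp_processor_id();

	/* mm_cpumask(mm) replaces direct access to mm->cpu_vm_mask */
	if (!cpumask_test_cpu(cpu, mm_cpumask(mm)))
		cpumask_set_cpu(cpu, mm_cpumask(mm));
}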
diff --git a/arch/sparc/include/asm/smp_64.h b/arch/sparc/include/asm/smp_64.h
index 57224dd37b3a..becb6bf353a9 100644
--- a/arch/sparc/include/asm/smp_64.h
+++ b/arch/sparc/include/asm/smp_64.h
@@ -35,7 +35,8 @@ extern cpumask_t cpu_core_map[NR_CPUS];
 extern int sparc64_multi_core;
 
 extern void arch_send_call_function_single_ipi(int cpu);
-extern void arch_send_call_function_ipi(cpumask_t mask);
+extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
+#define arch_send_call_function_ipi_mask arch_send_call_function_ipi_mask
 
 /*
  * General functions that each host system must provide.
diff --git a/arch/sparc/include/asm/system_32.h b/arch/sparc/include/asm/system_32.h
index 79c1ae2b42a3..751c8c17f5a0 100644
--- a/arch/sparc/include/asm/system_32.h
+++ b/arch/sparc/include/asm/system_32.h
@@ -126,7 +126,7 @@ extern void flushw_all(void);
 #define switch_to(prev, next, last) do { \
 	SWITCH_ENTER(prev); \
 	SWITCH_DO_LAZY_FPU(next); \
-	cpu_set(smp_processor_id(), next->active_mm->cpu_vm_mask); \
+	cpumask_set_cpu(smp_processor_id(), mm_cpumask(next->active_mm)); \
 	__asm__ __volatile__( \
 	"sethi %%hi(here - 0x8), %%o7\n\t" \
 	"mov %%g6, %%g3\n\t" \
diff --git a/arch/sparc/include/asm/topology_64.h b/arch/sparc/include/asm/topology_64.h
index 5bc0b8fd6374..e5ea8d332421 100644
--- a/arch/sparc/include/asm/topology_64.h
+++ b/arch/sparc/include/asm/topology_64.h
@@ -28,11 +28,6 @@ static inline cpumask_t node_to_cpumask(int node)
 #define node_to_cpumask_ptr_next(v, node) \
 		v = &(numa_cpumask_lookup_table[node])
 
-static inline int node_to_first_cpu(int node)
-{
-	return cpumask_first(cpumask_of_node(node));
-}
-
 struct pci_bus;
 #ifdef CONFIG_PCI
 extern int pcibus_to_node(struct pci_bus *pbus);
@@ -43,13 +38,9 @@ static inline int pcibus_to_node(struct pci_bus *pbus)
 }
 #endif
 
-#define pcibus_to_cpumask(bus) \
-	(pcibus_to_node(bus) == -1 ? \
-	 CPU_MASK_ALL : \
-	 node_to_cpumask(pcibus_to_node(bus)))
 #define cpumask_of_pcibus(bus) \
 	(pcibus_to_node(bus) == -1 ? \
-	 CPU_MASK_ALL_PTR : \
+	 cpu_all_mask : \
 	 cpumask_of_node(pcibus_to_node(bus)))
 
 #define SD_NODE_INIT (struct sched_domain) { \
@@ -89,7 +80,6 @@ static inline int pcibus_to_node(struct pci_bus *pbus)
 #define smt_capable() (sparc64_multi_core)
 #endif /* CONFIG_SMP */
 
-#define cpu_coregroup_map(cpu) (cpu_core_map[cpu])
 #define cpu_coregroup_mask(cpu) (&cpu_core_map[cpu])
 
 #endif /* _ASM_SPARC64_TOPOLOGY_H */
diff --git a/arch/sparc/kernel/ds.c b/arch/sparc/kernel/ds.c
index 57c39843fb2a..90350f838f05 100644
--- a/arch/sparc/kernel/ds.c
+++ b/arch/sparc/kernel/ds.c
@@ -653,7 +653,7 @@ static void __cpuinit dr_cpu_data(struct ds_info *dp,
 		if (cpu_list[i] == CPU_SENTINEL)
 			continue;
 
-		if (cpu_list[i] < NR_CPUS)
+		if (cpu_list[i] < nr_cpu_ids)
 			cpu_set(cpu_list[i], mask);
 	}
 
diff --git a/arch/sparc/kernel/irq_32.c b/arch/sparc/kernel/irq_32.c
index 44dd5ee64339..ad800b80c718 100644
--- a/arch/sparc/kernel/irq_32.c
+++ b/arch/sparc/kernel/irq_32.c
@@ -439,7 +439,6 @@ static int request_fast_irq(unsigned int irq,
 	flush_cache_all();
 
 	action->flags = irqflags;
-	cpus_clear(action->mask);
 	action->name = devname;
 	action->dev_id = NULL;
 	action->next = NULL;
@@ -574,7 +573,6 @@ int request_irq(unsigned int irq,
 
 	action->handler = handler;
 	action->flags = irqflags;
-	cpus_clear(action->mask);
 	action->name = devname;
 	action->next = NULL;
 	action->dev_id = dev_id;
diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c
index d0d6a515499a..5deabe921a47 100644
--- a/arch/sparc/kernel/irq_64.c
+++ b/arch/sparc/kernel/irq_64.c
@@ -266,12 +266,12 @@ static int irq_choose_cpu(unsigned int virt_irq)
 		spin_lock_irqsave(&irq_rover_lock, flags);
 
 		while (!cpu_online(irq_rover)) {
-			if (++irq_rover >= NR_CPUS)
+			if (++irq_rover >= nr_cpu_ids)
 				irq_rover = 0;
 		}
 		cpuid = irq_rover;
 		do {
-			if (++irq_rover >= NR_CPUS)
+			if (++irq_rover >= nr_cpu_ids)
 				irq_rover = 0;
 		} while (!cpu_online(irq_rover));
 
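The rover in irq_choose_cpu() is now bounded by nr_cpu_ids, the runtime count of possible CPU ids, rather than the compile-time NR_CPUS ceiling. A small sketch of the same wrap-around idea expressed with the cpumask iterators; the helper name is hypothetical and not part of this diff:

#include <linux/cpumask.h>

/* Hypothetical round-robin helper: advance @rover to the next online
 * CPU, wrapping at nr_cpu_ids rather than NR_CPUS. */
static unsigned int next_online_rover(unsigned int rover)
{
	rover = cpumask_next(rover, cpu_online_mask);
	if (rover >= nr_cpu_ids)
		rover = cpumask_first(cpu_online_mask);
	return rover;
}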
diff --git a/arch/sparc/kernel/led.c b/arch/sparc/kernel/led.c
index adaaed4ea2fb..00d034ea2164 100644
--- a/arch/sparc/kernel/led.c
+++ b/arch/sparc/kernel/led.c
@@ -126,7 +126,6 @@ static int __init led_init(void)
 	led = proc_create("led", 0, NULL, &led_proc_fops);
 	if (!led)
 		return -ENOMEM;
-	led->owner = THIS_MODULE;
 
 	printk(KERN_INFO
 	       "led: version %s, Lars Kotthoff <metalhead@metalhead.ws>\n",
diff --git a/arch/sparc/kernel/mdesc.c b/arch/sparc/kernel/mdesc.c
index 3f79f0c23a08..f0e6ed23a468 100644
--- a/arch/sparc/kernel/mdesc.c
+++ b/arch/sparc/kernel/mdesc.c
@@ -567,7 +567,7 @@ static void __init report_platform_properties(void)
 			max_cpu = NR_CPUS;
 		}
 		for (i = 0; i < max_cpu; i++)
-			cpu_set(i, cpu_possible_map);
+			set_cpu_possible(i, true);
 	}
 #endif
 
diff --git a/arch/sparc/kernel/nmi.c b/arch/sparc/kernel/nmi.c
index f3577223c863..2c0cc72d295b 100644
--- a/arch/sparc/kernel/nmi.c
+++ b/arch/sparc/kernel/nmi.c
@@ -13,6 +13,7 @@
 #include <linux/module.h>
 #include <linux/kprobes.h>
 #include <linux/kernel_stat.h>
+#include <linux/reboot.h>
 #include <linux/slab.h>
 #include <linux/kdebug.h>
 #include <linux/delay.h>
@@ -206,13 +207,33 @@ void nmi_adjust_hz(unsigned int new_hz)
 }
 EXPORT_SYMBOL_GPL(nmi_adjust_hz);
 
+static int nmi_shutdown(struct notifier_block *nb, unsigned long cmd, void *p)
+{
+	on_each_cpu(stop_watchdog, NULL, 1);
+	return 0;
+}
+
+static struct notifier_block nmi_reboot_notifier = {
+	.notifier_call = nmi_shutdown,
+};
+
 int __init nmi_init(void)
 {
+	int err;
+
 	nmi_usable = 1;
 
 	on_each_cpu(start_watchdog, NULL, 1);
 
-	return check_nmi_watchdog();
+	err = check_nmi_watchdog();
+	if (!err) {
+		err = register_reboot_notifier(&nmi_reboot_notifier);
+		if (err) {
+			nmi_usable = 0;
+			on_each_cpu(stop_watchdog, NULL, 1);
+		}
+	}
+	return err;
 }
 
 static int __init setup_nmi_watchdog(char *str)
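The hunk above ties the NMI watchdog into the reboot path: a reboot notifier stops the per-CPU watchdogs before the machine resets, and a failed registration unwinds the watchdog setup. For reference, a stripped-down sketch of the general register_reboot_notifier() pattern, with hypothetical names rather than the nmi_shutdown/stop_watchdog code shown above:

#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/reboot.h>

/* Hypothetical callback invoked on reboot/halt/power-off. */
static int example_reboot_cb(struct notifier_block *nb,
			     unsigned long event, void *data)
{
	/* quiesce hardware or per-CPU state here */
	return NOTIFY_DONE;
}

static struct notifier_block example_reboot_nb = {
	.notifier_call = example_reboot_cb,
};

static int __init example_init(void)
{
	return register_reboot_notifier(&example_reboot_nb);
}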
diff --git a/arch/sparc/kernel/prom_64.c b/arch/sparc/kernel/prom_64.c
index edecca7b8116..ca55c7012f77 100644
--- a/arch/sparc/kernel/prom_64.c
+++ b/arch/sparc/kernel/prom_64.c
@@ -518,8 +518,8 @@ void __init of_fill_in_cpu_data(void)
 		}
 
 #ifdef CONFIG_SMP
-		cpu_set(cpuid, cpu_present_map);
-		cpu_set(cpuid, cpu_possible_map);
+		set_cpu_present(cpuid, true);
+		set_cpu_possible(cpuid, true);
 #endif
 	}
 
diff --git a/arch/sparc/kernel/smp_32.c b/arch/sparc/kernel/smp_32.c
index 1e5ac4e282e1..132d81fb2616 100644
--- a/arch/sparc/kernel/smp_32.c
+++ b/arch/sparc/kernel/smp_32.c
@@ -70,13 +70,12 @@ void __init smp_cpus_done(unsigned int max_cpus)
 	extern void smp4m_smp_done(void);
 	extern void smp4d_smp_done(void);
 	unsigned long bogosum = 0;
-	int cpu, num;
+	int cpu, num = 0;
 
-	for (cpu = 0, num = 0; cpu < NR_CPUS; cpu++)
-		if (cpu_online(cpu)) {
-			num++;
-			bogosum += cpu_data(cpu).udelay_val;
-		}
+	for_each_online_cpu(cpu) {
+		num++;
+		bogosum += cpu_data(cpu).udelay_val;
+	}
 
 	printk("Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
 	       num, bogosum/(500000/HZ),
@@ -144,7 +143,7 @@ void smp_flush_tlb_all(void)
 void smp_flush_cache_mm(struct mm_struct *mm)
 {
 	if(mm->context != NO_CONTEXT) {
-		cpumask_t cpu_mask = mm->cpu_vm_mask;
+		cpumask_t cpu_mask = *mm_cpumask(mm);
 		cpu_clear(smp_processor_id(), cpu_mask);
 		if (!cpus_empty(cpu_mask))
 			xc1((smpfunc_t) BTFIXUP_CALL(local_flush_cache_mm), (unsigned long) mm);
@@ -155,12 +154,13 @@ void smp_flush_cache_mm(struct mm_struct *mm)
 void smp_flush_tlb_mm(struct mm_struct *mm)
 {
 	if(mm->context != NO_CONTEXT) {
-		cpumask_t cpu_mask = mm->cpu_vm_mask;
+		cpumask_t cpu_mask = *mm_cpumask(mm);
 		cpu_clear(smp_processor_id(), cpu_mask);
 		if (!cpus_empty(cpu_mask)) {
 			xc1((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_mm), (unsigned long) mm);
 			if(atomic_read(&mm->mm_users) == 1 && current->active_mm == mm)
-				mm->cpu_vm_mask = cpumask_of_cpu(smp_processor_id());
+				cpumask_copy(mm_cpumask(mm),
+					     cpumask_of(smp_processor_id()));
 		}
 		local_flush_tlb_mm(mm);
 	}
@@ -172,7 +172,7 @@ void smp_flush_cache_range(struct vm_area_struct *vma, unsigned long start,
 	struct mm_struct *mm = vma->vm_mm;
 
 	if (mm->context != NO_CONTEXT) {
-		cpumask_t cpu_mask = mm->cpu_vm_mask;
+		cpumask_t cpu_mask = *mm_cpumask(mm);
 		cpu_clear(smp_processor_id(), cpu_mask);
 		if (!cpus_empty(cpu_mask))
 			xc3((smpfunc_t) BTFIXUP_CALL(local_flush_cache_range), (unsigned long) vma, start, end);
@@ -186,7 +186,7 @@ void smp_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 	struct mm_struct *mm = vma->vm_mm;
 
 	if (mm->context != NO_CONTEXT) {
-		cpumask_t cpu_mask = mm->cpu_vm_mask;
+		cpumask_t cpu_mask = *mm_cpumask(mm);
 		cpu_clear(smp_processor_id(), cpu_mask);
 		if (!cpus_empty(cpu_mask))
 			xc3((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_range), (unsigned long) vma, start, end);
@@ -199,7 +199,7 @@ void smp_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
 	struct mm_struct *mm = vma->vm_mm;
 
 	if(mm->context != NO_CONTEXT) {
-		cpumask_t cpu_mask = mm->cpu_vm_mask;
+		cpumask_t cpu_mask = *mm_cpumask(mm);
 		cpu_clear(smp_processor_id(), cpu_mask);
 		if (!cpus_empty(cpu_mask))
 			xc2((smpfunc_t) BTFIXUP_CALL(local_flush_cache_page), (unsigned long) vma, page);
@@ -212,7 +212,7 @@ void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
 	struct mm_struct *mm = vma->vm_mm;
 
 	if(mm->context != NO_CONTEXT) {
-		cpumask_t cpu_mask = mm->cpu_vm_mask;
+		cpumask_t cpu_mask = *mm_cpumask(mm);
 		cpu_clear(smp_processor_id(), cpu_mask);
 		if (!cpus_empty(cpu_mask))
 			xc2((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_page), (unsigned long) vma, page);
@@ -241,7 +241,7 @@ void smp_flush_page_to_ram(unsigned long page)
 
 void smp_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
 {
-	cpumask_t cpu_mask = mm->cpu_vm_mask;
+	cpumask_t cpu_mask = *mm_cpumask(mm);
 	cpu_clear(smp_processor_id(), cpu_mask);
 	if (!cpus_empty(cpu_mask))
 		xc2((smpfunc_t) BTFIXUP_CALL(local_flush_sig_insns), (unsigned long) mm, insn_addr);
@@ -332,8 +332,8 @@ void __init smp_setup_cpu_possible_map(void)
 	instance = 0;
 	while (!cpu_find_by_instance(instance, NULL, &mid)) {
 		if (mid < NR_CPUS) {
-			cpu_set(mid, cpu_possible_map);
-			cpu_set(mid, cpu_present_map);
+			set_cpu_possible(mid, true);
+			set_cpu_present(mid, true);
 		}
 		instance++;
 	}
@@ -351,8 +351,8 @@ void __init smp_prepare_boot_cpu(void)
 		printk("boot cpu id != 0, this could work but is untested\n");
 
 	current_thread_info()->cpu = cpuid;
-	cpu_set(cpuid, cpu_online_map);
-	cpu_set(cpuid, cpu_possible_map);
+	set_cpu_online(cpuid, true);
+	set_cpu_possible(cpuid, true);
 }
 
 int __cpuinit __cpu_up(unsigned int cpu)
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index 79457f682b5a..708e12a26b05 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -808,9 +808,9 @@ static void smp_start_sync_tick_client(int cpu)
 
 extern unsigned long xcall_call_function;
 
-void arch_send_call_function_ipi(cpumask_t mask)
+void arch_send_call_function_ipi_mask(const struct cpumask *mask)
 {
-	xcall_deliver((u64) &xcall_call_function, 0, 0, &mask);
+	xcall_deliver((u64) &xcall_call_function, 0, 0, mask);
 }
 
 extern unsigned long xcall_call_function_single;
@@ -850,7 +850,7 @@ static void tsb_sync(void *info)
 
 void smp_tsb_sync(struct mm_struct *mm)
 {
-	smp_call_function_mask(mm->cpu_vm_mask, tsb_sync, mm, 1);
+	smp_call_function_many(mm_cpumask(mm), tsb_sync, mm, 1);
 }
 
 extern unsigned long xcall_flush_tlb_mm;
@@ -1055,13 +1055,13 @@ void smp_flush_tlb_mm(struct mm_struct *mm)
 	int cpu = get_cpu();
 
 	if (atomic_read(&mm->mm_users) == 1) {
-		mm->cpu_vm_mask = cpumask_of_cpu(cpu);
+		cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));
 		goto local_flush_and_out;
 	}
 
 	smp_cross_call_masked(&xcall_flush_tlb_mm,
 			      ctx, 0, 0,
-			      &mm->cpu_vm_mask);
+			      mm_cpumask(mm));
 
 local_flush_and_out:
 	__flush_tlb_mm(ctx, SECONDARY_CONTEXT);
@@ -1075,11 +1075,11 @@ void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long
 	int cpu = get_cpu();
 
 	if (mm == current->mm && atomic_read(&mm->mm_users) == 1)
-		mm->cpu_vm_mask = cpumask_of_cpu(cpu);
+		cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));
 	else
 		smp_cross_call_masked(&xcall_flush_tlb_pending,
 				      ctx, nr, (unsigned long) vaddrs,
-				      &mm->cpu_vm_mask);
+				      mm_cpumask(mm));
 
 	__flush_tlb_pending(ctx, nr, vaddrs);
 
diff --git a/arch/sparc/kernel/sun4d_irq.c b/arch/sparc/kernel/sun4d_irq.c
index 3369fef5b4b3..ab036a72de5a 100644
--- a/arch/sparc/kernel/sun4d_irq.c
+++ b/arch/sparc/kernel/sun4d_irq.c
@@ -326,7 +326,6 @@ int sun4d_request_irq(unsigned int irq,
 
 	action->handler = handler;
 	action->flags = irqflags;
-	cpus_clear(action->mask);
 	action->name = devname;
 	action->next = NULL;
 	action->dev_id = dev_id;
diff --git a/arch/sparc/kernel/sun4d_smp.c b/arch/sparc/kernel/sun4d_smp.c
index 50afaed99c8a..54fb02468f0d 100644
--- a/arch/sparc/kernel/sun4d_smp.c
+++ b/arch/sparc/kernel/sun4d_smp.c
@@ -150,7 +150,7 @@ void __cpuinit smp4d_callin(void)
 	spin_lock_irqsave(&sun4d_imsk_lock, flags);
 	cc_set_imsk(cc_get_imsk() & ~0x4000); /* Allow PIL 14 as well */
 	spin_unlock_irqrestore(&sun4d_imsk_lock, flags);
-	cpu_set(cpuid, cpu_online_map);
+	set_cpu_online(cpuid, true);
 
 }
 
@@ -228,11 +228,10 @@ void __init smp4d_smp_done(void)
 	/* setup cpu list for irq rotation */
 	first = 0;
 	prev = &first;
-	for (i = 0; i < NR_CPUS; i++)
-		if (cpu_online(i)) {
-			*prev = i;
-			prev = &cpu_data(i).next;
-		}
+	for_each_online_cpu(i) {
+		*prev = i;
+		prev = &cpu_data(i).next;
+	}
 	*prev = first;
 	local_flush_cache_all();
 
diff --git a/arch/sparc/kernel/sun4m_smp.c b/arch/sparc/kernel/sun4m_smp.c
index 8040376c4890..960b113d0006 100644
--- a/arch/sparc/kernel/sun4m_smp.c
+++ b/arch/sparc/kernel/sun4m_smp.c
@@ -113,7 +113,7 @@ void __cpuinit smp4m_callin(void)
 
 	local_irq_enable();
 
-	cpu_set(cpuid, cpu_online_map);
+	set_cpu_online(cpuid, true);
 }
 
 /*
@@ -186,11 +186,9 @@ void __init smp4m_smp_done(void)
 	/* setup cpu list for irq rotation */
 	first = 0;
 	prev = &first;
-	for (i = 0; i < NR_CPUS; i++) {
-		if (cpu_online(i)) {
-			*prev = i;
-			prev = &cpu_data(i).next;
-		}
+	for_each_online_cpu(i) {
+		*prev = i;
+		prev = &cpu_data(i).next;
 	}
 	*prev = first;
 	local_flush_cache_all();
diff --git a/arch/sparc/mm/highmem.c b/arch/sparc/mm/highmem.c
index 752d0c9fb544..7916feba6e4a 100644
--- a/arch/sparc/mm/highmem.c
+++ b/arch/sparc/mm/highmem.c
@@ -39,6 +39,7 @@ void *kmap_atomic(struct page *page, enum km_type type)
 	if (!PageHighMem(page))
 		return page_address(page);
 
+	debug_kmap_atomic(type);
 	idx = type + KM_TYPE_NR*smp_processor_id();
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index 00373ce2d8fb..2c8dfeb7ab04 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -1092,7 +1092,7 @@ static void __init numa_parse_mdesc_group_cpus(struct mdesc_handle *md,
 		if (strcmp(name, "cpu"))
 			continue;
 		id = mdesc_get_property(md, target, "id", NULL);
-		if (*id < NR_CPUS)
+		if (*id < nr_cpu_ids)
 			cpu_set(*id, *mask);
 	}
 }
diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
index fe7ed08390bb..06c9a7d98206 100644
--- a/arch/sparc/mm/srmmu.c
+++ b/arch/sparc/mm/srmmu.c
@@ -1425,7 +1425,7 @@ static void __init init_vac_layout(void)
 			min_line_size = vac_line_size;
 		//FIXME: cpus not contiguous!!
 		cpu++;
-		if (cpu >= NR_CPUS || !cpu_online(cpu))
+		if (cpu >= nr_cpu_ids || !cpu_online(cpu))
 			break;
 #else
 		break;