Diffstat (limited to 'arch/mips')
-rw-r--r--  arch/mips/cavium-octeon/dma-octeon.c  | 12
-rw-r--r--  arch/mips/cavium-octeon/smp.c         |  4
-rw-r--r--  arch/mips/include/asm/dma-mapping.h   | 18
-rw-r--r--  arch/mips/kernel/mips-mt-fpaff.c      |  2
-rw-r--r--  arch/mips/kernel/proc.c               |  2
-rw-r--r--  arch/mips/kernel/smp-bmips.c          |  2
-rw-r--r--  arch/mips/kernel/smp.c                | 27
-rw-r--r--  arch/mips/kernel/smtc.c               |  2
-rw-r--r--  arch/mips/mm/c-octeon.c               |  6
-rw-r--r--  arch/mips/mm/dma-default.c            |  8
-rw-r--r--  arch/mips/netlogic/common/smp.c       |  6
-rw-r--r--  arch/mips/pmc-sierra/yosemite/smp.c   |  8
-rw-r--r--  arch/mips/sgi-ip27/ip27-smp.c         |  2
-rw-r--r--  arch/mips/sibyte/bcm1480/smp.c        |  7
-rw-r--r--  arch/mips/sibyte/sb1250/smp.c         |  7
15 files changed, 57 insertions(+), 56 deletions(-)
diff --git a/arch/mips/cavium-octeon/dma-octeon.c b/arch/mips/cavium-octeon/dma-octeon.c
index b6bb92c16a47..41dd00884975 100644
--- a/arch/mips/cavium-octeon/dma-octeon.c
+++ b/arch/mips/cavium-octeon/dma-octeon.c
@@ -157,7 +157,7 @@ static void octeon_dma_sync_sg_for_device(struct device *dev,
 }
 
 static void *octeon_dma_alloc_coherent(struct device *dev, size_t size,
-        dma_addr_t *dma_handle, gfp_t gfp)
+        dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs)
 {
         void *ret;
 
@@ -192,7 +192,7 @@ static void *octeon_dma_alloc_coherent(struct device *dev, size_t size,
 }
 
 static void octeon_dma_free_coherent(struct device *dev, size_t size,
-        void *vaddr, dma_addr_t dma_handle)
+        void *vaddr, dma_addr_t dma_handle, struct dma_attrs *attrs)
 {
         int order = get_order(size);
 
@@ -240,8 +240,8 @@ EXPORT_SYMBOL(dma_to_phys);
 
 static struct octeon_dma_map_ops octeon_linear_dma_map_ops = {
         .dma_map_ops = {
-                .alloc_coherent = octeon_dma_alloc_coherent,
-                .free_coherent = octeon_dma_free_coherent,
+                .alloc = octeon_dma_alloc_coherent,
+                .free = octeon_dma_free_coherent,
                 .map_page = octeon_dma_map_page,
                 .unmap_page = swiotlb_unmap_page,
                 .map_sg = octeon_dma_map_sg,
@@ -325,8 +325,8 @@ void __init plat_swiotlb_setup(void)
 #ifdef CONFIG_PCI
 static struct octeon_dma_map_ops _octeon_pci_dma_map_ops = {
         .dma_map_ops = {
-                .alloc_coherent = octeon_dma_alloc_coherent,
-                .free_coherent = octeon_dma_free_coherent,
+                .alloc = octeon_dma_alloc_coherent,
+                .free = octeon_dma_free_coherent,
                 .map_page = octeon_dma_map_page,
                 .unmap_page = swiotlb_unmap_page,
                 .map_sg = octeon_dma_map_sg,
diff --git a/arch/mips/cavium-octeon/smp.c b/arch/mips/cavium-octeon/smp.c
index c3e2b85c3b02..97e7ce9b50ed 100644
--- a/arch/mips/cavium-octeon/smp.c
+++ b/arch/mips/cavium-octeon/smp.c
@@ -78,7 +78,7 @@ static inline void octeon_send_ipi_mask(const struct cpumask *mask,
 }
 
 /**
- * Detect available CPUs, populate cpu_possible_map
+ * Detect available CPUs, populate cpu_possible_mask
  */
 static void octeon_smp_hotplug_setup(void)
 {
@@ -268,7 +268,7 @@ static int octeon_cpu_disable(void)
 
         spin_lock(&smp_reserve_lock);
 
-        cpu_clear(cpu, cpu_online_map);
+        set_cpu_online(cpu, false);
         cpu_clear(cpu, cpu_callin_map);
         local_irq_disable();
         fixup_irqs();
diff --git a/arch/mips/include/asm/dma-mapping.h b/arch/mips/include/asm/dma-mapping.h
index 7aa37ddfca4b..be39a12901c6 100644
--- a/arch/mips/include/asm/dma-mapping.h
+++ b/arch/mips/include/asm/dma-mapping.h
@@ -57,25 +57,31 @@ dma_set_mask(struct device *dev, u64 mask)
 extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
                enum dma_data_direction direction);
 
-static inline void *dma_alloc_coherent(struct device *dev, size_t size,
-        dma_addr_t *dma_handle, gfp_t gfp)
+#define dma_alloc_coherent(d,s,h,f) dma_alloc_attrs(d,s,h,f,NULL)
+
+static inline void *dma_alloc_attrs(struct device *dev, size_t size,
+        dma_addr_t *dma_handle, gfp_t gfp,
+        struct dma_attrs *attrs)
 {
         void *ret;
         struct dma_map_ops *ops = get_dma_ops(dev);
 
-        ret = ops->alloc_coherent(dev, size, dma_handle, gfp);
+        ret = ops->alloc(dev, size, dma_handle, gfp, attrs);
 
         debug_dma_alloc_coherent(dev, size, *dma_handle, ret);
 
         return ret;
 }
 
-static inline void dma_free_coherent(struct device *dev, size_t size,
-        void *vaddr, dma_addr_t dma_handle)
+#define dma_free_coherent(d,s,c,h) dma_free_attrs(d,s,c,h,NULL)
+
+static inline void dma_free_attrs(struct device *dev, size_t size,
+        void *vaddr, dma_addr_t dma_handle,
+        struct dma_attrs *attrs)
 {
         struct dma_map_ops *ops = get_dma_ops(dev);
 
-        ops->free_coherent(dev, size, vaddr, dma_handle);
+        ops->free(dev, size, vaddr, dma_handle, attrs);
 
         debug_dma_free_coherent(dev, size, vaddr, dma_handle);
 }
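
For context, callers outside this header are unaffected: dma_alloc_coherent()/dma_free_coherent() now simply expand to dma_alloc_attrs()/dma_free_attrs() with a NULL attrs argument, which route to the ->alloc()/->free() hooks of the architecture's dma_map_ops. A minimal, illustrative driver sketch (the function name, device pointer and error handling are hypothetical, not part of this patch):

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/gfp.h>

/* Hypothetical example: allocate and free one page of coherent DMA memory.
 * dma_alloc_coherent() expands to dma_alloc_attrs(dev, size, handle, gfp, NULL),
 * which ends up in ops->alloc() of the architecture's struct dma_map_ops.
 */
static int example_coherent_alloc(struct device *dev)
{
        dma_addr_t dma_handle;
        void *cpu_addr;

        cpu_addr = dma_alloc_coherent(dev, PAGE_SIZE, &dma_handle, GFP_KERNEL);
        if (!cpu_addr)
                return -ENOMEM;

        /* ... program dma_handle into the device, use cpu_addr from the CPU ... */

        dma_free_coherent(dev, PAGE_SIZE, cpu_addr, dma_handle);
        return 0;
}
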
diff --git a/arch/mips/kernel/mips-mt-fpaff.c b/arch/mips/kernel/mips-mt-fpaff.c
index 802e6160f37e..33f63bab478a 100644
--- a/arch/mips/kernel/mips-mt-fpaff.c
+++ b/arch/mips/kernel/mips-mt-fpaff.c
@@ -173,7 +173,7 @@ asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len,
         if (retval)
                 goto out_unlock;
 
-        cpus_and(mask, p->thread.user_cpus_allowed, cpu_possible_map);
+        cpumask_and(&mask, &p->thread.user_cpus_allowed, cpu_possible_mask);
 
 out_unlock:
         read_unlock(&tasklist_lock);
diff --git a/arch/mips/kernel/proc.c b/arch/mips/kernel/proc.c
index e309665b6c81..f8b2c592514d 100644
--- a/arch/mips/kernel/proc.c
+++ b/arch/mips/kernel/proc.c
@@ -25,7 +25,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
         int i;
 
 #ifdef CONFIG_SMP
-        if (!cpu_isset(n, cpu_online_map))
+        if (!cpu_online(n))
                 return 0;
 #endif
 
diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c
index ca673569fd24..3046e2986006 100644
--- a/arch/mips/kernel/smp-bmips.c
+++ b/arch/mips/kernel/smp-bmips.c
@@ -317,7 +317,7 @@ static int bmips_cpu_disable(void)
 
         pr_info("SMP: CPU%d is offline\n", cpu);
 
-        cpu_clear(cpu, cpu_online_map);
+        set_cpu_online(cpu, false);
         cpu_clear(cpu, cpu_callin_map);
 
         local_flush_tlb_all();
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index 9c1cce9de35f..ba9376bf52a1 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -148,7 +148,7 @@ static void stop_this_cpu(void *dummy)
         /*
          * Remove this CPU:
          */
-        cpu_clear(smp_processor_id(), cpu_online_map);
+        set_cpu_online(smp_processor_id(), false);
         for (;;) {
                 if (cpu_wait)
                         (*cpu_wait)();          /* Wait if available. */
@@ -174,7 +174,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
         mp_ops->prepare_cpus(max_cpus);
         set_cpu_sibling_map(0);
 #ifndef CONFIG_HOTPLUG_CPU
-        init_cpu_present(&cpu_possible_map);
+        init_cpu_present(cpu_possible_mask);
 #endif
 }
 
@@ -248,7 +248,7 @@ int __cpuinit __cpu_up(unsigned int cpu)
         while (!cpu_isset(cpu, cpu_callin_map))
                 udelay(100);
 
-        cpu_set(cpu, cpu_online_map);
+        set_cpu_online(cpu, true);
 
         return 0;
 }
@@ -320,13 +320,12 @@ void flush_tlb_mm(struct mm_struct *mm)
         if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
                 smp_on_other_tlbs(flush_tlb_mm_ipi, mm);
         } else {
-                cpumask_t mask = cpu_online_map;
                 unsigned int cpu;
 
-                cpu_clear(smp_processor_id(), mask);
-                for_each_cpu_mask(cpu, mask)
-                        if (cpu_context(cpu, mm))
+                for_each_online_cpu(cpu) {
+                        if (cpu != smp_processor_id() && cpu_context(cpu, mm))
                                 cpu_context(cpu, mm) = 0;
+                }
         }
         local_flush_tlb_mm(mm);
 
@@ -360,13 +359,12 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned l
 
                 smp_on_other_tlbs(flush_tlb_range_ipi, &fd);
         } else {
-                cpumask_t mask = cpu_online_map;
                 unsigned int cpu;
 
-                cpu_clear(smp_processor_id(), mask);
-                for_each_cpu_mask(cpu, mask)
-                        if (cpu_context(cpu, mm))
+                for_each_online_cpu(cpu) {
+                        if (cpu != smp_processor_id() && cpu_context(cpu, mm))
                                 cpu_context(cpu, mm) = 0;
+                }
         }
         local_flush_tlb_range(vma, start, end);
         preempt_enable();
@@ -407,13 +405,12 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
 
                 smp_on_other_tlbs(flush_tlb_page_ipi, &fd);
         } else {
-                cpumask_t mask = cpu_online_map;
                 unsigned int cpu;
 
-                cpu_clear(smp_processor_id(), mask);
-                for_each_cpu_mask(cpu, mask)
-                        if (cpu_context(cpu, vma->vm_mm))
+                for_each_online_cpu(cpu) {
+                        if (cpu != smp_processor_id() && cpu_context(cpu, vma->vm_mm))
                                 cpu_context(cpu, vma->vm_mm) = 0;
+                }
         }
         local_flush_tlb_page(vma, page);
         preempt_enable();
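
The three flush_tlb_*() hunks above, and most of the remaining files in this patch, follow the same pattern: instead of copying cpu_online_map into an on-stack cpumask_t and manipulating it with the old cpu_* macros, the code calls the cpumask accessor functions and iterates cpu_online_mask directly. A small illustrative sketch of the idioms involved (the function and the messages are hypothetical, not taken from this patch; it assumes preemption is disabled by the caller, as in the flush_tlb paths):

#include <linux/cpumask.h>
#include <linux/printk.h>
#include <linux/smp.h>

/* Hypothetical example of the accessor-based cpumask idioms. */
static void cpumask_idiom_example(unsigned int cpu)
{
        unsigned int i;

        set_cpu_possible(cpu, true);    /* was: cpu_set(cpu, cpu_possible_map) */
        set_cpu_online(cpu, false);     /* was: cpu_clear(cpu, cpu_online_map) */

        if (cpu_online(cpu))            /* was: cpu_isset(cpu, cpu_online_map) */
                pr_info("CPU%u is online\n", cpu);

        /* was: copy cpu_online_map to a stack cpumask_t, clear the local CPU,
         * then for_each_cpu_mask() over the copy.
         */
        for_each_online_cpu(i) {
                if (i == smp_processor_id())
                        continue;
                pr_info("other online CPU: %u\n", i);
        }
}
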
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c
index c4f75bbc0bd6..f5dd38f1d015 100644
--- a/arch/mips/kernel/smtc.c
+++ b/arch/mips/kernel/smtc.c
@@ -291,7 +291,7 @@ static void smtc_configure_tlb(void)
  * possibly leave some TCs/VPEs as "slave" processors.
  *
  * Use c0_MVPConf0 to find out how many TCs are available, setting up
- * cpu_possible_map and the logical/physical mappings.
+ * cpu_possible_mask and the logical/physical mappings.
  */
 
 int __init smtc_build_cpu_map(int start_cpu_slot)
diff --git a/arch/mips/mm/c-octeon.c b/arch/mips/mm/c-octeon.c
index 1f9ca07f53c8..47037ec5589b 100644
--- a/arch/mips/mm/c-octeon.c
+++ b/arch/mips/mm/c-octeon.c
@@ -80,9 +80,9 @@ static void octeon_flush_icache_all_cores(struct vm_area_struct *vma)
         if (vma)
                 mask = *mm_cpumask(vma->vm_mm);
         else
-                mask = cpu_online_map;
-        cpu_clear(cpu, mask);
-        for_each_cpu_mask(cpu, mask)
+                mask = *cpu_online_mask;
+        cpumask_clear_cpu(cpu, &mask);
+        for_each_cpu(cpu, &mask)
                 octeon_send_ipi_single(cpu, SMP_ICACHE_FLUSH);
 
         preempt_enable();
diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c
index 46084912e588..3fab2046c8a4 100644
--- a/arch/mips/mm/dma-default.c
+++ b/arch/mips/mm/dma-default.c
@@ -98,7 +98,7 @@ void *dma_alloc_noncoherent(struct device *dev, size_t size,
 EXPORT_SYMBOL(dma_alloc_noncoherent);
 
 static void *mips_dma_alloc_coherent(struct device *dev, size_t size,
-        dma_addr_t * dma_handle, gfp_t gfp)
+        dma_addr_t * dma_handle, gfp_t gfp, struct dma_attrs *attrs)
 {
         void *ret;
 
@@ -132,7 +132,7 @@ void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
 EXPORT_SYMBOL(dma_free_noncoherent);
 
 static void mips_dma_free_coherent(struct device *dev, size_t size, void *vaddr,
-        dma_addr_t dma_handle)
+        dma_addr_t dma_handle, struct dma_attrs *attrs)
 {
         unsigned long addr = (unsigned long) vaddr;
         int order = get_order(size);
@@ -323,8 +323,8 @@ void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 EXPORT_SYMBOL(dma_cache_sync);
 
 static struct dma_map_ops mips_default_dma_map_ops = {
-        .alloc_coherent = mips_dma_alloc_coherent,
-        .free_coherent = mips_dma_free_coherent,
+        .alloc = mips_dma_alloc_coherent,
+        .free = mips_dma_free_coherent,
         .map_page = mips_dma_map_page,
         .unmap_page = mips_dma_unmap_page,
         .map_sg = mips_dma_map_sg,
diff --git a/arch/mips/netlogic/common/smp.c b/arch/mips/netlogic/common/smp.c
index db17f49886c2..fab316de57e9 100644
--- a/arch/mips/netlogic/common/smp.c
+++ b/arch/mips/netlogic/common/smp.c
@@ -165,7 +165,7 @@ void __init nlm_smp_setup(void)
         cpu_set(boot_cpu, phys_cpu_present_map);
         __cpu_number_map[boot_cpu] = 0;
         __cpu_logical_map[0] = boot_cpu;
-        cpu_set(0, cpu_possible_map);
+        set_cpu_possible(0, true);
 
         num_cpus = 1;
         for (i = 0; i < NR_CPUS; i++) {
@@ -177,14 +177,14 @@ void __init nlm_smp_setup(void)
                         cpu_set(i, phys_cpu_present_map);
                         __cpu_number_map[i] = num_cpus;
                         __cpu_logical_map[num_cpus] = i;
-                        cpu_set(num_cpus, cpu_possible_map);
+                        set_cpu_possible(num_cpus, true);
                         ++num_cpus;
                 }
         }
 
         pr_info("Phys CPU present map: %lx, possible map %lx\n",
                 (unsigned long)phys_cpu_present_map.bits[0],
-                (unsigned long)cpu_possible_map.bits[0]);
+                (unsigned long)cpumask_bits(cpu_possible_mask)[0]);
 
         pr_info("Detected %i Slave CPU(s)\n", num_cpus);
         nlm_set_nmi_handler(nlm_boot_secondary_cpus);
diff --git a/arch/mips/pmc-sierra/yosemite/smp.c b/arch/mips/pmc-sierra/yosemite/smp.c
index 2608752898c0..b71fae231049 100644
--- a/arch/mips/pmc-sierra/yosemite/smp.c
+++ b/arch/mips/pmc-sierra/yosemite/smp.c
@@ -146,7 +146,7 @@ static void __cpuinit yos_boot_secondary(int cpu, struct task_struct *idle)
 }
 
 /*
- * Detect available CPUs, populate cpu_possible_map before smp_init
+ * Detect available CPUs, populate cpu_possible_mask before smp_init
  *
  * We don't want to start the secondary CPU yet nor do we have a nice probing
  * feature in PMON so we just assume presence of the secondary core.
@@ -155,10 +155,10 @@ static void __init yos_smp_setup(void)
 {
         int i;
 
-        cpus_clear(cpu_possible_map);
+        init_cpu_possible(cpu_none_mask);
 
         for (i = 0; i < 2; i++) {
-                cpu_set(i, cpu_possible_map);
+                set_cpu_possible(i, true);
                 __cpu_number_map[i] = i;
                 __cpu_logical_map[i] = i;
         }
@@ -169,7 +169,7 @@ static void __init yos_prepare_cpus(unsigned int max_cpus)
         /*
          * Be paranoid. Enable the IPI only if we're really about to go SMP.
          */
-        if (cpus_weight(cpu_possible_map))
+        if (num_possible_cpus())
                 set_c0_status(STATUSF_IP5);
 }
 
diff --git a/arch/mips/sgi-ip27/ip27-smp.c b/arch/mips/sgi-ip27/ip27-smp.c
index c6851df9ab74..735b43bf8f82 100644
--- a/arch/mips/sgi-ip27/ip27-smp.c
+++ b/arch/mips/sgi-ip27/ip27-smp.c
@@ -76,7 +76,7 @@ static int do_cpumask(cnodeid_t cnode, nasid_t nasid, int highest)
                 /* Only let it join in if it's marked enabled */
                 if ((acpu->cpu_info.flags & KLINFO_ENABLE) &&
                     (tot_cpus_found != NR_CPUS)) {
-                        cpu_set(cpuid, cpu_possible_map);
+                        set_cpu_possible(cpuid, true);
                         alloc_cpupda(cpuid, tot_cpus_found);
                         cpus_found++;
                         tot_cpus_found++;
diff --git a/arch/mips/sibyte/bcm1480/smp.c b/arch/mips/sibyte/bcm1480/smp.c
index d667875be564..de88e22694a0 100644
--- a/arch/mips/sibyte/bcm1480/smp.c
+++ b/arch/mips/sibyte/bcm1480/smp.c
@@ -138,7 +138,7 @@ static void __cpuinit bcm1480_boot_secondary(int cpu, struct task_struct *idle)
 
 /*
  * Use CFE to find out how many CPUs are available, setting up
- * cpu_possible_map and the logical/physical mappings.
+ * cpu_possible_mask and the logical/physical mappings.
  * XXXKW will the boot CPU ever not be physical 0?
  *
  * Common setup before any secondaries are started
@@ -147,14 +147,13 @@ static void __init bcm1480_smp_setup(void)
 {
         int i, num;
 
-        cpus_clear(cpu_possible_map);
-        cpu_set(0, cpu_possible_map);
+        init_cpu_possible(cpumask_of(0));
         __cpu_number_map[0] = 0;
         __cpu_logical_map[0] = 0;
 
         for (i = 1, num = 0; i < NR_CPUS; i++) {
                 if (cfe_cpu_stop(i) == 0) {
-                        cpu_set(i, cpu_possible_map);
+                        set_cpu_possible(i, true);
                         __cpu_number_map[i] = ++num;
                         __cpu_logical_map[num] = i;
                 }
diff --git a/arch/mips/sibyte/sb1250/smp.c b/arch/mips/sibyte/sb1250/smp.c
index 38e7f6bd7922..285cfef4ebc0 100644
--- a/arch/mips/sibyte/sb1250/smp.c
+++ b/arch/mips/sibyte/sb1250/smp.c
@@ -126,7 +126,7 @@ static void __cpuinit sb1250_boot_secondary(int cpu, struct task_struct *idle)
 
 /*
  * Use CFE to find out how many CPUs are available, setting up
- * cpu_possible_map and the logical/physical mappings.
+ * cpu_possible_mask and the logical/physical mappings.
  * XXXKW will the boot CPU ever not be physical 0?
  *
  * Common setup before any secondaries are started
@@ -135,14 +135,13 @@ static void __init sb1250_smp_setup(void)
 {
         int i, num;
 
-        cpus_clear(cpu_possible_map);
-        cpu_set(0, cpu_possible_map);
+        init_cpu_possible(cpumask_of(0));
         __cpu_number_map[0] = 0;
         __cpu_logical_map[0] = 0;
 
         for (i = 1, num = 0; i < NR_CPUS; i++) {
                 if (cfe_cpu_stop(i) == 0) {
-                        cpu_set(i, cpu_possible_map);
+                        set_cpu_possible(i, true);
                         __cpu_number_map[i] = ++num;
                         __cpu_logical_map[num] = i;
                 }
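
As a closing note on the two sibyte conversions: the single init_cpu_possible(cpumask_of(0)) call stands in for the old clear-then-set pair. A minimal sketch of the equivalence (illustrative only; the helper name is hypothetical and based on the generic cpumask API of this kernel generation):

#include <linux/cpumask.h>
#include <linux/init.h>

/* Hypothetical helper showing what the bcm1480/sb1250 setup relies on:
 * cpumask_of(0) is a constant mask with only bit 0 set, so one call replaces
 * the old pair
 *
 *     cpus_clear(cpu_possible_map);
 *     cpu_set(0, cpu_possible_map);
 *
 * because init_cpu_possible() copies its argument into cpu_possible_mask.
 */
static void __init example_seed_possible_mask(void)
{
        init_cpu_possible(cpumask_of(0));
}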