author    KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>  2011-05-24 20:12:58 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>    2011-05-25 11:39:39 -0400
commit    8ea9716fd6aa761482caa5d4d64b256ed07ac09f (patch)
tree      1dbe6d8bbb9557b27e0a6b0a7d0f3d6934926368 /arch/mn10300
parent    81ee42baa433881bcb471aa6366e2f885a33f2fb (diff)
mn10300: convert old cpumask API into new one
Adapt to the new cpumask API. We plan to remove the old cpumask APIs later, so this patch converts the remaining users to the new one.

Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: David Howells <dhowells@redhat.com>
Cc: Koichi Yasutake <yasutake.koichi@jp.panasonic.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Chris Metcalf <cmetcalf@tilera.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
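The conversion is mechanical: the old interface passed cpumask_t values around, while the new one takes struct cpumask pointers, so cpu_isset()/cpu_set()/cpu_clear() become cpumask_test_cpu()/cpumask_set_cpu()/cpumask_clear_cpu() on &mask, cpus_empty() becomes cpumask_empty(), first_cpu()/any_online_cpu() become cpumask_first()/cpumask_any_and(), and the cpu_online_map value is replaced by the cpu_online_mask pointer. A minimal sketch of that mapping, not part of the patch; the helper pick_other_online_cpu() is hypothetical:

#include <linux/cpumask.h>

/* Hypothetical helper, for illustration only: find an online CPU other
 * than CPU 0 using the new pointer-based cpumask API. The old-API
 * equivalents are noted in the comments.
 */
static int pick_other_online_cpu(void)
{
	cpumask_t mask;

	cpumask_copy(&mask, cpu_online_mask);	/* was: mask = cpu_online_map; */
	cpumask_clear_cpu(0, &mask);		/* was: cpu_clear(0, mask); */

	if (cpumask_empty(&mask))		/* was: cpus_empty(mask) */
		return -1;

	return cpumask_first(&mask);		/* was: first_cpu(mask) */
}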
Diffstat (limited to 'arch/mn10300')
-rw-r--r--  arch/mn10300/kernel/irq.c   | 16
-rw-r--r--  arch/mn10300/kernel/smp.c   | 75
-rw-r--r--  arch/mn10300/mm/cache-smp.c |  8
-rw-r--r--  arch/mn10300/mm/tlb-smp.c   | 32
4 files changed, 68 insertions(+), 63 deletions(-)
diff --git a/arch/mn10300/kernel/irq.c b/arch/mn10300/kernel/irq.c
index 86af0d7d0771..2623d19f4f4c 100644
--- a/arch/mn10300/kernel/irq.c
+++ b/arch/mn10300/kernel/irq.c
@@ -87,7 +87,7 @@ static void mn10300_cpupic_mask_ack(struct irq_data *d)
 		tmp2 = GxICR(irq);
 
 		irq_affinity_online[irq] =
-			any_online_cpu(*d->affinity);
+			cpumask_any_and(d->affinity, cpu_online_mask);
 		CROSS_GxICR(irq, irq_affinity_online[irq]) =
 			(tmp & (GxICR_LEVEL | GxICR_ENABLE)) | GxICR_DETECT;
 		tmp = CROSS_GxICR(irq, irq_affinity_online[irq]);
@@ -124,7 +124,8 @@ static void mn10300_cpupic_unmask_clear(struct irq_data *d)
 	} else {
 		tmp = GxICR(irq);
 
-		irq_affinity_online[irq] = any_online_cpu(*d->affinity);
+		irq_affinity_online[irq] = cpumask_any_and(d->affinity,
+							   cpu_online_mask);
 		CROSS_GxICR(irq, irq_affinity_online[irq]) = (tmp & GxICR_LEVEL) | GxICR_ENABLE | GxICR_DETECT;
 		tmp = CROSS_GxICR(irq, irq_affinity_online[irq]);
 	}
@@ -366,11 +367,11 @@ void migrate_irqs(void)
 		if (irqd_is_per_cpu(data))
 			continue;
 
-		if (cpu_isset(self, data->affinity) &&
-		    !cpus_intersects(irq_affinity[irq], cpu_online_map)) {
+		if (cpumask_test_cpu(self, &data->affinity) &&
+		    !cpumask_intersects(&irq_affinity[irq], cpu_online_mask)) {
 			int cpu_id;
-			cpu_id = first_cpu(cpu_online_map);
-			cpu_set(cpu_id, data->affinity);
+			cpu_id = cpumask_first(cpu_online_mask);
+			cpumask_set_cpu(cpu_id, &data->affinity);
 		}
 		/* We need to operate irq_affinity_online atomically. */
 		arch_local_cli_save(flags);
@@ -381,7 +382,8 @@ void migrate_irqs(void)
 			GxICR(irq) = x & GxICR_LEVEL;
 			tmp = GxICR(irq);
 
-			new = any_online_cpu(data->affinity);
+			new = cpumask_any_and(&data->affinity,
+					      cpu_online_mask);
 			irq_affinity_online[irq] = new;
 
 			CROSS_GxICR(irq, new) =
diff --git a/arch/mn10300/kernel/smp.c b/arch/mn10300/kernel/smp.c
index 83fb27912231..9242e9fcc564 100644
--- a/arch/mn10300/kernel/smp.c
+++ b/arch/mn10300/kernel/smp.c
@@ -309,7 +309,7 @@ static void send_IPI_mask(const cpumask_t *cpumask, int irq)
 	u16 tmp;
 
 	for (i = 0; i < NR_CPUS; i++) {
-		if (cpu_isset(i, *cpumask)) {
+		if (cpumask_test_cpu(i, cpumask)) {
 			/* send IPI */
 			tmp = CROSS_GxICR(irq, i);
 			CROSS_GxICR(irq, i) =
@@ -342,8 +342,8 @@ void send_IPI_allbutself(int irq)
 {
 	cpumask_t cpumask;
 
-	cpumask = cpu_online_map;
-	cpu_clear(smp_processor_id(), cpumask);
+	cpumask_copy(&cpumask, cpu_online_mask);
+	cpumask_clear_cpu(smp_processor_id(), &cpumask);
 	send_IPI_mask(&cpumask, irq);
 }
 
@@ -393,8 +393,8 @@ int smp_nmi_call_function(smp_call_func_t func, void *info, int wait)
 
 	data.func = func;
 	data.info = info;
-	data.started = cpu_online_map;
-	cpu_clear(smp_processor_id(), data.started);
+	cpumask_copy(&data.started, cpu_online_mask);
+	cpumask_clear_cpu(smp_processor_id(), &data.started);
 	data.wait = wait;
 	if (wait)
 		data.finished = data.started;
@@ -410,14 +410,14 @@ int smp_nmi_call_function(smp_call_func_t func, void *info, int wait)
 	if (CALL_FUNCTION_NMI_IPI_TIMEOUT > 0) {
 		for (cnt = 0;
 		     cnt < CALL_FUNCTION_NMI_IPI_TIMEOUT &&
-		     !cpus_empty(data.started);
+		     !cpumask_empty(&data.started);
 		     cnt++)
 			mdelay(1);
 
 		if (wait && cnt < CALL_FUNCTION_NMI_IPI_TIMEOUT) {
 			for (cnt = 0;
 			     cnt < CALL_FUNCTION_NMI_IPI_TIMEOUT &&
-			     !cpus_empty(data.finished);
+			     !cpumask_empty(&data.finished);
 			     cnt++)
 				mdelay(1);
 		}
@@ -428,10 +428,10 @@ int smp_nmi_call_function(smp_call_func_t func, void *info, int wait)
 	} else {
 		/* If timeout value is zero, wait until cpumask has been
 		 * cleared */
-		while (!cpus_empty(data.started))
+		while (!cpumask_empty(&data.started))
 			barrier();
 		if (wait)
-			while (!cpus_empty(data.finished))
+			while (!cpumask_empty(&data.finished))
 				barrier();
 	}
 
@@ -472,12 +472,12 @@ void stop_this_cpu(void *unused)
 #endif /* CONFIG_GDBSTUB */
 
 	flags = arch_local_cli_save();
-	cpu_clear(smp_processor_id(), cpu_online_map);
+	set_cpu_online(smp_processor_id(), false);
 
 	while (!stopflag)
 		cpu_relax();
 
-	cpu_set(smp_processor_id(), cpu_online_map);
+	set_cpu_online(smp_processor_id(), true);
 	arch_local_irq_restore(flags);
 }
 
@@ -529,12 +529,13 @@ void smp_nmi_call_function_interrupt(void)
 	 * execute the function
 	 */
 	smp_mb();
-	cpu_clear(smp_processor_id(), nmi_call_data->started);
+	cpumask_clear_cpu(smp_processor_id(), &nmi_call_data->started);
 	(*func)(info);
 
 	if (wait) {
 		smp_mb();
-		cpu_clear(smp_processor_id(), nmi_call_data->finished);
+		cpumask_clear_cpu(smp_processor_id(),
+				  &nmi_call_data->finished);
 	}
 }
 
@@ -657,7 +658,7 @@ int __init start_secondary(void *unused)
 {
 	smp_cpu_init();
 	smp_callin();
-	while (!cpu_isset(smp_processor_id(), smp_commenced_mask))
+	while (!cpumask_test_cpu(smp_processor_id(), &smp_commenced_mask))
 		cpu_relax();
 
 	local_flush_tlb();
@@ -780,13 +781,14 @@ static int __init do_boot_cpu(int phy_id)
 
 	if (send_status == 0) {
 		/* Allow AP to start initializing */
-		cpu_set(cpu_id, cpu_callout_map);
+		cpumask_set_cpu(cpu_id, &cpu_callout_map);
 
 		/* Wait for setting cpu_callin_map */
 		timeout = 0;
 		do {
 			udelay(1000);
-			callin_status = cpu_isset(cpu_id, cpu_callin_map);
+			callin_status = cpumask_test_cpu(cpu_id,
+							 &cpu_callin_map);
 		} while (callin_status == 0 && timeout++ < 5000);
 
 		if (callin_status == 0)
@@ -796,9 +798,9 @@ static int __init do_boot_cpu(int phy_id)
 	}
 
 	if (send_status == GxICR_REQUEST || callin_status == 0) {
-		cpu_clear(cpu_id, cpu_callout_map);
-		cpu_clear(cpu_id, cpu_callin_map);
-		cpu_clear(cpu_id, cpu_initialized);
+		cpumask_clear_cpu(cpu_id, &cpu_callout_map);
+		cpumask_clear_cpu(cpu_id, &cpu_callin_map);
+		cpumask_clear_cpu(cpu_id, &cpu_initialized);
 		cpucount--;
 		return 1;
 	}
@@ -833,7 +835,7 @@ static void __init smp_callin(void)
 	cpu = smp_processor_id();
 	timeout = jiffies + (2 * HZ);
 
-	if (cpu_isset(cpu, cpu_callin_map)) {
+	if (cpumask_test_cpu(cpu, &cpu_callin_map)) {
 		printk(KERN_ERR "CPU#%d already present.\n", cpu);
 		BUG();
 	}
@@ -841,7 +843,7 @@ static void __init smp_callin(void)
 
 	/* Wait for AP startup 2s total */
 	while (time_before(jiffies, timeout)) {
-		if (cpu_isset(cpu, cpu_callout_map))
+		if (cpumask_test_cpu(cpu, &cpu_callout_map))
 			break;
 		cpu_relax();
 	}
@@ -861,11 +863,11 @@ static void __init smp_callin(void)
 	smp_store_cpu_info(cpu);
 
 	/* Allow the boot processor to continue */
-	cpu_set(cpu, cpu_callin_map);
+	cpumask_set_cpu(cpu, &cpu_callin_map);
 }
 
 /**
- * smp_online - Set cpu_online_map
+ * smp_online - Set cpu_online_mask
  */
 static void __init smp_online(void)
 {
@@ -875,7 +877,7 @@ static void __init smp_online(void)
 
 	local_irq_enable();
 
-	cpu_set(cpu, cpu_online_map);
+	set_cpu_online(cpu, true);
 	smp_wmb();
 }
 
@@ -892,13 +894,13 @@ void __init smp_cpus_done(unsigned int max_cpus)
 /*
  * smp_prepare_boot_cpu - Set up stuff for the boot processor.
  *
- * Set up the cpu_online_map, cpu_callout_map and cpu_callin_map of the boot
+ * Set up the cpu_online_mask, cpu_callout_map and cpu_callin_map of the boot
  * processor (CPU 0).
  */
 void __devinit smp_prepare_boot_cpu(void)
 {
-	cpu_set(0, cpu_callout_map);
-	cpu_set(0, cpu_callin_map);
+	cpumask_set_cpu(0, &cpu_callout_map);
+	cpumask_set_cpu(0, &cpu_callin_map);
 	current_thread_info()->cpu = 0;
 }
 
@@ -931,16 +933,16 @@ int __devinit __cpu_up(unsigned int cpu)
 	run_wakeup_cpu(cpu);
 #endif /* CONFIG_HOTPLUG_CPU */
 
-	cpu_set(cpu, smp_commenced_mask);
+	cpumask_set_cpu(cpu, &smp_commenced_mask);
 
 	/* Wait 5s total for a response */
 	for (timeout = 0 ; timeout < 5000 ; timeout++) {
-		if (cpu_isset(cpu, cpu_online_map))
+		if (cpu_online(cpu))
 			break;
 		udelay(1000);
 	}
 
-	BUG_ON(!cpu_isset(cpu, cpu_online_map));
+	BUG_ON(!cpu_online(cpu));
 	return 0;
 }
 
@@ -986,7 +988,7 @@ int __cpu_disable(void)
 		return -EBUSY;
 
 	migrate_irqs();
-	cpu_clear(cpu, current->active_mm->cpu_vm_mask);
+	cpumask_clear_cpu(cpu, &mm_cpumask(current->active_mm));
 	return 0;
 }
 
@@ -1091,13 +1093,13 @@ static int hotplug_cpu_nmi_call_function(cpumask_t cpumask,
 	do {
 		mn10300_local_dcache_inv_range(start, end);
 		barrier();
-	} while (!cpus_empty(nmi_call_func_mask_data.started));
+	} while (!cpumask_empty(&nmi_call_func_mask_data.started));
 
 	if (wait) {
 		do {
 			mn10300_local_dcache_inv_range(start, end);
 			barrier();
-		} while (!cpus_empty(nmi_call_func_mask_data.finished));
+		} while (!cpumask_empty(&nmi_call_func_mask_data.finished));
 	}
 
 	spin_unlock(&smp_nmi_call_lock);
@@ -1108,9 +1110,9 @@ static void restart_wakeup_cpu(void)
 {
 	unsigned int cpu = smp_processor_id();
 
-	cpu_set(cpu, cpu_callin_map);
+	cpumask_set_cpu(cpu, &cpu_callin_map);
 	local_flush_tlb();
-	cpu_set(cpu, cpu_online_map);
+	set_cpu_online(cpu, true);
 	smp_wmb();
 }
 
@@ -1141,8 +1143,9 @@ static void sleep_cpu(void *unused)
 static void run_sleep_cpu(unsigned int cpu)
 {
 	unsigned long flags;
-	cpumask_t cpumask = cpumask_of(cpu);
+	cpumask_t cpumask;
 
+	cpumask_copy(&cpumask, &cpumask_of(cpu));
 	flags = arch_local_cli_save();
 	hotplug_cpu_nmi_call_function(cpumask, prepare_sleep_cpu, NULL, 1);
 	hotplug_cpu_nmi_call_function(cpumask, sleep_cpu, NULL, 0);
diff --git a/arch/mn10300/mm/cache-smp.c b/arch/mn10300/mm/cache-smp.c
index 4a6e9a4b5b27..2d23b9eeee62 100644
--- a/arch/mn10300/mm/cache-smp.c
+++ b/arch/mn10300/mm/cache-smp.c
@@ -74,7 +74,7 @@ void smp_cache_interrupt(void)
 		break;
 	}
 
-	cpu_clear(smp_processor_id(), smp_cache_ipi_map);
+	cpumask_clear_cpu(smp_processor_id(), &smp_cache_ipi_map);
 }
 
 /**
@@ -94,12 +94,12 @@ void smp_cache_call(unsigned long opr_mask,
 	smp_cache_mask = opr_mask;
 	smp_cache_start = start;
 	smp_cache_end = end;
-	smp_cache_ipi_map = cpu_online_map;
-	cpu_clear(smp_processor_id(), smp_cache_ipi_map);
+	cpumask_copy(&smp_cache_ipi_map, cpu_online_mask);
+	cpumask_clear_cpu(smp_processor_id(), &smp_cache_ipi_map);
 
 	send_IPI_allbutself(FLUSH_CACHE_IPI);
 
-	while (!cpus_empty(smp_cache_ipi_map))
+	while (!cpumask_empty(&smp_cache_ipi_map))
 		/* nothing. lockup detection does not belong here */
 		mb();
 }
diff --git a/arch/mn10300/mm/tlb-smp.c b/arch/mn10300/mm/tlb-smp.c
index 0b6a5ad1960e..9a777498a916 100644
--- a/arch/mn10300/mm/tlb-smp.c
+++ b/arch/mn10300/mm/tlb-smp.c
@@ -64,7 +64,7 @@ void smp_flush_tlb(void *unused)
 
 	cpu_id = get_cpu();
 
-	if (!cpu_isset(cpu_id, flush_cpumask))
+	if (!cpumask_test_cpu(cpu_id, &flush_cpumask))
 		/* This was a BUG() but until someone can quote me the line
 		 * from the intel manual that guarantees an IPI to multiple
 		 * CPUs is retried _only_ on the erroring CPUs its staying as a
@@ -80,7 +80,7 @@ void smp_flush_tlb(void *unused)
 	local_flush_tlb_page(flush_mm, flush_va);
 
 	smp_mb__before_clear_bit();
-	cpu_clear(cpu_id, flush_cpumask);
+	cpumask_clear_cpu(cpu_id, &flush_cpumask);
 	smp_mb__after_clear_bit();
 out:
 	put_cpu();
@@ -103,11 +103,11 @@ static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
 	 * - we do not send IPIs to as-yet unbooted CPUs.
 	 */
 	BUG_ON(!mm);
-	BUG_ON(cpus_empty(cpumask));
-	BUG_ON(cpu_isset(smp_processor_id(), cpumask));
+	BUG_ON(cpumask_empty(&cpumask));
+	BUG_ON(cpumask_test_cpu(smp_processor_id(), &cpumask));
 
-	cpus_and(tmp, cpumask, cpu_online_map);
-	BUG_ON(!cpus_equal(cpumask, tmp));
+	cpumask_and(&tmp, &cpumask, cpu_online_mask);
+	BUG_ON(!cpumask_equal(&cpumask, &tmp));
 
 	/* I'm not happy about this global shared spinlock in the MM hot path,
 	 * but we'll see how contended it is.
@@ -128,7 +128,7 @@ static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
 	/* FIXME: if NR_CPUS>=3, change send_IPI_mask */
 	smp_call_function(smp_flush_tlb, NULL, 1);
 
-	while (!cpus_empty(flush_cpumask))
+	while (!cpumask_empty(&flush_cpumask))
 		/* Lockup detection does not belong here */
 		smp_mb();
 
@@ -146,11 +146,11 @@ void flush_tlb_mm(struct mm_struct *mm)
 	cpumask_t cpu_mask;
 
 	preempt_disable();
-	cpu_mask = mm->cpu_vm_mask;
-	cpu_clear(smp_processor_id(), cpu_mask);
+	cpumask_copy(&cpu_mask, mm_cpumask(mm));
+	cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
 
 	local_flush_tlb();
-	if (!cpus_empty(cpu_mask))
+	if (!cpumask_empty(&cpu_mask))
 		flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
 
 	preempt_enable();
@@ -165,11 +165,11 @@ void flush_tlb_current_task(void)
 	cpumask_t cpu_mask;
 
 	preempt_disable();
-	cpu_mask = mm->cpu_vm_mask;
-	cpu_clear(smp_processor_id(), cpu_mask);
+	cpumask_copy(&cpu_mask, mm_cpumask(mm));
+	cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
 
 	local_flush_tlb();
-	if (!cpus_empty(cpu_mask))
+	if (!cpumask_empty(&cpu_mask))
 		flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
 
 	preempt_enable();
@@ -186,11 +186,11 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
 	cpumask_t cpu_mask;
 
 	preempt_disable();
-	cpu_mask = mm->cpu_vm_mask;
-	cpu_clear(smp_processor_id(), cpu_mask);
+	cpumask_copy(&cpu_mask, mm_cpumask(mm));
+	cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
 
 	local_flush_tlb_page(mm, va);
-	if (!cpus_empty(cpu_mask))
+	if (!cpumask_empty(&cpu_mask))
 		flush_tlb_others(cpu_mask, mm, va);
 
 	preempt_enable();