author		Benjamin Herrenschmidt <benh@kernel.crashing.org>	2008-12-18 14:13:38 -0500
committer	Paul Mackerras <paulus@samba.org>	2008-12-20 22:21:16 -0500
commit		f048aace29e007f2b642097e2da8231e0e9cce2d (patch)
tree		5e99b1d1d37817703132e97388994386a7bee8da
parent		7c03d653cd257793dc40520c94e229b5fd0578e7 (diff)
powerpc/mm: Add SMP support to no-hash TLB handling
This commit moves the whole no-hash TLB handling out of line into a new
tlb_nohash.c file, and implements some basic SMP support using IPIs and/or
broadcast tlbivax instructions.

Note that I'm using local invalidations for D->I cache coherency. At worst,
if another processor is trying to execute the same code and has the old entry
in its TLB, it will just take a fault and re-do the TLB flush locally (it
won't re-do the cache flush in any case).

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Acked-by: Kumar Gala <galak@kernel.crashing.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
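In outline, the SMP page-flush path the patch adds (the full version is in
the tlb_nohash.c hunk below) snapshots the PID under preempt_disable(), uses
a broadcast tlbivax where the MMU supports it, falls back to IPIs otherwise,
and finishes with a local invalidate. A condensed sketch, with the
MMU_NO_CONTEXT bail-out and the MMU_FTR_LOCK_BCAST_INVAL spinlock elided:

	void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
	{
		cpumask_t cpu_mask;
		unsigned int pid;

		preempt_disable();		/* keep the PID snapshot stable */
		pid = vma ? vma->vm_mm->context.id : 0;
		cpu_mask = vma->vm_mm->cpu_vm_mask;
		cpu_clear(smp_processor_id(), cpu_mask);
		if (!cpus_empty(cpu_mask) &&
		    mmu_has_feature(MMU_FTR_USE_TLBIVAX_BCAST)) {
			/* hardware broadcast reaches all CPUs, ours included */
			_tlbivax_bcast(vmaddr, pid);
			goto done;
		}
		if (!cpus_empty(cpu_mask)) {
			/* IPI every other CPU that has run this mm, and wait */
			struct tlb_flush_param p = { .pid = pid, .addr = vmaddr };
			smp_call_function_mask(cpu_mask, do_flush_tlb_page_ipi,
					       &p, 1);
		}
		_tlbil_va(vmaddr, pid);		/* local invalidate */
	done:
		preempt_enable();
	}

flush_tlb_mm() follows the same pattern with _tlbil_pid() and
do_flush_tlb_mm_ipi(), minus the tlbivax broadcast.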
-rw-r--r--	arch/powerpc/include/asm/highmem.h	  4
-rw-r--r--	arch/powerpc/include/asm/mmu.h		 16
-rw-r--r--	arch/powerpc/include/asm/tlbflush.h	 84
-rw-r--r--	arch/powerpc/kernel/misc_32.S		  9
-rw-r--r--	arch/powerpc/kernel/ppc_ksyms.c		  6
-rw-r--r--	arch/powerpc/mm/Makefile		  2
-rw-r--r--	arch/powerpc/mm/fault.c			  2
-rw-r--r--	arch/powerpc/mm/mem.c			  2
-rw-r--r--	arch/powerpc/mm/tlb_hash32.c		  4
-rw-r--r--	arch/powerpc/mm/tlb_nohash.c		209
10 files changed, 281 insertions(+), 57 deletions(-)
diff --git a/arch/powerpc/include/asm/highmem.h b/arch/powerpc/include/asm/highmem.h
index 7dc52eca8b67..fd97e501aa6a 100644
--- a/arch/powerpc/include/asm/highmem.h
+++ b/arch/powerpc/include/asm/highmem.h
@@ -85,7 +85,7 @@ static inline void *kmap_atomic_prot(struct page *page, enum km_type type, pgpro
 	BUG_ON(!pte_none(*(kmap_pte-idx)));
 #endif
 	__set_pte_at(&init_mm, vaddr, kmap_pte-idx, mk_pte(page, prot));
-	local_flush_tlb_page(vaddr);
+	local_flush_tlb_page(NULL, vaddr);
 
 	return (void*) vaddr;
 }
@@ -113,7 +113,7 @@ static inline void kunmap_atomic(void *kvaddr, enum km_type type)
 	 * this pte without first remap it
 	 */
 	pte_clear(&init_mm, vaddr, kmap_pte-idx);
-	local_flush_tlb_page(vaddr);
+	local_flush_tlb_page(NULL, vaddr);
 #endif
 	pagefault_enable();
 }
diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h
index dc8c0aef5e6c..6e7639911318 100644
--- a/arch/powerpc/include/asm/mmu.h
+++ b/arch/powerpc/include/asm/mmu.h
@@ -30,6 +30,22 @@
  */
 #define MMU_FTR_BIG_PHYS		ASM_CONST(0x00020000)
 
+/* Enable use of broadcast TLB invalidations. We don't always set it
+ * on processors that support it due to other constraints with the
+ * use of such invalidations
+ */
+#define MMU_FTR_USE_TLBIVAX_BCAST	ASM_CONST(0x00040000)
+
+/* Enable use of tlbilx invalidate-by-PID variant.
+ */
+#define MMU_FTR_USE_TLBILX_PID		ASM_CONST(0x00080000)
+
+/* This indicates that the processor cannot handle multiple outstanding
+ * broadcast tlbivax or tlbsync. This makes the code use a spinlock
+ * around such invalidate forms.
+ */
+#define MMU_FTR_LOCK_BCAST_INVAL	ASM_CONST(0x00100000)
+
 #ifndef __ASSEMBLY__
 #include <asm/cputable.h>
 
diff --git a/arch/powerpc/include/asm/tlbflush.h b/arch/powerpc/include/asm/tlbflush.h
index 9ed363d3de44..8c39b27c1ed7 100644
--- a/arch/powerpc/include/asm/tlbflush.h
+++ b/arch/powerpc/include/asm/tlbflush.h
@@ -6,7 +6,9 @@
  *
  *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
  *  - flush_tlb_page(vma, vmaddr) flushes one page
- *  - local_flush_tlb_page(vmaddr) flushes one page on the local processor
+ *  - local_flush_tlb_mm(mm) flushes the specified mm context on
+ *                           the local processor
+ *  - local_flush_tlb_page(vma, vmaddr) flushes one page on the local processor
  *  - flush_tlb_page_nohash(vma, vmaddr) flushes one page if SW loaded TLB
  *  - flush_tlb_range(vma, start, end) flushes a range of pages
  *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
@@ -18,7 +20,7 @@
  */
 #ifdef __KERNEL__
 
-#if defined(CONFIG_4xx) || defined(CONFIG_8xx) || defined(CONFIG_FSL_BOOKE)
+#ifdef CONFIG_PPC_MMU_NOHASH
 /*
  * TLB flushing for software loaded TLB chips
  *
@@ -31,10 +33,10 @@
 
 #define MMU_NO_CONTEXT		((unsigned int)-1)
 
-extern void _tlbie(unsigned long address, unsigned int pid);
 extern void _tlbil_all(void);
 extern void _tlbil_pid(unsigned int pid);
 extern void _tlbil_va(unsigned long address, unsigned int pid);
+extern void _tlbivax_bcast(unsigned long address, unsigned int pid);
 
 #if defined(CONFIG_40x) || defined(CONFIG_8xx)
 #define _tlbia()	asm volatile ("tlbia; sync" : : : "memory")
@@ -42,48 +44,26 @@ extern void _tlbil_va(unsigned long address, unsigned int pid);
 extern void _tlbia(void);
 #endif
 
-static inline void local_flush_tlb_mm(struct mm_struct *mm)
-{
-	_tlbil_pid(mm->context.id);
-}
-
-static inline void flush_tlb_mm(struct mm_struct *mm)
-{
-	_tlbil_pid(mm->context.id);
-}
-
-static inline void local_flush_tlb_page(unsigned long vmaddr)
-{
-	_tlbil_va(vmaddr, 0);
-}
-
-static inline void flush_tlb_page(struct vm_area_struct *vma,
-				  unsigned long vmaddr)
-{
-	_tlbil_va(vmaddr, vma ? vma->vm_mm->context.id : 0);
-}
+extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+			    unsigned long end);
+extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
 
-static inline void flush_tlb_page_nohash(struct vm_area_struct *vma,
-					 unsigned long vmaddr)
-{
-	flush_tlb_page(vma, vmaddr);
-}
+extern void local_flush_tlb_mm(struct mm_struct *mm);
+extern void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
 
-static inline void flush_tlb_range(struct vm_area_struct *vma,
-				   unsigned long start, unsigned long end)
-{
-	_tlbil_pid(vma->vm_mm->context.id);
-}
+#ifdef CONFIG_SMP
+extern void flush_tlb_mm(struct mm_struct *mm);
+extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
+#else
+#define flush_tlb_mm(mm)		local_flush_tlb_mm(mm)
+#define flush_tlb_page(vma,addr)	local_flush_tlb_page(vma,addr)
+#endif
+#define flush_tlb_page_nohash(vma,addr)	flush_tlb_page(vma,addr)
 
-static inline void flush_tlb_kernel_range(unsigned long start,
-					  unsigned long end)
-{
-	_tlbil_pid(0);
-}
+#elif defined(CONFIG_PPC_STD_MMU_32)
 
-#elif defined(CONFIG_PPC32)
 /*
- * TLB flushing for "classic" hash-MMMU 32-bit CPUs, 6xx, 7xx, 7xxx
+ * TLB flushing for "classic" hash-MMU 32-bit CPUs, 6xx, 7xx, 7xxx
  */
 extern void _tlbie(unsigned long address);
 extern void _tlbia(void);
@@ -94,14 +74,20 @@ extern void flush_tlb_page_nohash(struct vm_area_struct *vma, unsigned long addr
 extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 			    unsigned long end);
 extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
-static inline void local_flush_tlb_page(unsigned long vmaddr)
+static inline void local_flush_tlb_page(struct vm_area_struct *vma,
+					unsigned long vmaddr)
 {
-	flush_tlb_page(NULL, vmaddr);
+	flush_tlb_page(vma, vmaddr);
+}
+static inline void local_flush_tlb_mm(struct mm_struct *mm)
+{
+	flush_tlb_mm(mm);
 }
 
-#else
+#elif defined(CONFIG_PPC_STD_MMU_64)
+
 /*
- * TLB flushing for 64-bit has-MMU CPUs
+ * TLB flushing for 64-bit hash-MMU CPUs
  */
 
 #include <linux/percpu.h>
@@ -151,11 +137,16 @@ extern void flush_hash_page(unsigned long va, real_pte_t pte, int psize,
 extern void flush_hash_range(unsigned long number, int local);
 
 
+static inline void local_flush_tlb_mm(struct mm_struct *mm)
+{
+}
+
 static inline void flush_tlb_mm(struct mm_struct *mm)
 {
 }
 
-static inline void local_flush_tlb_page(unsigned long vmaddr)
+static inline void local_flush_tlb_page(struct vm_area_struct *vma,
+					unsigned long vmaddr)
 {
 }
 
@@ -183,7 +174,8 @@ static inline void flush_tlb_kernel_range(unsigned long start,
 extern void __flush_hash_table_range(struct mm_struct *mm, unsigned long start,
 				     unsigned long end);
 
-
+#else
+#error Unsupported MMU type
 #endif
 
 #endif /*__KERNEL__ */
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
index 5c33bc14bd9f..2c2ab89f0b64 100644
--- a/arch/powerpc/kernel/misc_32.S
+++ b/arch/powerpc/kernel/misc_32.S
@@ -29,6 +29,7 @@
 #include <asm/asm-offsets.h>
 #include <asm/processor.h>
 #include <asm/kexec.h>
+#include <asm/bug.h>
 
 	.text
 
@@ -496,6 +497,14 @@ _GLOBAL(_tlbil_va)
 	blr
 #endif /* CONFIG_FSL_BOOKE */
 
+/*
+ * Nobody implements this yet
+ */
+_GLOBAL(_tlbivax_bcast)
+1:	trap
+	EMIT_BUG_ENTRY	1b,__FILE__,__LINE__,0;
+	blr
+
 
 /*
  * Flush instruction cache.
diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c
index 341b3d3048e0..dcec1325d340 100644
--- a/arch/powerpc/kernel/ppc_ksyms.c
+++ b/arch/powerpc/kernel/ppc_ksyms.c
@@ -116,12 +116,6 @@ EXPORT_SYMBOL(giveup_spe);
 
 #ifndef CONFIG_PPC64
 EXPORT_SYMBOL(flush_instruction_cache);
-EXPORT_SYMBOL(flush_tlb_kernel_range);
-EXPORT_SYMBOL(flush_tlb_page);
-EXPORT_SYMBOL(_tlbie);
-#if defined(CONFIG_4xx) || defined(CONFIG_8xx) || defined(CONFIG_FSL_BOOKE)
-EXPORT_SYMBOL(_tlbil_va);
-#endif
 #endif
 EXPORT_SYMBOL(__flush_icache_range);
 EXPORT_SYMBOL(flush_dcache_range);
diff --git a/arch/powerpc/mm/Makefile b/arch/powerpc/mm/Makefile
index 923bd3fa7d64..af987df8d5a3 100644
--- a/arch/powerpc/mm/Makefile
+++ b/arch/powerpc/mm/Makefile
@@ -9,7 +9,7 @@ endif
 obj-y				:= fault.o mem.o pgtable.o \
 				   init_$(CONFIG_WORD_SIZE).o \
 				   pgtable_$(CONFIG_WORD_SIZE).o
-obj-$(CONFIG_PPC_MMU_NOHASH)	+= mmu_context_nohash.o
+obj-$(CONFIG_PPC_MMU_NOHASH)	+= mmu_context_nohash.o tlb_nohash.o
 hash-$(CONFIG_PPC_NATIVE)	:= hash_native_64.o
 obj-$(CONFIG_PPC64)		+= hash_utils_64.o \
 				   slb_low.o slb.o stab.o \
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index 7df0409107ad..87f1f955dea4 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -284,7 +284,7 @@ good_area:
 		}
 		pte_update(ptep, 0, _PAGE_HWEXEC |
 			   _PAGE_ACCESSED);
-		_tlbie(address, mm->context.id);
+		local_flush_tlb_page(vma, address);
 		pte_unmap_unlock(ptep, ptl);
 		up_read(&mm->mmap_sem);
 		return 0;
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index b9e1a1da6e52..8fee696fb795 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -488,7 +488,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
 	 * we invalidate the TLB here, thus avoiding dcbst
 	 * misbehaviour.
 	 */
-	_tlbie(address, 0 /* 8xx doesn't care about PID */);
+	_tlbil_va(address, 0 /* 8xx doesn't care about PID */);
 #endif
 	/* The _PAGE_USER test should really be _PAGE_EXEC, but
 	 * older glibc versions execute some code from no-exec
diff --git a/arch/powerpc/mm/tlb_hash32.c b/arch/powerpc/mm/tlb_hash32.c
index f9a47fee3927..65190587a365 100644
--- a/arch/powerpc/mm/tlb_hash32.c
+++ b/arch/powerpc/mm/tlb_hash32.c
@@ -137,6 +137,7 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end)
 	flush_range(&init_mm, start, end);
 	FINISH_FLUSH;
 }
+EXPORT_SYMBOL(flush_tlb_kernel_range);
 
 /*
  * Flush all the (user) entries for the address space described by mm.
@@ -160,6 +161,7 @@ void flush_tlb_mm(struct mm_struct *mm)
 	flush_range(mp->vm_mm, mp->vm_start, mp->vm_end);
 	FINISH_FLUSH;
 }
+EXPORT_SYMBOL(flush_tlb_mm);
 
 void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
 {
@@ -176,6 +178,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
 	flush_hash_pages(mm->context.id, vmaddr, pmd_val(*pmd), 1);
 	FINISH_FLUSH;
 }
+EXPORT_SYMBOL(flush_tlb_page);
 
 /*
  * For each address in the range, find the pte for the address
@@ -188,3 +191,4 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 	flush_range(vma->vm_mm, start, end);
 	FINISH_FLUSH;
 }
+EXPORT_SYMBOL(flush_tlb_range);
diff --git a/arch/powerpc/mm/tlb_nohash.c b/arch/powerpc/mm/tlb_nohash.c
new file mode 100644
index 000000000000..803a64c02b06
--- /dev/null
+++ b/arch/powerpc/mm/tlb_nohash.c
@@ -0,0 +1,209 @@
+/*
+ * This file contains the routines for TLB flushing.
+ * On machines where the MMU does not use a hash table to store virtual to
+ * physical translations (ie, SW loaded TLBs or Book3E compliant processors,
+ * this does -not- include 603 however which shares the implementation with
+ * hash based processors)
+ *
+ *  -- BenH
+ *
+ * Copyright 2008 Ben Herrenschmidt <benh@kernel.crashing.org>
+ *                IBM Corp.
+ *
+ *  Derived from arch/ppc/mm/init.c:
+ *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
+ *
+ *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
+ *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
+ *    Copyright (C) 1996 Paul Mackerras
+ *
+ *  Derived from "arch/i386/mm/init.c"
+ *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
+ *
+ *  This program is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU General Public License
+ *  as published by the Free Software Foundation; either version
+ *  2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/init.h>
+#include <linux/highmem.h>
+#include <linux/pagemap.h>
+#include <linux/preempt.h>
+#include <linux/spinlock.h>
+
+#include <asm/tlbflush.h>
+#include <asm/tlb.h>
+
+#include "mmu_decl.h"
+
+/*
+ * Base TLB flushing operations:
+ *
+ *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
+ *  - flush_tlb_page(vma, vmaddr) flushes one page
+ *  - flush_tlb_range(vma, start, end) flushes a range of pages
+ *  - flush_tlb_kernel_range(start, end) flushes kernel pages
+ *
+ *  - local_* variants of page and mm only apply to the current
+ *    processor
+ */
+
+/*
+ * These are the base non-SMP variants of page and mm flushing
+ */
+void local_flush_tlb_mm(struct mm_struct *mm)
+{
+	unsigned int pid;
+
+	preempt_disable();
+	pid = mm->context.id;
+	if (pid != MMU_NO_CONTEXT)
+		_tlbil_pid(pid);
+	preempt_enable();
+}
+EXPORT_SYMBOL(local_flush_tlb_mm);
+
+void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
+{
+	unsigned int pid;
+
+	preempt_disable();
+	pid = vma ? vma->vm_mm->context.id : 0;
+	if (pid != MMU_NO_CONTEXT)
+		_tlbil_va(vmaddr, pid);
+	preempt_enable();
+}
+EXPORT_SYMBOL(local_flush_tlb_page);
+
+
+/*
+ * And here are the SMP non-local implementations
+ */
+#ifdef CONFIG_SMP
+
+static DEFINE_SPINLOCK(tlbivax_lock);
+
+struct tlb_flush_param {
+	unsigned long addr;
+	unsigned int pid;
+};
+
+static void do_flush_tlb_mm_ipi(void *param)
+{
+	struct tlb_flush_param *p = param;
+
+	_tlbil_pid(p ? p->pid : 0);
+}
+
+static void do_flush_tlb_page_ipi(void *param)
+{
+	struct tlb_flush_param *p = param;
+
+	_tlbil_va(p->addr, p->pid);
+}
+
+
+/* Note on invalidations and PID:
+ *
+ * We snapshot the PID with preempt disabled. At this point, it can still
+ * change either because:
+ * - our context is being stolen (PID -> NO_CONTEXT) on another CPU
+ * - we are invalidating some target that isn't currently running here
+ *   and is concurrently acquiring a new PID on another CPU
+ * - some other CPU is re-acquiring a lost PID for this mm
+ * etc...
+ *
+ * However, this shouldn't be a problem as we only guarantee
+ * invalidation of TLB entries present prior to this call, so we
+ * don't care about the PID changing, and invalidating a stale PID
+ * is generally harmless.
+ */
+
+void flush_tlb_mm(struct mm_struct *mm)
+{
+	cpumask_t cpu_mask;
+	unsigned int pid;
+
+	preempt_disable();
+	pid = mm->context.id;
+	if (unlikely(pid == MMU_NO_CONTEXT))
+		goto no_context;
+	cpu_mask = mm->cpu_vm_mask;
+	cpu_clear(smp_processor_id(), cpu_mask);
+	if (!cpus_empty(cpu_mask)) {
+		struct tlb_flush_param p = { .pid = pid };
+		smp_call_function_mask(cpu_mask, do_flush_tlb_mm_ipi, &p, 1);
+	}
+	_tlbil_pid(pid);
+ no_context:
+	preempt_enable();
+}
+EXPORT_SYMBOL(flush_tlb_mm);
+
+void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
+{
+	cpumask_t cpu_mask;
+	unsigned int pid;
+
+	preempt_disable();
+	pid = vma ? vma->vm_mm->context.id : 0;
+	if (unlikely(pid == MMU_NO_CONTEXT))
+		goto bail;
+	cpu_mask = vma->vm_mm->cpu_vm_mask;
+	cpu_clear(smp_processor_id(), cpu_mask);
+	if (!cpus_empty(cpu_mask)) {
+		/* If broadcast tlbivax is supported, use it */
+		if (mmu_has_feature(MMU_FTR_USE_TLBIVAX_BCAST)) {
+			int lock = mmu_has_feature(MMU_FTR_LOCK_BCAST_INVAL);
+			if (lock)
+				spin_lock(&tlbivax_lock);
+			_tlbivax_bcast(vmaddr, pid);
+			if (lock)
+				spin_unlock(&tlbivax_lock);
+			goto bail;
+		} else {
+			struct tlb_flush_param p = { .pid = pid, .addr = vmaddr };
+			smp_call_function_mask(cpu_mask,
+					       do_flush_tlb_page_ipi, &p, 1);
+		}
+	}
+	_tlbil_va(vmaddr, pid);
+ bail:
+	preempt_enable();
+}
+EXPORT_SYMBOL(flush_tlb_page);
+
+#endif /* CONFIG_SMP */
+
+/*
+ * Flush kernel TLB entries in the given range
+ */
+void flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+#ifdef CONFIG_SMP
+	preempt_disable();
+	smp_call_function(do_flush_tlb_mm_ipi, NULL, 1);
+	_tlbil_pid(0);
+	preempt_enable();
+#else
+	_tlbil_pid(0);
+#endif
+}
+EXPORT_SYMBOL(flush_tlb_kernel_range);
+
+/*
+ * Currently, for range flushing, we just do a full mm flush. This should
+ * be optimized based on a threshold on the size of the range, since
+ * some implementation can stack multiple tlbivax before a tlbsync but
+ * for now, we keep it that way
+ */
+void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+		     unsigned long end)
+{
+	flush_tlb_mm(vma->vm_mm);
+}
+EXPORT_SYMBOL(flush_tlb_range);