| author | David Howells <dhowells@redhat.com> | 2010-10-27 12:28:49 -0400 |
|---|---|---|
| committer | David Howells <dhowells@redhat.com> | 2010-10-27 12:28:49 -0400 |
| commit | 492e675116003b99dfcf0fa70084027e86bc0161 | |
| tree | c799a3c3f3226700e44dfe323f7c3f1764755049 | |
| parent | 8f19e3daf3fffee9e18a8812067a6a4b538ae6c8 | |
MN10300: Rename __flush_tlb*() to local_flush_tlb*()
Rename __flush_tlb*() to local_flush_tlb*(), as that name is more appropriate and it prepares for differentiating local from global TLB flushes when SMP support is introduced.

Whilst we're at it, get rid of __flush_tlb_global() and make local_flush_tlb_page() take an mm_struct pointer rather than a VMA pointer.
Signed-off-by: David Howells <dhowells@redhat.com>
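For reference, the caller-visible effect of the rename and of the prototype change can be sketched as below. This is an illustrative example only, not code from the patch: the calling function is made up, while the helper names and the vma->vm_mm argument come from the tlbflush.h changes further down.

```c
#include <linux/mm.h>
#include <asm/tlbflush.h>

/* Hypothetical caller, for illustration only.  Code that used to flush a
 * single page through the VMA-based flush_tlb_page() now ends up in the
 * mm-based local primitive introduced by this patch. */
static void example_flush_one_user_page(struct vm_area_struct *vma,
					unsigned long addr)
{
	/* flush_tlb_page(vma, addr) now expands to this: */
	local_flush_tlb_page(vma->vm_mm, addr);
}
```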
| mode | path | lines changed |
|---|---|---|
| -rw-r--r-- | arch/mn10300/include/asm/highmem.h | 4 |
| -rw-r--r-- | arch/mn10300/include/asm/mmu_context.h | 2 |
| -rw-r--r-- | arch/mn10300/include/asm/tlbflush.h | 56 |
| -rw-r--r-- | arch/mn10300/mm/init.c | 2 |
| -rw-r--r-- | arch/mn10300/mm/mmu-context.c | 4 |
| -rw-r--r-- | arch/mn10300/mm/pgtable.c | 2 |

6 files changed, 42 insertions, 28 deletions
diff --git a/arch/mn10300/include/asm/highmem.h b/arch/mn10300/include/asm/highmem.h
index f577ba2268ca..3817d9f34e72 100644
--- a/arch/mn10300/include/asm/highmem.h
+++ b/arch/mn10300/include/asm/highmem.h
@@ -87,7 +87,7 @@ static inline unsigned long __kmap_atomic(struct page *page)
 		BUG();
 #endif
 	set_pte(kmap_pte - idx, mk_pte(page, kmap_prot));
-	__flush_tlb_one(vaddr);
+	local_flush_tlb_one(vaddr);
 
 	return vaddr;
 }
@@ -116,7 +116,7 @@ static inline void __kunmap_atomic(unsigned long vaddr)
 		 * this pte without first remap it
 		 */
 		pte_clear(kmap_pte - idx);
-		__flush_tlb_one(vaddr);
+		local_flush_tlb_one(vaddr);
 	}
 #endif
 	pagefault_enable();
diff --git a/arch/mn10300/include/asm/mmu_context.h b/arch/mn10300/include/asm/mmu_context.h
index cb294c244de3..24d63f0f7377 100644
--- a/arch/mn10300/include/asm/mmu_context.h
+++ b/arch/mn10300/include/asm/mmu_context.h
@@ -58,7 +58,7 @@ static inline unsigned long allocate_mmu_context(struct mm_struct *mm)
 	if (!(mc & MMU_CONTEXT_TLBPID_MASK)) {
 		/* we exhausted the TLB PIDs of this version on this CPU, so we
 		 * flush this CPU's TLB in its entirety and start new cycle */
-		flush_tlb_all();
+		local_flush_tlb_all();
 
 		/* fix the TLB version if needed (we avoid version #0 so as to
 		 * distingush MMU_NO_CONTEXT) */
diff --git a/arch/mn10300/include/asm/tlbflush.h b/arch/mn10300/include/asm/tlbflush.h
index 1a7e29281c5d..5d54bf57e6c3 100644
--- a/arch/mn10300/include/asm/tlbflush.h
+++ b/arch/mn10300/include/asm/tlbflush.h
@@ -13,21 +13,37 @@
 
 #include <asm/processor.h>
 
-#define __flush_tlb()						\
-do {								\
-	int w;							\
-	__asm__ __volatile__					\
-		("	mov %1,%0	\n"			\
-		 "	or %2,%0	\n"			\
-		 "	mov %0,%1	\n"			\
-		 : "=d"(w)					\
-		 : "m"(MMUCTR), "i"(MMUCTR_IIV|MMUCTR_DIV)	\
-		 : "cc", "memory"				\
-		 );						\
-} while (0)
+/**
+ * local_flush_tlb - Flush the current MM's entries from the local CPU's TLBs
+ */
+static inline void local_flush_tlb(void)
+{
+	int w;
+	asm volatile(
+		"	mov %1,%0	\n"
+		"	or %2,%0	\n"
+		"	mov %0,%1	\n"
+		: "=d"(w)
+		: "m"(MMUCTR), "i"(MMUCTR_IIV|MMUCTR_DIV)
+		: "cc", "memory");
+}
 
-#define __flush_tlb_all() __flush_tlb()
-#define __flush_tlb_one(addr) __flush_tlb()
+/**
+ * local_flush_tlb_all - Flush all entries from the local CPU's TLBs
+ */
+#define local_flush_tlb_all()		local_flush_tlb()
+
+/**
+ * local_flush_tlb_one - Flush one entry from the local CPU's TLBs
+ */
+#define local_flush_tlb_one(addr)	local_flush_tlb()
+
+/**
+ * local_flush_tlb_page - Flush a page's entry from the local CPU's TLBs
+ * @mm: The MM to flush for
+ * @addr: The address of the target page in RAM (not its page struct)
+ */
+extern void local_flush_tlb_page(struct mm_struct *mm, unsigned long addr);
 
 
 /*
@@ -43,14 +59,14 @@ do { \
 #define flush_tlb_all()				\
 do {						\
 	preempt_disable();			\
-	__flush_tlb_all();			\
+	local_flush_tlb_all();			\
 	preempt_enable();			\
 } while (0)
 
 #define flush_tlb_mm(mm)			\
 do {						\
 	preempt_disable();			\
-	__flush_tlb_all();			\
+	local_flush_tlb_all();			\
 	preempt_enable();			\
 } while (0)
 
@@ -59,13 +75,13 @@ do { \
 	unsigned long __s __attribute__((unused)) = (start); \
 	unsigned long __e __attribute__((unused)) = (end); \
 	preempt_disable(); \
-	__flush_tlb_all(); \
+	local_flush_tlb_all(); \
 	preempt_enable(); \
 } while (0)
 
+#define flush_tlb_page(vma, addr)	local_flush_tlb_page((vma)->vm_mm, addr)
+#define flush_tlb()			flush_tlb_all()
 
-#define __flush_tlb_global()		flush_tlb_all()
-#define flush_tlb()			flush_tlb_all()
 #define flush_tlb_kernel_range(start, end) \
 do { \
 	unsigned long __s __attribute__((unused)) = (start); \
@@ -73,8 +89,6 @@ do { \
 	flush_tlb_all(); \
 } while (0)
 
-extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr);
-
 #define flush_tlb_pgtables(mm, start, end) do {} while (0)
 
 #endif /* _ASM_TLBFLUSH_H */
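The generic flush_tlb_*() wrappers above now resolve to the local_* primitives with preemption disabled around the flush. As a rough sketch (assuming the UP configuration this header covers; the function name below is hypothetical), a flush_tlb_mm() call boils down to the following:

```c
#include <linux/mm.h>
#include <linux/preempt.h>
#include <asm/tlbflush.h>

/* Sketch of what flush_tlb_mm(mm) expands to after this patch: preemption
 * is disabled so we stay on one CPU, then that CPU's entire TLB is
 * invalidated via local_flush_tlb_all(). */
static void example_flush_mm(struct mm_struct *mm)
{
	preempt_disable();
	local_flush_tlb_all();
	preempt_enable();
}
```

An SMP implementation could later keep local_flush_tlb_all() as the per-CPU operation and have the flush_tlb_*() wrappers additionally notify other CPUs, which is the split this rename prepares for.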
diff --git a/arch/mn10300/mm/init.c b/arch/mn10300/mm/init.c
index f86c28315a8e..1daf97fd7c99 100644
--- a/arch/mn10300/mm/init.c
+++ b/arch/mn10300/mm/init.c
@@ -73,7 +73,7 @@ void __init paging_init(void)
 	/* pass the memory from the bootmem allocator to the main allocator */
 	free_area_init(zones_size);
 
-	__flush_tlb_all();
+	local_flush_tlb_all();
 }
 
 /*
diff --git a/arch/mn10300/mm/mmu-context.c b/arch/mn10300/mm/mmu-context.c
index 36ba02191d40..3d83966e30e1 100644
--- a/arch/mn10300/mm/mmu-context.c
+++ b/arch/mn10300/mm/mmu-context.c
@@ -23,7 +23,7 @@ unsigned long mmu_context_cache[NR_CPUS] = {
 /*
  * flush the specified TLB entry
  */
-void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
+void local_flush_tlb_page(struct mm_struct *mm, unsigned long addr)
 {
 	unsigned long pteu, cnx, flags;
 
@@ -33,7 +33,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
 	 * interference from vmalloc'd regions */
 	local_irq_save(flags);
 
-	cnx = mm_context(vma->vm_mm);
+	cnx = mm_context(mm);
 
 	if (cnx != MMU_NO_CONTEXT) {
 		pteu = addr | (cnx & 0x000000ffUL);
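Usage-wise, the reworked function can now be called with just an mm_struct in hand; the visible part of the body above selects the entry to invalidate by OR-ing the page address with the low eight bits of the mm's context (its TLB PID). A minimal, hypothetical caller:

```c
#include <linux/mm.h>
#include <asm/tlbflush.h>

/* Hypothetical caller, not part of this patch: after changing a user PTE
 * for "addr" in "mm", drop the stale entry from this CPU's TLB.  A VMA is
 * no longer needed just to reach vma->vm_mm. */
static void example_after_pte_update(struct mm_struct *mm, unsigned long addr)
{
	local_flush_tlb_page(mm, addr);
}
```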
diff --git a/arch/mn10300/mm/pgtable.c b/arch/mn10300/mm/pgtable.c
index 9c1624c9e4e9..450f7ba3f8f2 100644
--- a/arch/mn10300/mm/pgtable.c
+++ b/arch/mn10300/mm/pgtable.c
@@ -59,7 +59,7 @@ void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
 	 * It's enough to flush this one mapping.
 	 * (PGE mappings get flushed as well)
 	 */
-	__flush_tlb_one(vaddr);
+	local_flush_tlb_one(vaddr);
 }
 
 pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
