Diffstat (limited to 'arch/powerpc/mm/tlb_64.c')
-rw-r--r-- | arch/powerpc/mm/tlb_64.c | 196
1 file changed, 196 insertions, 0 deletions
diff --git a/arch/powerpc/mm/tlb_64.c b/arch/powerpc/mm/tlb_64.c
new file mode 100644
index 000000000000..09ab81a10f4f
--- /dev/null
+++ b/arch/powerpc/mm/tlb_64.c
@@ -0,0 +1,196 @@
/*
 * This file contains the routines for flushing entries from the
 * TLB and MMU hash table.
 *
 * Derived from arch/ppc64/mm/init.c:
 *   Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *   Copyright (C) 1996 Paul Mackerras
 * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
 *
 * Derived from "arch/i386/mm/init.c"
 *   Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 * Dave Engebretsen <engebret@us.ibm.com>
 *   Rework for PPC64 port.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/highmem.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>

DEFINE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);

/* This is declared as we are using the more or less generic
 * include/asm-ppc64/tlb.h file -- tgall
 */
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

struct pte_freelist_batch
{
	struct rcu_head	rcu;
	unsigned int	index;
	pgtable_free_t	tables[0];
};

DEFINE_PER_CPU(struct pte_freelist_batch *, pte_freelist_cur);
unsigned long pte_freelist_forced_free;

#define PTE_FREELIST_SIZE \
	((PAGE_SIZE - sizeof(struct pte_freelist_batch)) \
	  / sizeof(pgtable_free_t))
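
/* Rough sizing note: assuming a 4K PAGE_SIZE and an 8-byte pgtable_free_t
 * (with the batch header padded to 24 bytes), each batch page holds on the
 * order of 500 pending page-table frees before it must be submitted.
 */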

#ifdef CONFIG_SMP
static void pte_free_smp_sync(void *arg)
{
	/* Do nothing, just ensure we sync with all CPUs */
}
#endif

/* This is only called when we are critically out of memory
 * (and fail to get a page in pte_free_tlb).
 */
static void pgtable_free_now(pgtable_free_t pgf)
{
	pte_freelist_forced_free++;

	smp_call_function(pte_free_smp_sync, NULL, 0, 1);

	pgtable_free(pgf);
}

static void pte_free_rcu_callback(struct rcu_head *head)
{
	struct pte_freelist_batch *batch =
		container_of(head, struct pte_freelist_batch, rcu);
	unsigned int i;

	for (i = 0; i < batch->index; i++)
		pgtable_free(batch->tables[i]);

	free_page((unsigned long)batch);
}

static void pte_free_submit(struct pte_freelist_batch *batch)
{
	INIT_RCU_HEAD(&batch->rcu);
	call_rcu(&batch->rcu, pte_free_rcu_callback);
}

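/* Deferring frees: on SMP another CPU may still be walking these page
 * tables without taking page_table_lock (the hash MMU fault path walks
 * them locklessly), so freed tables are queued in a per-CPU batch and
 * only released after an RCU grace period.  The fast path below frees
 * immediately when the mm has a single user or has only run on this CPU,
 * since no other CPU can then hold a reference.
 */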
void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf)
{
	/* This is safe as we are holding page_table_lock */
	cpumask_t local_cpumask = cpumask_of_cpu(smp_processor_id());
	struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);

	if (atomic_read(&tlb->mm->mm_users) < 2 ||
	    cpus_equal(tlb->mm->cpu_vm_mask, local_cpumask)) {
		pgtable_free(pgf);
		return;
	}

	if (*batchp == NULL) {
		/* No batch page yet: allocate one, or fall back to a
		 * synchronous free if we are too short on memory even
		 * for that. */
		*batchp = (struct pte_freelist_batch *)__get_free_page(GFP_ATOMIC);
		if (*batchp == NULL) {
			pgtable_free_now(pgf);
			return;
		}
		(*batchp)->index = 0;
	}
	(*batchp)->tables[(*batchp)->index++] = pgf;
	if ((*batchp)->index == PTE_FREELIST_SIZE) {
		/* Batch page is full: hand it to RCU and start a new one. */
		pte_free_submit(*batchp);
		*batchp = NULL;
	}
}

/*
 * Update the MMU hash table to correspond with a change to
 * a Linux PTE.  If wrprot is true, it is permissible to
 * change the existing HPTE to read-only rather than removing it
 * (if we remove it we should clear the _PAGE_HPTEFLAGS bits).
 */
void hpte_update(struct mm_struct *mm, unsigned long addr,
		 unsigned long pte, int wrprot)
{
	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
	unsigned long vsid;
	int i;

	i = batch->index;

	/*
	 * This can happen when we are in the middle of a TLB batch and
	 * we encounter memory pressure (eg copy_page_range when it tries
	 * to allocate a new pte).  If we have to reclaim memory and end
	 * up scanning and resetting referenced bits then our batch context
	 * will change mid-stream.
	 */
	if (i != 0 && (mm != batch->mm || batch->large != pte_huge(pte))) {
		flush_tlb_pending();
		i = 0;
	}
	if (i == 0) {
		batch->mm = mm;
		batch->large = pte_huge(pte);
	}
	if (addr < KERNELBASE) {
		vsid = get_vsid(mm->context.id, addr);
		WARN_ON(vsid == 0);
	} else
		vsid = get_kernel_vsid(addr);
	batch->vaddr[i] = (vsid << 28) | (addr & 0x0fffffff);
	batch->pte[i] = __pte(pte);
	batch->index = ++i;
	if (i >= PPC64_TLB_BATCH_NR)
		flush_tlb_pending();
}

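/* Flush all pending hash-table updates for the current batch.  When the
 * mm has only ever run on the local CPU the flush can stay local (no
 * broadcast invalidation is needed); otherwise other CPUs must see the
 * invalidation as well.
 */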
void __flush_tlb_pending(struct ppc64_tlb_batch *batch)
{
	int i;
	int cpu;
	cpumask_t tmp;
	int local = 0;

	BUG_ON(in_interrupt());

	cpu = get_cpu();
	i = batch->index;
	tmp = cpumask_of_cpu(cpu);
	if (cpus_equal(batch->mm->cpu_vm_mask, tmp))
		local = 1;

	if (i == 1)
		flush_hash_page(batch->vaddr[0], batch->pte[0], local);
	else
		flush_hash_range(i, local);
	batch->index = 0;
	put_cpu();
}

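/* Hand any partially-filled per-CPU batch to RCU.  Per the generic tlb.h
 * note above, this is assumed to be called from the mmu_gather flush path
 * after the pending TLB flush has been issued, so nothing stays queued on
 * this CPU once an unmap completes.
 */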
void pte_free_finish(void)
{
	/* This is safe as we are holding page_table_lock */
	struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);

	if (*batchp == NULL)
		return;
	pte_free_submit(*batchp);
	*batchp = NULL;
}