author    | Linus Torvalds <torvalds@ppc970.osdl.org> | 2005-04-16 18:20:36 -0400
committer | Linus Torvalds <torvalds@ppc970.osdl.org> | 2005-04-16 18:20:36 -0400
commit    | 1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree      | 0bba044c4ce775e45a88a51686b5d9f90697ea9d /arch/ppc64/mm/tlb.c
Linux-2.6.12-rc2
Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.
Let it rip!
Diffstat (limited to 'arch/ppc64/mm/tlb.c')
-rw-r--r-- | arch/ppc64/mm/tlb.c | 180
1 file changed, 180 insertions, 0 deletions
diff --git a/arch/ppc64/mm/tlb.c b/arch/ppc64/mm/tlb.c
new file mode 100644
index 000000000000..26f0172c4527
--- /dev/null
+++ b/arch/ppc64/mm/tlb.c
@@ -0,0 +1,180 @@
/*
 * This file contains the routines for flushing entries from the
 * TLB and MMU hash table.
 *
 * Derived from arch/ppc64/mm/init.c:
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
 * Copyright (C) 1996 Paul Mackerras
 * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
 *
 * Derived from "arch/i386/mm/init.c"
 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 * Dave Engebretsen <engebret@us.ibm.com>
 * Rework for PPC64 port.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <linux/highmem.h>

DEFINE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);

/* This is declared as we are using the more or less generic
 * include/asm-ppc64/tlb.h file -- tgall
 */
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
DEFINE_PER_CPU(struct pte_freelist_batch *, pte_freelist_cur);
unsigned long pte_freelist_forced_free;

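/*
 * Illustrative sketch only, not part of this file: the layout of
 * struct pte_freelist_batch is inferred from how it is used below
 * (->rcu, ->index, ->pages[] and PTE_FREELIST_SIZE); the real
 * definition lives in the ppc64 headers (presumably asm-ppc64/pgalloc.h)
 * and may differ in detail.
 *
 *	struct pte_freelist_batch {
 *		struct rcu_head	rcu;
 *		unsigned int	index;
 *		struct page	*pages[0];
 *	};
 *
 *	#define PTE_FREELIST_SIZE \
 *		((PAGE_SIZE - sizeof(struct pte_freelist_batch)) / \
 *		 sizeof(struct page *))
 *
 * In other words, a single GFP_ATOMIC page would hold the batch header
 * plus as many struct page pointers as will fit.
 */
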
void __pte_free_tlb(struct mmu_gather *tlb, struct page *ptepage)
{
	/* This is safe as we are holding page_table_lock */
	cpumask_t local_cpumask = cpumask_of_cpu(smp_processor_id());
	struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);

	if (atomic_read(&tlb->mm->mm_users) < 2 ||
	    cpus_equal(tlb->mm->cpu_vm_mask, local_cpumask)) {
		pte_free(ptepage);
		return;
	}

	if (*batchp == NULL) {
		*batchp = (struct pte_freelist_batch *)__get_free_page(GFP_ATOMIC);
		if (*batchp == NULL) {
			pte_free_now(ptepage);
			return;
		}
		(*batchp)->index = 0;
	}
	(*batchp)->pages[(*batchp)->index++] = ptepage;
	if ((*batchp)->index == PTE_FREELIST_SIZE) {
		pte_free_submit(*batchp);
		*batchp = NULL;
	}
}

/*
 * Update the MMU hash table to correspond with a change to
 * a Linux PTE.  If wrprot is true, it is permissible to
 * change the existing HPTE to read-only rather than removing it
 * (if we remove it we should clear the _PAGE_HPTEFLAGS bits).
 */
void hpte_update(struct mm_struct *mm, unsigned long addr,
		 unsigned long pte, int wrprot)
{
	int i;
	unsigned long context = 0;
	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);

	if (REGION_ID(addr) == USER_REGION_ID)
		context = mm->context.id;
	i = batch->index;

	/*
	 * This can happen when we are in the middle of a TLB batch and
	 * we encounter memory pressure (eg copy_page_range when it tries
	 * to allocate a new pte). If we have to reclaim memory and end
	 * up scanning and resetting referenced bits then our batch context
	 * will change mid stream.
	 */
	if (unlikely(i != 0 && context != batch->context)) {
		flush_tlb_pending();
		i = 0;
	}

	if (i == 0) {
		batch->context = context;
		batch->mm = mm;
	}
	batch->pte[i] = __pte(pte);
	batch->addr[i] = addr;
	batch->index = ++i;
	if (i >= PPC64_TLB_BATCH_NR)
		flush_tlb_pending();
}

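/*
 * Illustrative sketch only, not part of this file: flush_tlb_pending(),
 * used above, is assumed to be a thin wrapper (presumably in
 * asm-ppc64/tlbflush.h) that drains this CPU's batch through
 * __flush_tlb_pending() below.
 *
 *	static inline void flush_tlb_pending(void)
 *	{
 *		struct ppc64_tlb_batch *batch = &get_cpu_var(ppc64_tlb_batch);
 *
 *		if (batch->index)
 *			__flush_tlb_pending(batch);
 *		put_cpu_var(ppc64_tlb_batch);
 *	}
 */
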
void __flush_tlb_pending(struct ppc64_tlb_batch *batch)
{
	int i;
	int cpu;
	cpumask_t tmp;
	int local = 0;

	BUG_ON(in_interrupt());

	cpu = get_cpu();
	i = batch->index;
	tmp = cpumask_of_cpu(cpu);
	if (cpus_equal(batch->mm->cpu_vm_mask, tmp))
		local = 1;

	if (i == 1)
		flush_hash_page(batch->context, batch->addr[0], batch->pte[0],
				local);
	else
		flush_hash_range(batch->context, i, local);
	batch->index = 0;
	put_cpu();
}

#ifdef CONFIG_SMP
static void pte_free_smp_sync(void *arg)
{
	/* Do nothing, just ensure we sync with all CPUs */
}
#endif

/* This is only called when we are critically out of memory
 * (and fail to get a page in pte_free_tlb).
 */
void pte_free_now(struct page *ptepage)
{
	pte_freelist_forced_free++;

	smp_call_function(pte_free_smp_sync, NULL, 0, 1);

	pte_free(ptepage);
}

static void pte_free_rcu_callback(struct rcu_head *head)
{
	struct pte_freelist_batch *batch =
		container_of(head, struct pte_freelist_batch, rcu);
	unsigned int i;

	for (i = 0; i < batch->index; i++)
		pte_free(batch->pages[i]);
	free_page((unsigned long)batch);
}

void pte_free_submit(struct pte_freelist_batch *batch)
{
	INIT_RCU_HEAD(&batch->rcu);
	call_rcu(&batch->rcu, pte_free_rcu_callback);
}

void pte_free_finish(void)
{
	/* This is safe as we are holding page_table_lock */
	struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);

	if (*batchp == NULL)
		return;
	pte_free_submit(*batchp);
	*batchp = NULL;
}
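
A note on how the pieces above are presumably tied together: the file comment refers to the "more or less generic" include/asm-ppc64/tlb.h, and that header is assumed to drain the per-CPU state built up here (the hash/TLB batch and the PTE freelist batch) when an mmu_gather is torn down. A sketch of that assumed wiring, for illustration only and not the actual header contents:

	static inline void tlb_flush(struct mmu_gather *tlb)
	{
		/* flush any hash-table/TLB invalidations still queued on this CPU */
		flush_tlb_pending();
		/* hand any partially filled PTE-free batch over to RCU */
		pte_free_finish();
	}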