author		Kumar Gala <galak@kernel.crashing.org>		2008-11-19 07:50:04 -0500
committer	Paul Mackerras <paulus@samba.org>		2008-12-03 04:46:35 -0500
commit		0186f47e703fb7aa14b54459d642ef5374b3a685 (patch)
tree		5af0bc2bf92b9ef7edf79aab989387de4d8ffaaf /arch/powerpc/mm
parent		df3b8611554e389e703fa753540289874fa5126c (diff)
powerpc: Use RCU based pte freeing mechanism for all powerpc
Refactor the RCU based pte free code that was used on ppc64 to be used
on all powerpc.
Additionally refactor pte_free() & pte_free_kernel() into common code
between ppc32 & ppc64.
Signed-off-by: Kumar Gala <galak@kernel.crashing.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
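
The pte_free() / pte_free_kernel() half of the refactor lands in the shared pgalloc headers and so falls outside the diffstat below, which is limited to arch/powerpc/mm. As a sketch of where that code ends up — reconstructed from the ppc32 versions deleted from pgtable_32.c further down, minus their hash_page_sync() calls, and not the verbatim header contents — the common helpers reduce to:

	/* Sketch only: reconstructed common pte_free helpers (assumed to live
	 * in asm/pgalloc.h after this commit); the bodies match the ppc32
	 * versions removed below, with the hash_page_sync() calls gone.
	 */
	static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
	{
		free_page((unsigned long)pte);
	}

	static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
	{
		pgtable_page_dtor(ptepage);	/* undo pte_alloc_one()'s ctor */
		__free_page(ptepage);
	}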
Diffstat (limited to 'arch/powerpc/mm')
-rw-r--r--	arch/powerpc/mm/Makefile	2
-rw-r--r--	arch/powerpc/mm/hash_low_32.S	30
-rw-r--r--	arch/powerpc/mm/pgtable.c	117
-rw-r--r--	arch/powerpc/mm/pgtable_32.c	21
-rw-r--r--	arch/powerpc/mm/tlb_64.c	86
5 files changed, 118 insertions(+), 138 deletions(-)
diff --git a/arch/powerpc/mm/Makefile b/arch/powerpc/mm/Makefile
index e7392b45a5ef..86e657bcfa7e 100644
--- a/arch/powerpc/mm/Makefile
+++ b/arch/powerpc/mm/Makefile
@@ -6,7 +6,7 @@ ifeq ($(CONFIG_PPC64),y)
 EXTRA_CFLAGS	+= -mno-minimal-toc
 endif
 
-obj-y				:= fault.o mem.o \
+obj-y				:= fault.o mem.o pgtable.o \
 					init_$(CONFIG_WORD_SIZE).o \
 					pgtable_$(CONFIG_WORD_SIZE).o \
 					mmu_context_$(CONFIG_WORD_SIZE).o
diff --git a/arch/powerpc/mm/hash_low_32.S b/arch/powerpc/mm/hash_low_32.S
index 7bffb70b9fe2..c5536b8b37a9 100644
--- a/arch/powerpc/mm/hash_low_32.S
+++ b/arch/powerpc/mm/hash_low_32.S
@@ -36,36 +36,6 @@ mmu_hash_lock:
 #endif /* CONFIG_SMP */
 
 /*
- * Sync CPUs with hash_page taking & releasing the hash
- * table lock
- */
-#ifdef CONFIG_SMP
-	.text
-_GLOBAL(hash_page_sync)
-	mfmsr	r10
-	rlwinm	r0,r10,0,17,15		/* clear bit 16 (MSR_EE) */
-	mtmsr	r0
-	lis	r8,mmu_hash_lock@h
-	ori	r8,r8,mmu_hash_lock@l
-	lis	r0,0x0fff
-	b	10f
-11:	lwz	r6,0(r8)
-	cmpwi	0,r6,0
-	bne	11b
-10:	lwarx	r6,0,r8
-	cmpwi	0,r6,0
-	bne-	11b
-	stwcx.	r0,0,r8
-	bne-	10b
-	isync
-	eieio
-	li	r0,0
-	stw	r0,0(r8)
-	mtmsr	r10
-	blr
-#endif /* CONFIG_SMP */
-
-/*
  * Load a PTE into the hash table, if possible.
  * The address is in r4, and r3 contains an access flag:
  * _PAGE_RW (0x400) if a write.
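
The routine deleted above was the old ppc32 synchronization point: before a page-table page could be freed, hash_page_sync() cleared MSR_EE to mask external interrupts, then acquired and immediately released mmu_hash_lock via the lwarx/stwcx. reservation loop, guaranteeing that any hash_page() still walking page tables on another CPU had drained. In rough C terms (illustrative only — in the assembly mmu_hash_lock is a bare word taken with an open-coded spin loop, not a spinlock_t):

	void hash_page_sync(void)
	{
		unsigned long msr = mfmsr();

		mtmsr(msr & ~MSR_EE);		/* don't take interrupts while spinning */
		spin_lock(&mmu_hash_lock);	/* drains any in-flight hash_page() */
		spin_unlock(&mmu_hash_lock);	/* taking the lock was the whole point */
		mtmsr(msr);			/* restore the saved MSR */
	}

The RCU scheme introduced in pgtable.c below removes the need for this per-free global handshake.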
diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c
new file mode 100644
index 000000000000..6d94116fdea1
--- /dev/null
+++ b/arch/powerpc/mm/pgtable.c
@@ -0,0 +1,117 @@
+/*
+ * This file contains common routines for dealing with free of page tables
+ *
+ * Derived from arch/powerpc/mm/tlb_64.c:
+ *  Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
+ *
+ * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
+ * and Cort Dougan (PReP) (cort@cs.nmt.edu)
+ * Copyright (C) 1996 Paul Mackerras
+ *
+ * Derived from "arch/i386/mm/init.c"
+ *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
+ *
+ * Dave Engebretsen <engebret@us.ibm.com>
+ *      Rework for PPC64 port.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/init.h>
+#include <linux/percpu.h>
+#include <linux/hardirq.h>
+#include <asm/pgalloc.h>
+#include <asm/tlbflush.h>
+#include <asm/tlb.h>
+
+static DEFINE_PER_CPU(struct pte_freelist_batch *, pte_freelist_cur);
+static unsigned long pte_freelist_forced_free;
+
+struct pte_freelist_batch
+{
+	struct rcu_head	rcu;
+	unsigned int	index;
+	pgtable_free_t	tables[0];
+};
+
+#define PTE_FREELIST_SIZE \
+	((PAGE_SIZE - sizeof(struct pte_freelist_batch)) \
+	  / sizeof(pgtable_free_t))
+
+static void pte_free_smp_sync(void *arg)
+{
+	/* Do nothing, just ensure we sync with all CPUs */
+}
+
+/* This is only called when we are critically out of memory
+ * (and fail to get a page in pte_free_tlb).
+ */
+static void pgtable_free_now(pgtable_free_t pgf)
+{
+	pte_freelist_forced_free++;
+
+	smp_call_function(pte_free_smp_sync, NULL, 1);
+
+	pgtable_free(pgf);
+}
+
+static void pte_free_rcu_callback(struct rcu_head *head)
+{
+	struct pte_freelist_batch *batch =
+		container_of(head, struct pte_freelist_batch, rcu);
+	unsigned int i;
+
+	for (i = 0; i < batch->index; i++)
+		pgtable_free(batch->tables[i]);
+
+	free_page((unsigned long)batch);
+}
+
+static void pte_free_submit(struct pte_freelist_batch *batch)
+{
+	INIT_RCU_HEAD(&batch->rcu);
+	call_rcu(&batch->rcu, pte_free_rcu_callback);
+}
+
+void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf)
+{
+	/* This is safe since tlb_gather_mmu has disabled preemption */
+	cpumask_t local_cpumask = cpumask_of_cpu(smp_processor_id());
+	struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);
+
+	if (atomic_read(&tlb->mm->mm_users) < 2 ||
+	    cpus_equal(tlb->mm->cpu_vm_mask, local_cpumask)) {
+		pgtable_free(pgf);
+		return;
+	}
+
+	if (*batchp == NULL) {
+		*batchp = (struct pte_freelist_batch *)__get_free_page(GFP_ATOMIC);
+		if (*batchp == NULL) {
+			pgtable_free_now(pgf);
+			return;
+		}
+		(*batchp)->index = 0;
+	}
+	(*batchp)->tables[(*batchp)->index++] = pgf;
+	if ((*batchp)->index == PTE_FREELIST_SIZE) {
+		pte_free_submit(*batchp);
+		*batchp = NULL;
+	}
+}
+
+void pte_free_finish(void)
+{
+	/* This is safe since tlb_gather_mmu has disabled preemption */
+	struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);
+
+	if (*batchp == NULL)
+		return;
+	pte_free_submit(*batchp);
+	*batchp = NULL;
+}
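
How the new file is wired in (a sketch of the call path, not part of this diff): during unmapping under tlb_gather_mmu(), the arch's __pte_free_tlb() hands each page-table page to pgtable_free_tlb(); batched pages are only freed in pte_free_rcu_callback() after an RCU grace period, once no CPU can still be traversing them without a lock, and the TLB-flush path calls pte_free_finish() to submit a partial batch. Each batch page holds PTE_FREELIST_SIZE entries — with 4 KiB pages, a 24-byte batch header and an 8-byte pgtable_free_t, that is (4096 - 24) / 8 = 509 entries on a 64-bit kernel. A hypothetical caller, using the pgtable_free_cache() encoding from the ppc64 pgalloc header of this era (names assumed, not shown in this diff):

	/* Hypothetical sketch: an arch header routing pte_free_tlb() into the
	 * batching code; pgtable_free_cache() packs the pointer and cache index
	 * into a pgtable_free_t.
	 */
	#define __pte_free_tlb(tlb, ptepage)	\
		pgtable_free_tlb(tlb, pgtable_free_cache(page_address(ptepage), \
			PTE_NONCACHE_NUM, PTE_TABLE_SIZE-1))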
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index 44fbc81c9b2c..c7b755cba26a 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -48,10 +48,6 @@ EXPORT_SYMBOL(ioremap_bot);	/* aka VMALLOC_END */
 
 extern char etext[], _stext[];
 
-#if defined(CONFIG_SMP) && defined(CONFIG_PPC_STD_MMU_32)
-extern void hash_page_sync(void);
-#endif
-
 #ifdef HAVE_BATS
 extern phys_addr_t v_mapped_by_bats(unsigned long va);
 extern unsigned long p_mapped_by_bats(phys_addr_t pa);
@@ -125,23 +121,6 @@ pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
 	return ptepage;
 }
 
-void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
-{
-#if defined(CONFIG_SMP) && defined(CONFIG_PPC_STD_MMU_32)
-	hash_page_sync();
-#endif
-	free_page((unsigned long)pte);
-}
-
-void pte_free(struct mm_struct *mm, pgtable_t ptepage)
-{
-#if defined(CONFIG_SMP) && defined(CONFIG_PPC_STD_MMU_32)
-	hash_page_sync();
-#endif
-	pgtable_page_dtor(ptepage);
-	__free_page(ptepage);
-}
-
 void __iomem *
 ioremap(phys_addr_t addr, unsigned long size)
 {
diff --git a/arch/powerpc/mm/tlb_64.c b/arch/powerpc/mm/tlb_64.c
index be7dd422c0fa..c931bc7d1079 100644
--- a/arch/powerpc/mm/tlb_64.c
+++ b/arch/powerpc/mm/tlb_64.c
@@ -37,81 +37,6 @@ DEFINE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);
  * arch/powerpc/include/asm/tlb.h file -- tgall
  */
 DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
-static DEFINE_PER_CPU(struct pte_freelist_batch *, pte_freelist_cur);
-static unsigned long pte_freelist_forced_free;
-
-struct pte_freelist_batch
-{
-	struct rcu_head	rcu;
-	unsigned int	index;
-	pgtable_free_t	tables[0];
-};
-
-#define PTE_FREELIST_SIZE \
-	((PAGE_SIZE - sizeof(struct pte_freelist_batch)) \
-	  / sizeof(pgtable_free_t))
-
-static void pte_free_smp_sync(void *arg)
-{
-	/* Do nothing, just ensure we sync with all CPUs */
-}
-
-/* This is only called when we are critically out of memory
- * (and fail to get a page in pte_free_tlb).
- */
-static void pgtable_free_now(pgtable_free_t pgf)
-{
-	pte_freelist_forced_free++;
-
-	smp_call_function(pte_free_smp_sync, NULL, 1);
-
-	pgtable_free(pgf);
-}
-
-static void pte_free_rcu_callback(struct rcu_head *head)
-{
-	struct pte_freelist_batch *batch =
-		container_of(head, struct pte_freelist_batch, rcu);
-	unsigned int i;
-
-	for (i = 0; i < batch->index; i++)
-		pgtable_free(batch->tables[i]);
-
-	free_page((unsigned long)batch);
-}
-
-static void pte_free_submit(struct pte_freelist_batch *batch)
-{
-	INIT_RCU_HEAD(&batch->rcu);
-	call_rcu(&batch->rcu, pte_free_rcu_callback);
-}
-
-void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf)
-{
-	/* This is safe since tlb_gather_mmu has disabled preemption */
-	cpumask_t local_cpumask = cpumask_of_cpu(smp_processor_id());
-	struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);
-
-	if (atomic_read(&tlb->mm->mm_users) < 2 ||
-	    cpus_equal(tlb->mm->cpu_vm_mask, local_cpumask)) {
-		pgtable_free(pgf);
-		return;
-	}
-
-	if (*batchp == NULL) {
-		*batchp = (struct pte_freelist_batch *)__get_free_page(GFP_ATOMIC);
-		if (*batchp == NULL) {
-			pgtable_free_now(pgf);
-			return;
-		}
-		(*batchp)->index = 0;
-	}
-	(*batchp)->tables[(*batchp)->index++] = pgf;
-	if ((*batchp)->index == PTE_FREELIST_SIZE) {
-		pte_free_submit(*batchp);
-		*batchp = NULL;
-	}
-}
 
 /*
  * A linux PTE was changed and the corresponding hash table entry
@@ -229,17 +154,6 @@ void __flush_tlb_pending(struct ppc64_tlb_batch *batch)
 	batch->index = 0;
 }
 
-void pte_free_finish(void)
-{
-	/* This is safe since tlb_gather_mmu has disabled preemption */
-	struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);
-
-	if (*batchp == NULL)
-		return;
-	pte_free_submit(*batchp);
-	*batchp = NULL;
-}
-
 /**
  * __flush_hash_table_range - Flush all HPTEs for a given address range
  *                            from the hash table (and the TLB). But keeps