Diffstat (limited to 'arch/powerpc/mm/pgtable.c')
-rw-r--r--	arch/powerpc/mm/pgtable.c	98
1 file changed, 0 insertions, 98 deletions
diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c
index 6e72788598f8..af40c8768a78 100644
--- a/arch/powerpc/mm/pgtable.c
+++ b/arch/powerpc/mm/pgtable.c
@@ -33,104 +33,6 @@
 
 #include "mmu_decl.h"
 
-#ifdef CONFIG_SMP
-
-/*
- * Handle batching of page table freeing on SMP. Page tables are
- * queued up and send to be freed later by RCU in order to avoid
- * freeing a page table page that is being walked without locks
- */
-
-static unsigned long pte_freelist_forced_free;
-
-struct pte_freelist_batch
-{
-	struct rcu_head	rcu;
-	unsigned int	index;
-	unsigned long	tables[0];
-};
-
-#define PTE_FREELIST_SIZE \
-	((PAGE_SIZE - sizeof(struct pte_freelist_batch)) \
-	  / sizeof(unsigned long))
-
-static void pte_free_smp_sync(void *arg)
-{
-	/* Do nothing, just ensure we sync with all CPUs */
-}
-
-/* This is only called when we are critically out of memory
- * (and fail to get a page in pte_free_tlb).
- */
-static void pgtable_free_now(void *table, unsigned shift)
-{
-	pte_freelist_forced_free++;
-
-	smp_call_function(pte_free_smp_sync, NULL, 1);
-
-	pgtable_free(table, shift);
-}
-
-static void pte_free_rcu_callback(struct rcu_head *head)
-{
-	struct pte_freelist_batch *batch =
-		container_of(head, struct pte_freelist_batch, rcu);
-	unsigned int i;
-
-	for (i = 0; i < batch->index; i++) {
-		void *table = (void *)(batch->tables[i] & ~MAX_PGTABLE_INDEX_SIZE);
-		unsigned shift = batch->tables[i] & MAX_PGTABLE_INDEX_SIZE;
-
-		pgtable_free(table, shift);
-	}
-
-	free_page((unsigned long)batch);
-}
-
-static void pte_free_submit(struct pte_freelist_batch *batch)
-{
-	call_rcu_sched(&batch->rcu, pte_free_rcu_callback);
-}
-
-void pgtable_free_tlb(struct mmu_gather *tlb, void *table, unsigned shift)
-{
-	struct pte_freelist_batch **batchp = &tlb->arch.batch;
-	unsigned long pgf;
-
-	if (atomic_read(&tlb->mm->mm_users) < 2) {
-		pgtable_free(table, shift);
-		return;
-	}
-
-	if (*batchp == NULL) {
-		*batchp = (struct pte_freelist_batch *)__get_free_page(GFP_ATOMIC);
-		if (*batchp == NULL) {
-			pgtable_free_now(table, shift);
-			return;
-		}
-		(*batchp)->index = 0;
-	}
-	BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
-	pgf = (unsigned long)table | shift;
-	(*batchp)->tables[(*batchp)->index++] = pgf;
-	if ((*batchp)->index == PTE_FREELIST_SIZE) {
-		pte_free_submit(*batchp);
-		*batchp = NULL;
-	}
-}
-
-void pte_free_finish(struct mmu_gather *tlb)
-{
-	struct pte_freelist_batch **batchp = &tlb->arch.batch;
-
-	if (*batchp == NULL)
-		return;
-	pte_free_submit(*batchp);
-	*batchp = NULL;
-}
-
-#endif /* CONFIG_SMP */
-
 static inline int is_exec_fault(void)
 {
 	return current->thread.regs && TRAP(current->thread.regs) == 0x400;
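
Note on the removed code: it is an instance of the classic deferred-reclamation pattern. Page tables are packed into a page-sized batch, and a full batch is handed to call_rcu_sched() so the pages are returned to the allocator only after a grace period, i.e. after every CPU that may be walking page tables without locks has moved on; on allocation failure the code falls back to a synchronous free after an IPI round-trip. The sketch below is a hypothetical, self-contained userspace rendering of the batching idea only. defer_free_batch(), queue_free(), BATCH_SIZE, and INDEX_MASK are illustrative stand-ins, not kernel API, and the sketch frees immediately where a real implementation would defer the frees behind an RCU grace period.

/* Hypothetical userspace sketch of the batched deferred-free pattern
 * used by the removed code. defer_free_batch() stands in for the
 * call_rcu_sched() callback; a real implementation must not free the
 * tables while lock-free readers may still be walking them.
 */
#include <stdio.h>
#include <stdlib.h>

#define BATCH_SIZE	8	/* the kernel sizes the batch to one page */
#define INDEX_MASK	0xfUL	/* stand-in for MAX_PGTABLE_INDEX_SIZE */

struct free_batch {
	unsigned int  index;
	unsigned long tables[BATCH_SIZE];	/* pointer | size-index, packed */
};

/* Stand-in for the RCU callback: in the kernel this runs only after a
 * grace period; here we free immediately to keep the sketch runnable. */
static void defer_free_batch(struct free_batch *batch)
{
	for (unsigned int i = 0; i < batch->index; i++) {
		void *table = (void *)(batch->tables[i] & ~INDEX_MASK);
		unsigned long shift = batch->tables[i] & INDEX_MASK;

		printf("freeing table %p (size index %lu)\n", table, shift);
		free(table);
	}
	free(batch);
}

/* Queue one table; flush the batch to the deferred-free path when full. */
static void queue_free(struct free_batch **batchp, void *table,
		       unsigned long shift)
{
	if (*batchp == NULL) {
		*batchp = calloc(1, sizeof(**batchp));
		if (*batchp == NULL) {	/* out of memory: free synchronously */
			free(table);
			return;
		}
	}
	(*batchp)->tables[(*batchp)->index++] = (unsigned long)table | shift;
	if ((*batchp)->index == BATCH_SIZE) {
		defer_free_batch(*batchp);
		*batchp = NULL;
	}
}

int main(void)
{
	struct free_batch *batch = NULL;

	/* 64-byte alignment keeps the low bits of the pointer clear,
	 * so the size index can be packed into the same word. */
	for (int i = 0; i < 10; i++)
		queue_free(&batch, aligned_alloc(64, 64), 2);

	if (batch)	/* flush the partial batch, as pte_free_finish() did */
		defer_free_batch(batch);
	return 0;
}

The single-word packing works for the same reason as in the removed code: page-table pages are aligned far beyond MAX_PGTABLE_INDEX_SIZE (hence the BUG_ON() guard), leaving the low bits of the pointer free to carry the size index.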