Diffstat (limited to 'arch/powerpc/mm/pgtable.c')
-rw-r--r-- | arch/powerpc/mm/pgtable.c | 104
1 file changed, 0 insertions, 104 deletions
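The removed block is powerpc's SMP-only batching of page table frees. Tables queued in a per-CPU pte_freelist_batch page are released through call_rcu(), so a page table page is never freed while another CPU may still be walking it without locks; the free is done immediately when the mm has fewer than two users or is live only on the current CPU, and falls back to a synchronous free (pgtable_free_now(), which syncs with all CPUs via smp_call_function()) when no batch page can be allocated with GFP_ATOMIC. A standalone sketch of the pointer/shift encoding used by the batch entries follows the diff.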
diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c
index 2c7e801ab20b..af40c8768a78 100644
--- a/arch/powerpc/mm/pgtable.c
+++ b/arch/powerpc/mm/pgtable.c
@@ -33,110 +33,6 @@
 
 #include "mmu_decl.h"
 
-DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
-
-#ifdef CONFIG_SMP
-
-/*
- * Handle batching of page table freeing on SMP. Page tables are
- * queued up and send to be freed later by RCU in order to avoid
- * freeing a page table page that is being walked without locks
- */
-
-static DEFINE_PER_CPU(struct pte_freelist_batch *, pte_freelist_cur);
-static unsigned long pte_freelist_forced_free;
-
-struct pte_freelist_batch
-{
-	struct rcu_head	rcu;
-	unsigned int	index;
-	unsigned long	tables[0];
-};
-
-#define PTE_FREELIST_SIZE \
-	((PAGE_SIZE - sizeof(struct pte_freelist_batch)) \
-	  / sizeof(unsigned long))
-
-static void pte_free_smp_sync(void *arg)
-{
-	/* Do nothing, just ensure we sync with all CPUs */
-}
-
-/* This is only called when we are critically out of memory
- * (and fail to get a page in pte_free_tlb).
- */
-static void pgtable_free_now(void *table, unsigned shift)
-{
-	pte_freelist_forced_free++;
-
-	smp_call_function(pte_free_smp_sync, NULL, 1);
-
-	pgtable_free(table, shift);
-}
-
-static void pte_free_rcu_callback(struct rcu_head *head)
-{
-	struct pte_freelist_batch *batch =
-		container_of(head, struct pte_freelist_batch, rcu);
-	unsigned int i;
-
-	for (i = 0; i < batch->index; i++) {
-		void *table = (void *)(batch->tables[i] & ~MAX_PGTABLE_INDEX_SIZE);
-		unsigned shift = batch->tables[i] & MAX_PGTABLE_INDEX_SIZE;
-
-		pgtable_free(table, shift);
-	}
-
-	free_page((unsigned long)batch);
-}
-
-static void pte_free_submit(struct pte_freelist_batch *batch)
-{
-	call_rcu(&batch->rcu, pte_free_rcu_callback);
-}
-
-void pgtable_free_tlb(struct mmu_gather *tlb, void *table, unsigned shift)
-{
-	/* This is safe since tlb_gather_mmu has disabled preemption */
-	struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);
-	unsigned long pgf;
-
-	if (atomic_read(&tlb->mm->mm_users) < 2 ||
-	    cpumask_equal(mm_cpumask(tlb->mm), cpumask_of(smp_processor_id()))) {
-		pgtable_free(table, shift);
-		return;
-	}
-
-	if (*batchp == NULL) {
-		*batchp = (struct pte_freelist_batch *)__get_free_page(GFP_ATOMIC);
-		if (*batchp == NULL) {
-			pgtable_free_now(table, shift);
-			return;
-		}
-		(*batchp)->index = 0;
-	}
-	BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
-	pgf = (unsigned long)table | shift;
-	(*batchp)->tables[(*batchp)->index++] = pgf;
-	if ((*batchp)->index == PTE_FREELIST_SIZE) {
-		pte_free_submit(*batchp);
-		*batchp = NULL;
-	}
-}
-
-void pte_free_finish(void)
-{
-	/* This is safe since tlb_gather_mmu has disabled preemption */
-	struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);
-
-	if (*batchp == NULL)
-		return;
-	pte_free_submit(*batchp);
-	*batchp = NULL;
-}
-
-#endif /* CONFIG_SMP */
-
 static inline int is_exec_fault(void)
 {
 	return current->thread.regs && TRAP(current->thread.regs) == 0x400;
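For illustration, here is a minimal, self-contained C sketch of the trick the batch entries rely on: because page table pages are aligned far past MAX_PGTABLE_INDEX_SIZE, the shift can ride in the low bits of the table pointer, exactly as pgf = (unsigned long)table | shift does above and as the masks in pte_free_rcu_callback() undo. The concrete values of MAX_PGTABLE_INDEX_SIZE (0xf) and PAGE_SIZE, and the helper names pgf_encode()/pgf_decode(), are assumptions made for this sketch, not kernel API.

/*
 * Sketch only: userspace demo of packing a page-aligned table pointer
 * and its page-table shift into one unsigned long, as the removed
 * pte_freelist_batch entries do.  MAX_PGTABLE_INDEX_SIZE == 0xf and
 * PAGE_SIZE == 4096 are assumed values; pgf_encode()/pgf_decode() are
 * made-up names for the inline expressions in the diff.
 */
#include <stdio.h>
#include <stdlib.h>

#define MAX_PGTABLE_INDEX_SIZE	0xfUL	/* assumed mask value */
#define PAGE_SIZE		4096UL	/* assumed page size */

static unsigned long pgf_encode(void *table, unsigned long shift)
{
	/* Mirrors "pgf = (unsigned long)table | shift"; valid because
	 * the diff checks BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE). */
	return (unsigned long)table | shift;
}

static void *pgf_decode(unsigned long pgf, unsigned long *shift)
{
	/* Mirrors the two masks used by pte_free_rcu_callback(). */
	*shift = pgf & MAX_PGTABLE_INDEX_SIZE;
	return (void *)(pgf & ~MAX_PGTABLE_INDEX_SIZE);
}

int main(void)
{
	/* Page table pages are page-aligned, so the tag bits start 0. */
	void *table = aligned_alloc(PAGE_SIZE, PAGE_SIZE);
	unsigned long pgf, shift;
	void *decoded;

	if (!table)
		return 1;

	pgf = pgf_encode(table, 9);	/* any shift <= 0xf works */
	decoded = pgf_decode(pgf, &shift);
	printf("decoded %p shift %lu (stored %p)\n", decoded, shift, table);

	free(table);
	return 0;
}

Packing the shift this way lets a single unsigned long per entry carry everything pgtable_free() needs, which is what allows the one-page batch to hold PTE_FREELIST_SIZE entries after the rcu_head/index header.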