Diffstat (limited to 'arch/powerpc/mm/tlb_64.c')
-rw-r--r--	arch/powerpc/mm/tlb_64.c	86
1 file changed, 0 insertions(+), 86 deletions(-)
diff --git a/arch/powerpc/mm/tlb_64.c b/arch/powerpc/mm/tlb_64.c
index be7dd422c0fa..c931bc7d1079 100644
--- a/arch/powerpc/mm/tlb_64.c
+++ b/arch/powerpc/mm/tlb_64.c
@@ -37,81 +37,6 @@ DEFINE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);
  * arch/powerpc/include/asm/tlb.h file -- tgall
  */
 DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
-static DEFINE_PER_CPU(struct pte_freelist_batch *, pte_freelist_cur);
-static unsigned long pte_freelist_forced_free;
-
-struct pte_freelist_batch
-{
-	struct rcu_head rcu;
-	unsigned int index;
-	pgtable_free_t tables[0];
-};
-
-#define PTE_FREELIST_SIZE \
-	((PAGE_SIZE - sizeof(struct pte_freelist_batch)) \
-	 / sizeof(pgtable_free_t))
-
-static void pte_free_smp_sync(void *arg)
-{
-	/* Do nothing, just ensure we sync with all CPUs */
-}
-
-/* This is only called when we are critically out of memory
- * (and fail to get a page in pte_free_tlb).
- */
-static void pgtable_free_now(pgtable_free_t pgf)
-{
-	pte_freelist_forced_free++;
-
-	smp_call_function(pte_free_smp_sync, NULL, 1);
-
-	pgtable_free(pgf);
-}
-
-static void pte_free_rcu_callback(struct rcu_head *head)
-{
-	struct pte_freelist_batch *batch =
-		container_of(head, struct pte_freelist_batch, rcu);
-	unsigned int i;
-
-	for (i = 0; i < batch->index; i++)
-		pgtable_free(batch->tables[i]);
-
-	free_page((unsigned long)batch);
-}
-
-static void pte_free_submit(struct pte_freelist_batch *batch)
-{
-	INIT_RCU_HEAD(&batch->rcu);
-	call_rcu(&batch->rcu, pte_free_rcu_callback);
-}
-
-void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf)
-{
-	/* This is safe since tlb_gather_mmu has disabled preemption */
-	cpumask_t local_cpumask = cpumask_of_cpu(smp_processor_id());
-	struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);
-
-	if (atomic_read(&tlb->mm->mm_users) < 2 ||
-	    cpus_equal(tlb->mm->cpu_vm_mask, local_cpumask)) {
-		pgtable_free(pgf);
-		return;
-	}
-
-	if (*batchp == NULL) {
-		*batchp = (struct pte_freelist_batch *)__get_free_page(GFP_ATOMIC);
-		if (*batchp == NULL) {
-			pgtable_free_now(pgf);
-			return;
-		}
-		(*batchp)->index = 0;
-	}
-	(*batchp)->tables[(*batchp)->index++] = pgf;
-	if ((*batchp)->index == PTE_FREELIST_SIZE) {
-		pte_free_submit(*batchp);
-		*batchp = NULL;
-	}
-}
 
 /*
  * A linux PTE was changed and the corresponding hash table entry
@@ -229,17 +154,6 @@ void __flush_tlb_pending(struct ppc64_tlb_batch *batch)
 	batch->index = 0;
 }
 
-void pte_free_finish(void)
-{
-	/* This is safe since tlb_gather_mmu has disabled preemption */
-	struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);
-
-	if (*batchp == NULL)
-		return;
-	pte_free_submit(*batchp);
-	*batchp = NULL;
-}
-
 /**
  * __flush_hash_table_range - Flush all HPTEs for a given address range
  *                            from the hash table (and the TLB). But keeps