Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/include/asm/pgalloc-32.h  |  11
-rw-r--r--  arch/powerpc/include/asm/pgalloc-64.h  |  34
-rw-r--r--  arch/powerpc/include/asm/pgalloc.h     |  41
-rw-r--r--  arch/powerpc/mm/Makefile               |   2
-rw-r--r--  arch/powerpc/mm/hash_low_32.S          |  30
-rw-r--r--  arch/powerpc/mm/pgtable.c              | 117
-rw-r--r--  arch/powerpc/mm/pgtable_32.c           |  21
-rw-r--r--  arch/powerpc/mm/tlb_64.c               |  86
8 files changed, 167 insertions(+), 175 deletions(-)
diff --git a/arch/powerpc/include/asm/pgalloc-32.h b/arch/powerpc/include/asm/pgalloc-32.h
index 58c07147b3ea..0815eb40acae 100644
--- a/arch/powerpc/include/asm/pgalloc-32.h
+++ b/arch/powerpc/include/asm/pgalloc-32.h
@@ -3,6 +3,8 @@
 
 #include <linux/threads.h>
 
+#define PTE_NONCACHE_NUM	0  /* dummy for now to share code w/ppc64 */
+
 extern void __bad_pte(pmd_t *pmd);
 
 extern pgd_t *pgd_alloc(struct mm_struct *mm);
@@ -33,10 +35,13 @@ extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
 
 extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr);
 extern pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long addr);
-extern void pte_free_kernel(struct mm_struct *mm, pte_t *pte);
-extern void pte_free(struct mm_struct *mm, pgtable_t pte);
 
-#define __pte_free_tlb(tlb, pte)	pte_free((tlb)->mm, (pte))
+static inline void pgtable_free(pgtable_free_t pgf)
+{
+	void *p = (void *)(pgf.val & ~PGF_CACHENUM_MASK);
+
+	free_page((unsigned long)p);
+}
 
 #define check_pgt_cache()	do { } while (0)
 
diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h
index 812a1d8f35cb..afda2bdd860f 100644
--- a/arch/powerpc/include/asm/pgalloc-64.h
+++ b/arch/powerpc/include/asm/pgalloc-64.h
@@ -7,7 +7,6 @@
  * 2 of the License, or (at your option) any later version.
  */
 
-#include <linux/mm.h>
 #include <linux/slab.h>
 #include <linux/cpumask.h>
 #include <linux/percpu.h>
@@ -108,31 +107,6 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
 	return page;
 }
 
-static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
-{
-	free_page((unsigned long)pte);
-}
-
-static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
-{
-	pgtable_page_dtor(ptepage);
-	__free_page(ptepage);
-}
-
-#define PGF_CACHENUM_MASK	0x7
-
-typedef struct pgtable_free {
-	unsigned long val;
-} pgtable_free_t;
-
-static inline pgtable_free_t pgtable_free_cache(void *p, int cachenum,
-						unsigned long mask)
-{
-	BUG_ON(cachenum > PGF_CACHENUM_MASK);
-
-	return (pgtable_free_t){.val = ((unsigned long) p & ~mask) | cachenum};
-}
-
 static inline void pgtable_free(pgtable_free_t pgf)
 {
 	void *p = (void *)(pgf.val & ~PGF_CACHENUM_MASK);
@@ -144,14 +118,6 @@ static inline void pgtable_free(pgtable_free_t pgf)
 		kmem_cache_free(pgtable_cache[cachenum], p);
 }
 
-extern void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf);
-
-#define __pte_free_tlb(tlb,ptepage)	\
-do { \
-	pgtable_page_dtor(ptepage); \
-	pgtable_free_tlb(tlb, pgtable_free_cache(page_address(ptepage), \
-		PTE_NONCACHE_NUM, PTE_TABLE_SIZE-1)); \
-} while (0)
 #define __pmd_free_tlb(tlb, pmd)	\
 	pgtable_free_tlb(tlb, pgtable_free_cache(pmd, \
 		PMD_CACHE_NUM, PMD_TABLE_SIZE-1))
diff --git a/arch/powerpc/include/asm/pgalloc.h b/arch/powerpc/include/asm/pgalloc.h
index b4505ed0f0f2..5d8480265a77 100644
--- a/arch/powerpc/include/asm/pgalloc.h
+++ b/arch/powerpc/include/asm/pgalloc.h
@@ -2,11 +2,52 @@
 #define _ASM_POWERPC_PGALLOC_H
 #ifdef __KERNEL__
 
+#include <linux/mm.h>
+
+static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
+{
+	free_page((unsigned long)pte);
+}
+
+static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
+{
+	pgtable_page_dtor(ptepage);
+	__free_page(ptepage);
+}
+
+typedef struct pgtable_free {
+	unsigned long val;
+} pgtable_free_t;
+
+#define PGF_CACHENUM_MASK	0x7
+
+static inline pgtable_free_t pgtable_free_cache(void *p, int cachenum,
+						unsigned long mask)
+{
+	BUG_ON(cachenum > PGF_CACHENUM_MASK);
+
+	return (pgtable_free_t){.val = ((unsigned long) p & ~mask) | cachenum};
+}
+
 #ifdef CONFIG_PPC64
 #include <asm/pgalloc-64.h>
 #else
 #include <asm/pgalloc-32.h>
 #endif
 
+extern void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf);
+
+#ifdef CONFIG_SMP
+#define __pte_free_tlb(tlb,ptepage)	\
+do { \
+	pgtable_page_dtor(ptepage); \
+	pgtable_free_tlb(tlb, pgtable_free_cache(page_address(ptepage), \
+		PTE_NONCACHE_NUM, PTE_TABLE_SIZE-1)); \
+} while (0)
+#else
+#define __pte_free_tlb(tlb, pte)	pte_free((tlb)->mm, (pte))
+#endif
+
+
 #endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_PGALLOC_H */
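A note on the pgtable_free_t encoding now centralized in this header: page tables come from caches whose objects are aligned to their size, so the low bits of a table's address are guaranteed zero. pgtable_free_cache() stores a 3-bit cache number there (PGF_CACHENUM_MASK), and pgtable_free() later splits the word back into pointer and cache index. Below is a minimal userspace sketch of the round-trip; encode() and the demo names are illustrative, not kernel API.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PGF_CACHENUM_MASK 0x7UL

/* Stand-in for a table allocated from a size-aligned cache. */
static _Alignas(4096) char table[4096];

/* Mirror of pgtable_free_cache(): clear the low size bits and tag
 * them with the cache number; 'size_mask' is TABLE_SIZE-1. */
static uintptr_t encode(void *p, unsigned int cachenum, uintptr_t size_mask)
{
	assert(cachenum <= PGF_CACHENUM_MASK);
	return ((uintptr_t)p & ~size_mask) | cachenum;
}

int main(void)
{
	uintptr_t val = encode(table, 5, 4096 - 1);

	/* Mirror of pgtable_free(): strip only the 3 tag bits. Safe
	 * because alignment guarantees bits 3..11 were already zero. */
	void *p = (void *)(val & ~PGF_CACHENUM_MASK);
	unsigned int cachenum = (unsigned int)(val & PGF_CACHENUM_MASK);

	printf("pointer restored: %s, cachenum: %u\n",
	       p == (void *)table ? "yes" : "no", cachenum);
	return 0;
}

Note the asymmetry mirrored from the kernel code: encoding clears all low size bits, while decoding clears only the three tag bits; alignment makes the two equivalent.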
diff --git a/arch/powerpc/mm/Makefile b/arch/powerpc/mm/Makefile
index e7392b45a5ef..86e657bcfa7e 100644
--- a/arch/powerpc/mm/Makefile
+++ b/arch/powerpc/mm/Makefile
@@ -6,7 +6,7 @@ ifeq ($(CONFIG_PPC64),y)
 EXTRA_CFLAGS	+= -mno-minimal-toc
 endif
 
-obj-y				:= fault.o mem.o \
+obj-y				:= fault.o mem.o pgtable.o \
 				   init_$(CONFIG_WORD_SIZE).o \
 				   pgtable_$(CONFIG_WORD_SIZE).o \
 				   mmu_context_$(CONFIG_WORD_SIZE).o
diff --git a/arch/powerpc/mm/hash_low_32.S b/arch/powerpc/mm/hash_low_32.S
index 7bffb70b9fe2..c5536b8b37a9 100644
--- a/arch/powerpc/mm/hash_low_32.S
+++ b/arch/powerpc/mm/hash_low_32.S
@@ -36,36 +36,6 @@ mmu_hash_lock:
 #endif /* CONFIG_SMP */
 
 /*
- * Sync CPUs with hash_page taking & releasing the hash
- * table lock
- */
-#ifdef CONFIG_SMP
-	.text
-_GLOBAL(hash_page_sync)
-	mfmsr	r10
-	rlwinm	r0,r10,0,17,15		/* clear bit 16 (MSR_EE) */
-	mtmsr	r0
-	lis	r8,mmu_hash_lock@h
-	ori	r8,r8,mmu_hash_lock@l
-	lis	r0,0x0fff
-	b	10f
-11:	lwz	r6,0(r8)
-	cmpwi	0,r6,0
-	bne	11b
-10:	lwarx	r6,0,r8
-	cmpwi	0,r6,0
-	bne-	11b
-	stwcx.	r0,0,r8
-	bne-	10b
-	isync
-	eieio
-	li	r0,0
-	stw	r0,0(r8)
-	mtmsr	r10
-	blr
-#endif /* CONFIG_SMP */
-
-/*
  * Load a PTE into the hash table, if possible.
  * The address is in r4, and r3 contains an access flag:
  * _PAGE_RW (0x400) if a write.
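The deleted hash_page_sync() let a CPU about to free a PTE page drain concurrent walkers: with interrupts off it took and immediately released mmu_hash_lock (via lwarx/stwcx.), so any hash_page() already walking page tables under that lock on another CPU had to finish first. RCU-deferred freeing (see pgtable.c below) makes that drain unnecessary. A userspace analogue of the lock round-trip idiom, using pthreads rather than the kernel's spinlock and MSR manipulation, is sketched here; it is only an analogue, not the removed code.

/* Build with: cc -pthread drain.c */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t walk_lock = PTHREAD_MUTEX_INITIALIZER;

/* Taking and releasing the lock that all walkers hold guarantees
 * every walker that entered before this call has left. */
static void drain_walkers(void)
{
	pthread_mutex_lock(&walk_lock);
	pthread_mutex_unlock(&walk_lock);
}

int main(void)
{
	drain_walkers();	/* after this, the old table can be freed */
	puts("walkers drained");
	return 0;
}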
diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c
new file mode 100644
index 000000000000..6d94116fdea1
--- /dev/null
+++ b/arch/powerpc/mm/pgtable.c
@@ -0,0 +1,117 @@
+/*
+ * This file contains common routines for dealing with free of page tables
+ *
+ *  Derived from arch/powerpc/mm/tlb_64.c:
+ *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
+ *
+ *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
+ *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
+ *    Copyright (C) 1996 Paul Mackerras
+ *
+ *  Derived from "arch/i386/mm/init.c"
+ *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
+ *
+ *  Dave Engebretsen <engebret@us.ibm.com>
+ *      Rework for PPC64 port.
+ *
+ *  This program is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU General Public License
+ *  as published by the Free Software Foundation; either version
+ *  2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/init.h>
+#include <linux/percpu.h>
+#include <linux/hardirq.h>
+#include <asm/pgalloc.h>
+#include <asm/tlbflush.h>
+#include <asm/tlb.h>
+
+static DEFINE_PER_CPU(struct pte_freelist_batch *, pte_freelist_cur);
+static unsigned long pte_freelist_forced_free;
+
+struct pte_freelist_batch
+{
+	struct rcu_head	rcu;
+	unsigned int	index;
+	pgtable_free_t	tables[0];
+};
+
+#define PTE_FREELIST_SIZE \
+	((PAGE_SIZE - sizeof(struct pte_freelist_batch)) \
+	  / sizeof(pgtable_free_t))
+
+static void pte_free_smp_sync(void *arg)
+{
+	/* Do nothing, just ensure we sync with all CPUs */
+}
+
+/* This is only called when we are critically out of memory
+ * (and fail to get a page in pte_free_tlb).
+ */
+static void pgtable_free_now(pgtable_free_t pgf)
+{
+	pte_freelist_forced_free++;
+
+	smp_call_function(pte_free_smp_sync, NULL, 1);
+
+	pgtable_free(pgf);
+}
+
+static void pte_free_rcu_callback(struct rcu_head *head)
+{
+	struct pte_freelist_batch *batch =
+		container_of(head, struct pte_freelist_batch, rcu);
+	unsigned int i;
+
+	for (i = 0; i < batch->index; i++)
+		pgtable_free(batch->tables[i]);
+
+	free_page((unsigned long)batch);
+}
+
+static void pte_free_submit(struct pte_freelist_batch *batch)
+{
+	INIT_RCU_HEAD(&batch->rcu);
+	call_rcu(&batch->rcu, pte_free_rcu_callback);
+}
+
+void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf)
+{
+	/* This is safe since tlb_gather_mmu has disabled preemption */
+	cpumask_t local_cpumask = cpumask_of_cpu(smp_processor_id());
+	struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);
+
+	if (atomic_read(&tlb->mm->mm_users) < 2 ||
+	    cpus_equal(tlb->mm->cpu_vm_mask, local_cpumask)) {
+		pgtable_free(pgf);
+		return;
+	}
+
+	if (*batchp == NULL) {
+		*batchp = (struct pte_freelist_batch *)__get_free_page(GFP_ATOMIC);
+		if (*batchp == NULL) {
+			pgtable_free_now(pgf);
+			return;
+		}
+		(*batchp)->index = 0;
+	}
+	(*batchp)->tables[(*batchp)->index++] = pgf;
+	if ((*batchp)->index == PTE_FREELIST_SIZE) {
+		pte_free_submit(*batchp);
+		*batchp = NULL;
+	}
+}
+
+void pte_free_finish(void)
+{
+	/* This is safe since tlb_gather_mmu has disabled preemption */
+	struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);
+
+	if (*batchp == NULL)
+		return;
+	pte_free_submit(*batchp);
+	*batchp = NULL;
+}
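pgtable_free_tlb() above frees a table immediately when no other CPU can be referencing it (the mm has at most one user, or has only ever run on the local CPU); otherwise it queues the table in a page-sized per-CPU batch that call_rcu() disposes of after a grace period, with pgtable_free_now() as the IPI-based fallback when even a GFP_ATOMIC page is unavailable. PTE_FREELIST_SIZE is simply however many one-word entries fit in the rest of that page. A quick userspace check of the arithmetic follows, with an assumed 4KB page and LP64 sizes; the struct is a stand-in, not the kernel's layout.

#include <stdio.h>

/* Stand-ins with the same shape as struct pte_freelist_batch:
 * an rcu_head (two pointers), an index, and a flexible array of
 * one-word pgtable_free_t entries. All sizes are assumptions. */
struct rcu_head_like { void *next; void *func; };

struct batch {
	struct rcu_head_like rcu;
	unsigned int index;		/* padded out to 8 bytes */
	unsigned long tables[];
};

int main(void)
{
	const unsigned long page_size = 4096;	/* assumed PAGE_SIZE */
	unsigned long n = (page_size - sizeof(struct batch))
			  / sizeof(unsigned long);

	/* With a 16 + 8 byte header this prints 509 entries. */
	printf("header=%zu bytes, entries per batch=%lu\n",
	       sizeof(struct batch), n);
	return 0;
}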
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index 44fbc81c9b2c..c7b755cba26a 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -48,10 +48,6 @@ EXPORT_SYMBOL(ioremap_bot);	/* aka VMALLOC_END */
 
 extern char etext[], _stext[];
 
-#if defined(CONFIG_SMP) && defined(CONFIG_PPC_STD_MMU_32)
-extern void hash_page_sync(void);
-#endif
-
 #ifdef HAVE_BATS
 extern phys_addr_t v_mapped_by_bats(unsigned long va);
 extern unsigned long p_mapped_by_bats(phys_addr_t pa);
@@ -125,23 +121,6 @@ pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
 	return ptepage;
 }
 
-void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
-{
-#if defined(CONFIG_SMP) && defined(CONFIG_PPC_STD_MMU_32)
-	hash_page_sync();
-#endif
-	free_page((unsigned long)pte);
-}
-
-void pte_free(struct mm_struct *mm, pgtable_t ptepage)
-{
-#if defined(CONFIG_SMP) && defined(CONFIG_PPC_STD_MMU_32)
-	hash_page_sync();
-#endif
-	pgtable_page_dtor(ptepage);
-	__free_page(ptepage);
-}
-
 void __iomem *
 ioremap(phys_addr_t addr, unsigned long size)
 {
diff --git a/arch/powerpc/mm/tlb_64.c b/arch/powerpc/mm/tlb_64.c
index be7dd422c0fa..c931bc7d1079 100644
--- a/arch/powerpc/mm/tlb_64.c
+++ b/arch/powerpc/mm/tlb_64.c
@@ -37,81 +37,6 @@ DEFINE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);
  * arch/powerpc/include/asm/tlb.h file -- tgall
  */
 DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
-static DEFINE_PER_CPU(struct pte_freelist_batch *, pte_freelist_cur);
-static unsigned long pte_freelist_forced_free;
-
-struct pte_freelist_batch
-{
-	struct rcu_head	rcu;
-	unsigned int	index;
-	pgtable_free_t	tables[0];
-};
-
-#define PTE_FREELIST_SIZE \
-	((PAGE_SIZE - sizeof(struct pte_freelist_batch)) \
-	  / sizeof(pgtable_free_t))
-
-static void pte_free_smp_sync(void *arg)
-{
-	/* Do nothing, just ensure we sync with all CPUs */
-}
-
-/* This is only called when we are critically out of memory
- * (and fail to get a page in pte_free_tlb).
- */
-static void pgtable_free_now(pgtable_free_t pgf)
-{
-	pte_freelist_forced_free++;
-
-	smp_call_function(pte_free_smp_sync, NULL, 1);
-
-	pgtable_free(pgf);
-}
-
-static void pte_free_rcu_callback(struct rcu_head *head)
-{
-	struct pte_freelist_batch *batch =
-		container_of(head, struct pte_freelist_batch, rcu);
-	unsigned int i;
-
-	for (i = 0; i < batch->index; i++)
-		pgtable_free(batch->tables[i]);
-
-	free_page((unsigned long)batch);
-}
-
-static void pte_free_submit(struct pte_freelist_batch *batch)
-{
-	INIT_RCU_HEAD(&batch->rcu);
-	call_rcu(&batch->rcu, pte_free_rcu_callback);
-}
-
-void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf)
-{
-	/* This is safe since tlb_gather_mmu has disabled preemption */
-	cpumask_t local_cpumask = cpumask_of_cpu(smp_processor_id());
-	struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);
-
-	if (atomic_read(&tlb->mm->mm_users) < 2 ||
-	    cpus_equal(tlb->mm->cpu_vm_mask, local_cpumask)) {
-		pgtable_free(pgf);
-		return;
-	}
-
-	if (*batchp == NULL) {
-		*batchp = (struct pte_freelist_batch *)__get_free_page(GFP_ATOMIC);
-		if (*batchp == NULL) {
-			pgtable_free_now(pgf);
-			return;
-		}
-		(*batchp)->index = 0;
-	}
-	(*batchp)->tables[(*batchp)->index++] = pgf;
-	if ((*batchp)->index == PTE_FREELIST_SIZE) {
-		pte_free_submit(*batchp);
-		*batchp = NULL;
-	}
-}
 
 /*
  * A linux PTE was changed and the corresponding hash table entry
@@ -229,17 +154,6 @@ void __flush_tlb_pending(struct ppc64_tlb_batch *batch)
 	batch->index = 0;
 }
 
-void pte_free_finish(void)
-{
-	/* This is safe since tlb_gather_mmu has disabled preemption */
-	struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);
-
-	if (*batchp == NULL)
-		return;
-	pte_free_submit(*batchp);
-	*batchp = NULL;
-}
-
 /**
  * __flush_hash_table_range - Flush all HPTEs for a given address range
  *                            from the hash table (and the TLB). But keeps