Diffstat (limited to 'arch/powerpc/mm')
 -rw-r--r--  arch/powerpc/mm/Makefile              |  3
 -rw-r--r--  arch/powerpc/mm/fault.c               |  2
 -rw-r--r--  arch/powerpc/mm/gup.c                 | 10
 -rw-r--r--  arch/powerpc/mm/highmem.c             | 77
 -rw-r--r--  arch/powerpc/mm/hugetlbpage.c         |  4
 -rw-r--r--  arch/powerpc/mm/mmu_context_nohash.c  | 17
 -rw-r--r--  arch/powerpc/mm/pgtable.c             |  4
 -rw-r--r--  arch/powerpc/mm/slb.c                 | 13
 -rw-r--r--  arch/powerpc/mm/tlb_hash64.c          |  2
 9 files changed, 103 insertions(+), 29 deletions(-)
diff --git a/arch/powerpc/mm/Makefile b/arch/powerpc/mm/Makefile
index c4bcf072cb3c..3e68363405b7 100644
--- a/arch/powerpc/mm/Makefile
+++ b/arch/powerpc/mm/Makefile
@@ -2,6 +2,8 @@
 # Makefile for the linux ppc-specific parts of the memory manager.
 #
 
+subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror
+
 ifeq ($(CONFIG_PPC64),y)
 EXTRA_CFLAGS	+= -mno-minimal-toc
 endif
@@ -28,3 +30,4 @@ obj-$(CONFIG_PPC_MM_SLICES)	+= slice.o
 obj-$(CONFIG_HUGETLB_PAGE)	+= hugetlbpage.o
 obj-$(CONFIG_PPC_SUBPAGE_PROT)	+= subpage-prot.o
 obj-$(CONFIG_NOT_COHERENT_CACHE) += dma-noncoherent.o
+obj-$(CONFIG_HIGHMEM)		+= highmem.o
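Note on the new flag: unlike EXTRA_CFLAGS (ccflags-y), which applies only to objects built from this Makefile, subdir-ccflags-y is propagated by kbuild into every Makefile below the current directory, so with CONFIG_PPC_WERROR=y all of arch/powerpc/mm, subdirectories included, is built with -Werror.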
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index 5beffc8f481e..830bef0a1131 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -302,7 +302,7 @@ good_area:
 	 * the fault.
 	 */
 survive:
-	ret = handle_mm_fault(mm, vma, address, is_write);
+	ret = handle_mm_fault(mm, vma, address, is_write ? FAULT_FLAG_WRITE : 0);
 	if (unlikely(ret & VM_FAULT_ERROR)) {
 		if (ret & VM_FAULT_OOM)
 			goto out_of_memory;
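This hunk adapts the caller to the generic mm change that widened handle_mm_fault()'s final "write" boolean into a flags bitmask. A before/after sketch of the prototype, from memory of the 2.6.31-era API (treat details as approximate):

	/* Before: the last argument was a boolean "is this a write fault?". */
	int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			    unsigned long address, int write_access);

	/* After: a flag word, hence the is_write ? FAULT_FLAG_WRITE : 0
	 * idiom above; other FAULT_FLAG_* bits can be ORed in by callers. */
	int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			    unsigned long address, unsigned int flags);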
diff --git a/arch/powerpc/mm/gup.c b/arch/powerpc/mm/gup.c
index bc400c78c97f..bc122a120bf0 100644
--- a/arch/powerpc/mm/gup.c
+++ b/arch/powerpc/mm/gup.c
@@ -159,7 +159,7 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
 	int psize;
 #endif
 
-	pr_debug("%s(%lx,%x,%s)\n", __func__, start, nr_pages, write ? "write" : "read");
+	pr_devel("%s(%lx,%x,%s)\n", __func__, start, nr_pages, write ? "write" : "read");
 
 	start &= PAGE_MASK;
 	addr = start;
@@ -170,7 +170,7 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
 					start, len)))
 		goto slow_irqon;
 
-	pr_debug(" aligned: %lx .. %lx\n", start, end);
+	pr_devel(" aligned: %lx .. %lx\n", start, end);
 
 #ifdef CONFIG_HUGETLB_PAGE
 	/* We bail out on slice boundary crossing when hugetlb is
@@ -234,7 +234,7 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
 		do {
 			VM_BUG_ON(shift != mmu_psize_defs[get_slice_psize(mm, a)].shift);
 			ptep = huge_pte_offset(mm, a);
-			pr_debug(" %016lx: huge ptep %p\n", a, ptep);
+			pr_devel(" %016lx: huge ptep %p\n", a, ptep);
 			if (!ptep || !gup_huge_pte(ptep, hstate, &a, end, write, pages,
 						   &nr))
 				goto slow;
@@ -249,7 +249,7 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
 #ifdef CONFIG_PPC64
 			VM_BUG_ON(shift != mmu_psize_defs[get_slice_psize(mm, addr)].shift);
 #endif
-			pr_debug(" %016lx: normal pgd %p\n", addr,
+			pr_devel(" %016lx: normal pgd %p\n", addr,
 				 (void *)pgd_val(pgd));
 			next = pgd_addr_end(addr, end);
 			if (pgd_none(pgd))
@@ -269,7 +269,7 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
 slow:
 		local_irq_enable();
 slow_irqon:
-	pr_debug(" slow path ! nr = %d\n", nr);
+	pr_devel(" slow path ! nr = %d\n", nr);
 
 	/* Try to get the remaining pages with get_user_pages */
 	start += nr << PAGE_SHIFT;
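The pr_debug() to pr_devel() conversions throughout this series keep these low-level messages out of CONFIG_DYNAMIC_DEBUG builds: pr_devel() emits code only when the translation unit itself defines DEBUG. Its definition is roughly the following (approximate, from include/linux/kernel.h of this era):

	#if defined(DEBUG)
	#define pr_devel(fmt, ...) \
		printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
	#else
	#define pr_devel(fmt, ...) \
		({ if (0) printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); 0; })
	#endif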
diff --git a/arch/powerpc/mm/highmem.c b/arch/powerpc/mm/highmem.c
new file mode 100644
index 000000000000..c2186c74c85a
--- /dev/null
+++ b/arch/powerpc/mm/highmem.c
@@ -0,0 +1,77 @@
+/*
+ * highmem.c: virtual kernel memory mappings for high memory
+ *
+ * PowerPC version, stolen from the i386 version.
+ *
+ * Used in CONFIG_HIGHMEM systems for memory pages which
+ * are not addressable by direct kernel virtual addresses.
+ *
+ * Copyright (C) 1999 Gerhard Wichert, Siemens AG
+ *		      Gerhard.Wichert@pdb.siemens.de
+ *
+ *
+ * Redesigned the x86 32-bit VM architecture to deal with
+ * up to 16 Terrabyte physical memory. With current x86 CPUs
+ * we now support up to 64 Gigabytes physical RAM.
+ *
+ * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
+ *
+ * Reworked for PowerPC by various contributors. Moved from
+ * highmem.h by Benjamin Herrenschmidt (c) 2009 IBM Corp.
+ */
+
+#include <linux/highmem.h>
+#include <linux/module.h>
+
+/*
+ * The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap
+ * gives a more generic (and caching) interface. But kmap_atomic can
+ * be used in IRQ contexts, so in some (very limited) cases we need
+ * it.
+ */
+void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
+{
+	unsigned int idx;
+	unsigned long vaddr;
+
+	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
+	pagefault_disable();
+	if (!PageHighMem(page))
+		return page_address(page);
+
+	debug_kmap_atomic(type);
+	idx = type + KM_TYPE_NR*smp_processor_id();
+	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+#ifdef CONFIG_DEBUG_HIGHMEM
+	BUG_ON(!pte_none(*(kmap_pte-idx)));
+#endif
+	__set_pte_at(&init_mm, vaddr, kmap_pte-idx, mk_pte(page, prot), 1);
+	local_flush_tlb_page(NULL, vaddr);
+
+	return (void*) vaddr;
+}
+EXPORT_SYMBOL(kmap_atomic_prot);
+
+void kunmap_atomic(void *kvaddr, enum km_type type)
+{
+#ifdef CONFIG_DEBUG_HIGHMEM
+	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
+	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
+
+	if (vaddr < __fix_to_virt(FIX_KMAP_END)) {
+		pagefault_enable();
+		return;
+	}
+
+	BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
+
+	/*
+	 * force other mappings to Oops if they'll try to access
+	 * this pte without first remap it
+	 */
+	pte_clear(&init_mm, vaddr, kmap_pte-idx);
+	local_flush_tlb_page(NULL, vaddr);
+#endif
+	pagefault_enable();
+}
+EXPORT_SYMBOL(kunmap_atomic);
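For context, a minimal usage sketch of the km_type-era atomic kmap API this file implements (a hypothetical helper, not part of the patch); the mapping must be released before sleeping, since pagefaults stay disabled in between:

	#include <linux/highmem.h>
	#include <linux/string.h>

	/* Hypothetical example: zero a page that may live in highmem. */
	static void zero_page_example(struct page *page)
	{
		void *vaddr = kmap_atomic(page, KM_USER0);	/* pagefaults off */

		memset(vaddr, 0, PAGE_SIZE);
		kunmap_atomic(vaddr, KM_USER0);			/* pagefaults back on */
	}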
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 9920d6a7cf29..c46ef2ffa3d9 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -305,7 +305,7 @@ static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
 
 	pmd = pmd_offset(pud, start);
 	pud_clear(pud);
-	pmd_free_tlb(tlb, pmd);
+	pmd_free_tlb(tlb, pmd, start);
 }
 
 static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
@@ -348,7 +348,7 @@ static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
 
 	pud = pud_offset(pgd, start);
 	pgd_clear(pgd);
-	pud_free_tlb(tlb, pud);
+	pud_free_tlb(tlb, pud, start);
 }
 
 /*
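These two hunks follow the tree-wide mm change that added the virtual address of the range being unmapped as a third argument to pmd_free_tlb() and pud_free_tlb(), so architectures that need the address for TLB invalidation can see it; the powerpc callers simply pass the start of the range they just cleared.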
diff --git a/arch/powerpc/mm/mmu_context_nohash.c b/arch/powerpc/mm/mmu_context_nohash.c
index 8343986809c0..b1a727def15b 100644
--- a/arch/powerpc/mm/mmu_context_nohash.c
+++ b/arch/powerpc/mm/mmu_context_nohash.c
@@ -89,7 +89,7 @@ static unsigned int steal_context_smp(unsigned int id)
 			id = first_context;
 			continue;
 		}
-		pr_debug("[%d] steal context %d from mm @%p\n",
+		pr_devel("[%d] steal context %d from mm @%p\n",
 			 smp_processor_id(), id, mm);
 
 		/* Mark this mm has having no context anymore */
@@ -126,7 +126,7 @@ static unsigned int steal_context_up(unsigned int id)
 	/* Pick up the victim mm */
 	mm = context_mm[id];
 
-	pr_debug("[%d] steal context %d from mm @%p\n", cpu, id, mm);
+	pr_devel("[%d] steal context %d from mm @%p\n", cpu, id, mm);
 
 	/* Flush the TLB for that context */
 	local_flush_tlb_mm(mm);
@@ -180,7 +180,7 @@ void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
 	spin_lock(&context_lock);
 
 #ifndef DEBUG_STEAL_ONLY
-	pr_debug("[%d] activating context for mm @%p, active=%d, id=%d\n",
+	pr_devel("[%d] activating context for mm @%p, active=%d, id=%d\n",
 		 cpu, next, next->context.active, next->context.id);
 #endif
 
@@ -189,7 +189,7 @@ void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
 	next->context.active++;
 	if (prev) {
 #ifndef DEBUG_STEAL_ONLY
-		pr_debug(" old context %p active was: %d\n",
+		pr_devel(" old context %p active was: %d\n",
 			 prev, prev->context.active);
 #endif
 		WARN_ON(prev->context.active < 1);
@@ -217,6 +217,7 @@ void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
 			id = steal_context_smp(id);
 			if (id == MMU_NO_CONTEXT)
 				goto again;
+			goto stolen;
 		}
 #endif /* CONFIG_SMP */
 		id = steal_context_up(id);
@@ -236,7 +237,7 @@ void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
 	next->context.id = id;
 
 #ifndef DEBUG_STEAL_ONLY
-	pr_debug("[%d] picked up new id %d, nrf is now %d\n",
+	pr_devel("[%d] picked up new id %d, nrf is now %d\n",
 		 cpu, id, nr_free_contexts);
 #endif
 
@@ -247,7 +248,7 @@ void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
 	 * local TLB for it and unmark it before we use it
 	 */
 	if (test_bit(id, stale_map[cpu])) {
-		pr_debug("[%d] flushing stale context %d for mm @%p !\n",
+		pr_devel("[%d] flushing stale context %d for mm @%p !\n",
 			 cpu, id, next);
 		local_flush_tlb_mm(next);
 
@@ -314,13 +315,13 @@ static int __cpuinit mmu_context_cpu_notify(struct notifier_block *self,
 	switch (action) {
 	case CPU_ONLINE:
 	case CPU_ONLINE_FROZEN:
-		pr_debug("MMU: Allocating stale context map for CPU %d\n", cpu);
+		pr_devel("MMU: Allocating stale context map for CPU %d\n", cpu);
 		stale_map[cpu] = kzalloc(CTX_MAP_SIZE, GFP_KERNEL);
 		break;
#ifdef CONFIG_HOTPLUG_CPU
 	case CPU_DEAD:
 	case CPU_DEAD_FROZEN:
-		pr_debug("MMU: Freeing stale context map for CPU %d\n", cpu);
+		pr_devel("MMU: Freeing stale context map for CPU %d\n", cpu);
 		kfree(stale_map[cpu]);
 		stale_map[cpu] = NULL;
 		break;
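Besides the print conversions, the one functional change here is the added goto stolen: on SMP, a successful steal_context_smp() previously fell through into the UP path and stole a second, different context. A simplified sketch of the corrected allocation logic (names are from this file, structure approximate):

	/* No more free contexts, let's try to steal one */
	if (nr_free_contexts == 0) {
	#ifdef CONFIG_SMP
		if (num_online_cpus() > 1) {
			id = steal_context_smp(id);
			if (id == MMU_NO_CONTEXT)
				goto again;
			goto stolen;	/* the fix: skip the UP steal below */
		}
	#endif
		id = steal_context_up(id);
		goto stolen;
	}
	/* ...otherwise take a free id from the context map... */
	stolen:
	next->context.id = id;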
diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c
index ae1d67cc090c..627767d6169b 100644
--- a/arch/powerpc/mm/pgtable.c
+++ b/arch/powerpc/mm/pgtable.c
@@ -129,12 +129,12 @@ static pte_t do_dcache_icache_coherency(pte_t pte)
 	page = pfn_to_page(pfn);
 
 	if (!PageReserved(page) && !test_bit(PG_arch_1, &page->flags)) {
-		pr_debug("do_dcache_icache_coherency... flushing\n");
+		pr_devel("do_dcache_icache_coherency... flushing\n");
 		flush_dcache_icache_page(page);
 		set_bit(PG_arch_1, &page->flags);
 	}
 	else
-		pr_debug("do_dcache_icache_coherency... already clean\n");
+		pr_devel("do_dcache_icache_coherency... already clean\n");
 	return __pte(pte_val(pte) | _PAGE_HWEXEC);
 }
 
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index 3b52c80e5e33..5b7038f248b6 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -14,8 +14,6 @@
  * 2 of the License, or (at your option) any later version.
  */
 
-#undef DEBUG
-
 #include <asm/pgtable.h>
 #include <asm/mmu.h>
 #include <asm/mmu_context.h>
@@ -27,11 +25,6 @@
 #include <linux/compiler.h>
 #include <asm/udbg.h>
 
-#ifdef DEBUG
-#define DBG(fmt...) printk(fmt)
-#else
-#define DBG pr_debug
-#endif
 
 extern void slb_allocate_realmode(unsigned long ea);
 extern void slb_allocate_user(unsigned long ea);
@@ -285,13 +278,13 @@ void slb_initialize(void)
 		patch_slb_encoding(slb_compare_rr_to_size,
 				   mmu_slb_size);
 
-		DBG("SLB: linear LLP = %04lx\n", linear_llp);
-		DBG("SLB: io LLP = %04lx\n", io_llp);
+		pr_devel("SLB: linear LLP = %04lx\n", linear_llp);
+		pr_devel("SLB: io LLP = %04lx\n", io_llp);
 
 #ifdef CONFIG_SPARSEMEM_VMEMMAP
 		patch_slb_encoding(slb_miss_kernel_load_vmemmap,
 				   SLB_VSID_KERNEL | vmemmap_llp);
-		DBG("SLB: vmemmap LLP = %04lx\n", vmemmap_llp);
+		pr_devel("SLB: vmemmap LLP = %04lx\n", vmemmap_llp);
 #endif
 	}
 
diff --git a/arch/powerpc/mm/tlb_hash64.c b/arch/powerpc/mm/tlb_hash64.c
index 1be1b5e59796..937eb90677d9 100644
--- a/arch/powerpc/mm/tlb_hash64.c
+++ b/arch/powerpc/mm/tlb_hash64.c
@@ -72,7 +72,7 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
 	 */
 	if (huge) {
 #ifdef CONFIG_HUGETLB_PAGE
-		psize = get_slice_psize(mm, addr);;
+		psize = get_slice_psize(mm, addr);
 #else
 		BUG();
 		psize = pte_pagesize_index(mm, addr, pte); /* shutup gcc */