author	Jeff Garzik <jgarzik@pretzel.yyz.us>	2005-05-26 21:40:25 -0400
committer	Jeff Garzik <jgarzik@pobox.com>	2005-05-26 21:40:25 -0400
commit	462cee296476278acaa54c41925b3273e0e4dd40 (patch)
tree	c9d9fcfc4dd62807cb67678a6eda0151bfa8be95 /arch/arm
parent	126fa4b9ca5d9d7cb7d46f779ad3bd3631ca387c (diff)
parent	4ec5240ec367a592834385893200dd4fb369354c (diff)
Automatic merge of rsync://rsync.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6.git branch HEAD
Diffstat (limited to 'arch/arm')
-rw-r--r--	arch/arm/mach-s3c2410/clock.c	2
-rw-r--r--	arch/arm/mach-s3c2410/s3c2440.c	6
-rw-r--r--	arch/arm/mm/Kconfig	21
-rw-r--r--	arch/arm/mm/copypage-v4mc.S	80
-rw-r--r--	arch/arm/mm/copypage-v4mc.c	111
-rw-r--r--	arch/arm/mm/copypage-v6.c	28
-rw-r--r--	arch/arm/mm/flush.c	37
-rw-r--r--	arch/arm/mm/mm-armv.c	27
8 files changed, 186 insertions(+), 126 deletions(-)
diff --git a/arch/arm/mach-s3c2410/clock.c b/arch/arm/mach-s3c2410/clock.c
index e23f534d4e1d..8d986b8401c2 100644
--- a/arch/arm/mach-s3c2410/clock.c
+++ b/arch/arm/mach-s3c2410/clock.c
@@ -478,7 +478,7 @@ static int s3c2440_clk_add(struct sys_device *sysdev)
 {
 	unsigned long upllcon = __raw_readl(S3C2410_UPLLCON);
 
-	s3c2440_clk_upll.rate = s3c2410_get_pll(upllcon, clk_xtal.rate) * 2;
+	s3c2440_clk_upll.rate = s3c2410_get_pll(upllcon, clk_xtal.rate);
 
 	printk("S3C2440: Clock Support, UPLL %ld.%03ld MHz\n",
 	       print_mhz(s3c2440_clk_upll.rate));
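
For reference, s3c2410_get_pll() computes the standard Samsung PLL output. A minimal sketch of that calculation, with the PLLCON field layout assumed from the S3C2410 manual rather than taken from this patch:

static unsigned long pll_rate(unsigned long pllval, unsigned long baseclk)
{
	unsigned long mdiv = (pllval >> 12) & 0xff;	/* MDIV, bits 19:12 (assumed) */
	unsigned long pdiv = (pllval >> 4) & 0x3f;	/* PDIV, bits 9:4 (assumed) */
	unsigned long sdiv = pllval & 0x3;		/* SDIV, bits 1:0 (assumed) */

	/* Fpll = (m * Fin) / (p * 2^s), with m = MDIV + 8 and p = PDIV + 2 */
	return (baseclk * (mdiv + 8)) / ((pdiv + 2) << sdiv);
}

The dropped "* 2" is the S3C2440's output doubler, which the datasheet applies to the MPLL only; the UPLL keeps the plain S3C2410 formula, so doubling its rate here was wrong.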
diff --git a/arch/arm/mach-s3c2410/s3c2440.c b/arch/arm/mach-s3c2410/s3c2440.c
index 9a8cc5ae2255..d4c8281b55f6 100644
--- a/arch/arm/mach-s3c2410/s3c2440.c
+++ b/arch/arm/mach-s3c2410/s3c2440.c
@@ -192,9 +192,11 @@ void __init s3c2440_map_io(struct map_desc *mach_desc, int size)
 
 	iotable_init(s3c2440_iodesc, ARRAY_SIZE(s3c2440_iodesc));
 	iotable_init(mach_desc, size);
+
 	/* rename any peripherals used differing from the s3c2410 */
 
 	s3c_device_i2c.name  = "s3c2440-i2c";
+	s3c_device_nand.name = "s3c2440-nand";
 
 	/* change irq for watchdog */
 
@@ -225,7 +227,7 @@ void __init s3c2440_init_clocks(int xtal)
 		break;
 
 	case S3C2440_CLKDIVN_HDIVN_2:
-		hdiv = 1;
+		hdiv = 2;
 		break;
 
 	case S3C2440_CLKDIVN_HDIVN_4_8:
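
This fix treats hdiv as a plain divisor rather than a shift count. A hedged one-line sketch of how the surrounding function presumably uses it (variable names assumed, not shown in this hunk):

	hclk = fclk / hdiv;	/* HDIVN_2 selects HCLK = FCLK/2, so hdiv must be 2 */

With the old value of 1, the divide-by-2 case silently left HCLK equal to FCLK.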
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index c4fc6be629de..48bac7da8c70 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -412,21 +412,20 @@ config CPU_BPREDICT_DISABLE
 
 config TLS_REG_EMUL
 	bool
-	default y if (SMP || CPU_32v6) && (CPU_32v5 || CPU_32v4 || CPU_32v3)
+	default y if SMP && (CPU_32v5 || CPU_32v4 || CPU_32v3)
 	help
-	  We might be running on an ARMv6+ processor which should have the TLS
-	  register but for some reason we can't use it, or maybe an SMP system
-	  using a pre-ARMv6 processor (there are apparently a few prototypes
-	  like that in existence) and therefore access to that register must
-	  be emulated.
+	  An SMP system using a pre-ARMv6 processor (there are apparently
+	  a few prototypes like that in existence) and therefore access to
+	  that required register must be emulated.
 
 config HAS_TLS_REG
 	bool
-	depends on CPU_32v6
-	default y if !TLS_REG_EMUL
+	depends on !TLS_REG_EMUL
+	default y if SMP || CPU_32v7
 	help
 	  This selects support for the CP15 thread register.
-	  It is defined to be available on ARMv6 or later.  If a particular
-	  ARMv6 or later CPU doesn't support it then it must omc;ide "select
-	  TLS_REG_EMUL" along with its other caracteristics.
+	  It is defined to be available on some ARMv6 processors (including
+	  all SMP capable ARMv6's) or later processors.  User space may
+	  assume directly accessing that register and always obtain the
+	  expected value only on ARMv7 and above.
 
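
For context, the register both options manage is the CP15 c13 thread ID register. Where HAS_TLS_REG holds, the thread pointer can be read directly; a hedged user-space sketch, not part of this patch:

static inline unsigned long read_tp(void)
{
	unsigned long tp;

	/* read TPIDRURO, the user read-only thread ID register */
	asm("mrc p15, 0, %0, c13, c0, 3" : "=r" (tp));
	return tp;
}

On TLS_REG_EMUL configurations the same mrc is undefined and is trapped and emulated by the kernel, which is why portable user code is expected to go through the 0xffff0fe0 kuser helper rather than issue the mrc itself.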
diff --git a/arch/arm/mm/copypage-v4mc.S b/arch/arm/mm/copypage-v4mc.S
deleted file mode 100644
index 305af3dab3d8..000000000000
--- a/arch/arm/mm/copypage-v4mc.S
+++ /dev/null
@@ -1,80 +0,0 @@
-/*
- *  linux/arch/arm/lib/copy_page-armv4mc.S
- *
- *  Copyright (C) 1995-2001 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- *  ASM optimised string functions
- */
-#include <linux/linkage.h>
-#include <linux/init.h>
-#include <asm/constants.h>
-
-	.text
-	.align	5
-/*
- * ARMv4 mini-dcache optimised copy_user_page
- *
- * We flush the destination cache lines just before we write the data into the
- * corresponding address.  Since the Dcache is read-allocate, this removes the
- * Dcache aliasing issue.  The writes will be forwarded to the write buffer,
- * and merged as appropriate.
- *
- * Note: We rely on all ARMv4 processors implementing the "invalidate D line"
- * instruction.  If your processor does not supply this, you have to write your
- * own copy_user_page that does the right thing.
- */
-ENTRY(v4_mc_copy_user_page)
-	stmfd	sp!, {r4, lr}			@ 2
-	mov	r4, r0
-	mov	r0, r1
-	bl	map_page_minicache
-	mov	r1, #PAGE_SZ/64			@ 1
-	ldmia	r0!, {r2, r3, ip, lr}		@ 4
-1:	mcr	p15, 0, r4, c7, c6, 1		@ 1   invalidate D line
-	stmia	r4!, {r2, r3, ip, lr}		@ 4
-	ldmia	r0!, {r2, r3, ip, lr}		@ 4+1
-	stmia	r4!, {r2, r3, ip, lr}		@ 4
-	ldmia	r0!, {r2, r3, ip, lr}		@ 4
-	mcr	p15, 0, r4, c7, c6, 1		@ 1   invalidate D line
-	stmia	r4!, {r2, r3, ip, lr}		@ 4
-	ldmia	r0!, {r2, r3, ip, lr}		@ 4
-	subs	r1, r1, #1			@ 1
-	stmia	r4!, {r2, r3, ip, lr}		@ 4
-	ldmneia	r0!, {r2, r3, ip, lr}		@ 4
-	bne	1b				@ 1
-	ldmfd	sp!, {r4, pc}			@ 3
-
-	.align	5
-/*
- * ARMv4 optimised clear_user_page
- *
- * Same story as above.
- */
-ENTRY(v4_mc_clear_user_page)
-	str	lr, [sp, #-4]!
-	mov	r1, #PAGE_SZ/64			@ 1
-	mov	r2, #0				@ 1
-	mov	r3, #0				@ 1
-	mov	ip, #0				@ 1
-	mov	lr, #0				@ 1
-1:	mcr	p15, 0, r0, c7, c6, 1		@ 1   invalidate D line
-	stmia	r0!, {r2, r3, ip, lr}		@ 4
-	stmia	r0!, {r2, r3, ip, lr}		@ 4
-	mcr	p15, 0, r0, c7, c6, 1		@ 1   invalidate D line
-	stmia	r0!, {r2, r3, ip, lr}		@ 4
-	stmia	r0!, {r2, r3, ip, lr}		@ 4
-	subs	r1, r1, #1			@ 1
-	bne	1b				@ 1
-	ldr	pc, [sp], #4
-
-	__INITDATA
-
-	.type	v4_mc_user_fns, #object
-ENTRY(v4_mc_user_fns)
-	.long	v4_mc_clear_user_page
-	.long	v4_mc_copy_user_page
-	.size	v4_mc_user_fns, . - v4_mc_user_fns
diff --git a/arch/arm/mm/copypage-v4mc.c b/arch/arm/mm/copypage-v4mc.c
new file mode 100644
index 000000000000..fc69dccdace1
--- /dev/null
+++ b/arch/arm/mm/copypage-v4mc.c
@@ -0,0 +1,111 @@
+/*
+ *  linux/arch/arm/lib/copypage-armv4mc.S
+ *
+ *  Copyright (C) 1995-2005 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This handles the mini data cache, as found on SA11x0 and XScale
+ * processors.  When we copy a user page page, we map it in such a way
+ * that accesses to this page will not touch the main data cache, but
+ * will be cached in the mini data cache.  This prevents us thrashing
+ * the main data cache on page faults.
+ */
+#include <linux/init.h>
+#include <linux/mm.h>
+
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/tlbflush.h>
+
+/*
+ * 0xffff8000 to 0xffffffff is reserved for any ARM architecture
+ * specific hacks for copying pages efficiently.
+ */
+#define minicache_pgprot __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | \
+				  L_PTE_CACHEABLE)
+
+#define TOP_PTE(x)	pte_offset_kernel(top_pmd, x)
+
+static DEFINE_SPINLOCK(minicache_lock);
+
+/*
+ * ARMv4 mini-dcache optimised copy_user_page
+ *
+ * We flush the destination cache lines just before we write the data into the
+ * corresponding address.  Since the Dcache is read-allocate, this removes the
+ * Dcache aliasing issue.  The writes will be forwarded to the write buffer,
+ * and merged as appropriate.
+ *
+ * Note: We rely on all ARMv4 processors implementing the "invalidate D line"
+ * instruction.  If your processor does not supply this, you have to write your
+ * own copy_user_page that does the right thing.
+ */
+static void __attribute__((naked))
+mc_copy_user_page(void *from, void *to)
+{
+	asm volatile(
+	"stmfd	sp!, {r4, lr}			@ 2\n\
+	mov	r4, %2				@ 1\n\
+	ldmia	%0!, {r2, r3, ip, lr}		@ 4\n\
+1:	mcr	p15, 0, %1, c7, c6, 1		@ 1   invalidate D line\n\
+	stmia	%1!, {r2, r3, ip, lr}		@ 4\n\
+	ldmia	%0!, {r2, r3, ip, lr}		@ 4+1\n\
+	stmia	%1!, {r2, r3, ip, lr}		@ 4\n\
+	ldmia	%0!, {r2, r3, ip, lr}		@ 4\n\
+	mcr	p15, 0, %1, c7, c6, 1		@ 1   invalidate D line\n\
+	stmia	%1!, {r2, r3, ip, lr}		@ 4\n\
+	ldmia	%0!, {r2, r3, ip, lr}		@ 4\n\
+	subs	r4, r4, #1			@ 1\n\
+	stmia	%1!, {r2, r3, ip, lr}		@ 4\n\
+	ldmneia	%0!, {r2, r3, ip, lr}		@ 4\n\
+	bne	1b				@ 1\n\
+	ldmfd	sp!, {r4, pc}			@ 3"
+	:
+	: "r" (from), "r" (to), "I" (PAGE_SIZE / 64));
+}
+
+void v4_mc_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr)
+{
+	spin_lock(&minicache_lock);
+
+	set_pte(TOP_PTE(0xffff8000), pfn_pte(__pa(kfrom) >> PAGE_SHIFT, minicache_pgprot));
+	flush_tlb_kernel_page(0xffff8000);
+
+	mc_copy_user_page((void *)0xffff8000, kto);
+
+	spin_unlock(&minicache_lock);
+}
+
+/*
+ * ARMv4 optimised clear_user_page
+ */
+void __attribute__((naked))
+v4_mc_clear_user_page(void *kaddr, unsigned long vaddr)
+{
+	asm volatile(
+	"str	lr, [sp, #-4]!\n\
+	mov	r1, %0				@ 1\n\
+	mov	r2, #0				@ 1\n\
+	mov	r3, #0				@ 1\n\
+	mov	ip, #0				@ 1\n\
+	mov	lr, #0				@ 1\n\
+1:	mcr	p15, 0, r0, c7, c6, 1		@ 1   invalidate D line\n\
+	stmia	r0!, {r2, r3, ip, lr}		@ 4\n\
+	stmia	r0!, {r2, r3, ip, lr}		@ 4\n\
+	mcr	p15, 0, r0, c7, c6, 1		@ 1   invalidate D line\n\
+	stmia	r0!, {r2, r3, ip, lr}		@ 4\n\
+	stmia	r0!, {r2, r3, ip, lr}		@ 4\n\
+	subs	r1, r1, #1			@ 1\n\
+	bne	1b				@ 1\n\
+	ldr	pc, [sp], #4"
+	:
+	: "I" (PAGE_SIZE / 64));
+}
+
+struct cpu_user_fns v4_mc_user_fns __initdata = {
+	.cpu_clear_user_page	= v4_mc_clear_user_page,
+	.cpu_copy_user_page	= v4_mc_copy_user_page,
+};
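
These functions plug into ARM's per-CPU user-page dispatch. A hedged sketch of how the asm-arm/page.h side routes the generic macros through this struct (exact macro spellings assumed, not shown in this diff):

extern struct cpu_user_fns cpu_user;	/* points at v4_mc_user_fns on SA11x0/XScale */

#define clear_user_page(addr, vaddr, pg) \
	cpu_user.cpu_clear_user_page(addr, vaddr)
#define copy_user_page(to, from, vaddr, pg) \
	cpu_user.cpu_copy_user_page(to, from, vaddr)

Once v4_mc_user_fns is selected at boot, every user-page copy reads through the 0xffff8000 mini-cache alias established above instead of polluting the main data cache.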
diff --git a/arch/arm/mm/copypage-v6.c b/arch/arm/mm/copypage-v6.c
index 694ac8208858..a8c00236bd3d 100644
--- a/arch/arm/mm/copypage-v6.c
+++ b/arch/arm/mm/copypage-v6.c
@@ -26,8 +26,8 @@
 #define to_address	(0xffffc000)
 #define to_pgprot	PAGE_KERNEL
 
-static pte_t *from_pte;
-static pte_t *to_pte;
+#define TOP_PTE(x)	pte_offset_kernel(top_pmd, x)
+
 static DEFINE_SPINLOCK(v6_lock);
 
 #define DCACHE_COLOUR(vaddr) ((vaddr & (SHMLBA - 1)) >> PAGE_SHIFT)
@@ -74,8 +74,8 @@ void v6_copy_user_page_aliasing(void *kto, const void *kfrom, unsigned long vadd
 	 */
 	spin_lock(&v6_lock);
 
-	set_pte(from_pte + offset, pfn_pte(__pa(kfrom) >> PAGE_SHIFT, from_pgprot));
-	set_pte(to_pte + offset, pfn_pte(__pa(kto) >> PAGE_SHIFT, to_pgprot));
+	set_pte(TOP_PTE(from_address) + offset, pfn_pte(__pa(kfrom) >> PAGE_SHIFT, from_pgprot));
+	set_pte(TOP_PTE(to_address) + offset, pfn_pte(__pa(kto) >> PAGE_SHIFT, to_pgprot));
 
 	from = from_address + (offset << PAGE_SHIFT);
 	to = to_address + (offset << PAGE_SHIFT);
@@ -114,7 +114,7 @@ void v6_clear_user_page_aliasing(void *kaddr, unsigned long vaddr)
 	 */
 	spin_lock(&v6_lock);
 
-	set_pte(to_pte + offset, pfn_pte(__pa(kaddr) >> PAGE_SHIFT, to_pgprot));
+	set_pte(TOP_PTE(to_address) + offset, pfn_pte(__pa(kaddr) >> PAGE_SHIFT, to_pgprot));
 	flush_tlb_kernel_page(to);
 	clear_page((void *)to);
 
@@ -129,21 +129,6 @@ struct cpu_user_fns v6_user_fns __initdata = {
 static int __init v6_userpage_init(void)
 {
 	if (cache_is_vipt_aliasing()) {
-		pgd_t *pgd;
-		pmd_t *pmd;
-
-		pgd = pgd_offset_k(from_address);
-		pmd = pmd_alloc(&init_mm, pgd, from_address);
-		if (!pmd)
-			BUG();
-		from_pte = pte_alloc_kernel(&init_mm, pmd, from_address);
-		if (!from_pte)
-			BUG();
-
-		to_pte = pte_alloc_kernel(&init_mm, pmd, to_address);
-		if (!to_pte)
-			BUG();
-
 		cpu_user.cpu_clear_user_page = v6_clear_user_page_aliasing;
 		cpu_user.cpu_copy_user_page = v6_copy_user_page_aliasing;
 	}
@@ -151,5 +136,4 @@ static int __init v6_userpage_init(void)
 	return 0;
 }
 
-__initcall(v6_userpage_init);
-
+core_initcall(v6_userpage_init);
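
The offset arithmetic in this file relies on data-cache colouring. A hedged helper, not in the patch, that makes the rule explicit:

/* In an aliasing VIPT D-cache, two virtual addresses can share cache
 * lines only if they have the same colour within SHMLBA. */
static inline int same_dcache_colour(unsigned long a, unsigned long b)
{
	return DCACHE_COLOUR(a) == DCACHE_COLOUR(b);
}

Choosing offset = DCACHE_COLOUR(vaddr) therefore makes each temporary kernel alias congruent with the user mapping being copied or cleared, so the result is visible without an extra cache flush.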
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index c6de48d89503..4085ed983e46 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -13,6 +13,29 @@
 
 #include <asm/cacheflush.h>
 #include <asm/system.h>
+#include <asm/tlbflush.h>
+
+#ifdef CONFIG_CPU_CACHE_VIPT
+#define ALIAS_FLUSH_START	0xffff4000
+
+#define TOP_PTE(x)	pte_offset_kernel(top_pmd, x)
+
+static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
+{
+	unsigned long to = ALIAS_FLUSH_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
+
+	set_pte(TOP_PTE(to), pfn_pte(pfn, PAGE_KERNEL));
+	flush_tlb_kernel_page(to);
+
+	asm(	"mcrr	p15, 0, %1, %0, c14\n"
+	"	mcrr	p15, 0, %1, %0, c5\n"
+	    :
+	    : "r" (to), "r" (to + PAGE_SIZE - L1_CACHE_BYTES)
+	    : "cc");
+}
+#else
+#define flush_pfn_alias(pfn,vaddr)	do { } while (0)
+#endif
 
 static void __flush_dcache_page(struct address_space *mapping, struct page *page)
 {
@@ -37,6 +60,18 @@ static void __flush_dcache_page(struct address_space *mapping, struct page *page
 		return;
 
 	/*
+	 * This is a page cache page.  If we have a VIPT cache, we
+	 * only need to do one flush - which would be at the relevant
+	 * userspace colour, which is congruent with page->index.
+	 */
+	if (cache_is_vipt()) {
+		if (cache_is_vipt_aliasing())
+			flush_pfn_alias(page_to_pfn(page),
+					page->index << PAGE_CACHE_SHIFT);
+		return;
+	}
+
+	/*
 	 * There are possible user space mappings of this page:
 	 * - VIVT cache: we need to also write back and invalidate all user
 	 *   data in the current VM view associated with this page.
@@ -57,8 +92,6 @@ static void __flush_dcache_page(struct address_space *mapping, struct page *page
 			continue;
 		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
 		flush_cache_page(mpnt, mpnt->vm_start + offset, page_to_pfn(page));
-		if (cache_is_vipt())
-			break;
 	}
 	flush_dcache_mmap_unlock(mapping);
 }
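
flush_pfn_alias() above maps the page at a kernel address with the same colour as the user mapping and flushes just that one window. A hedged gloss of the coprocessor pair it issues, per my reading of the ARMv6 block cache operations (not stated in the patch):

	/* mcrr p15, 0, <end>, <start>, c14: clean+invalidate D-cache range */
	/* mcrr p15, 0, <end>, <start>, c5:  invalidate I-cache range */

One PAGE_SIZE window at ALIAS_FLUSH_START plus the colour offset thus stands in for every congruent user mapping of the page, which is what lets the per-vma walk be skipped entirely for VIPT caches.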
diff --git a/arch/arm/mm/mm-armv.c b/arch/arm/mm/mm-armv.c
index 585dfb8e20b9..2c2b93d77d43 100644
--- a/arch/arm/mm/mm-armv.c
+++ b/arch/arm/mm/mm-armv.c
@@ -37,6 +37,8 @@ pgprot_t pgprot_kernel;
 
 EXPORT_SYMBOL(pgprot_kernel);
 
+pmd_t *top_pmd;
+
 struct cachepolicy {
 	const char	policy[16];
 	unsigned int	cr_mask;
@@ -142,6 +144,16 @@ __setup("noalign", noalign_setup);
 
 #define FIRST_KERNEL_PGD_NR	(FIRST_USER_PGD_NR + USER_PTRS_PER_PGD)
 
+static inline pmd_t *pmd_off(pgd_t *pgd, unsigned long virt)
+{
+	return pmd_offset(pgd, virt);
+}
+
+static inline pmd_t *pmd_off_k(unsigned long virt)
+{
+	return pmd_off(pgd_offset_k(virt), virt);
+}
+
 /*
  * need to get a 16k page for level 1
  */
@@ -220,7 +232,7 @@ void free_pgd_slow(pgd_t *pgd)
 		return;
 
 	/* pgd is always present and good */
-	pmd = (pmd_t *)pgd;
+	pmd = pmd_off(pgd, 0);
 	if (pmd_none(*pmd))
 		goto free;
 	if (pmd_bad(*pmd)) {
@@ -246,9 +258,8 @@ free:
 static inline void
 alloc_init_section(unsigned long virt, unsigned long phys, int prot)
 {
-	pmd_t *pmdp;
+	pmd_t *pmdp = pmd_off_k(virt);
 
-	pmdp = pmd_offset(pgd_offset_k(virt), virt);
 	if (virt & (1 << 20))
 		pmdp++;
 
@@ -283,11 +294,9 @@ alloc_init_supersection(unsigned long virt, unsigned long phys, int prot)
 static inline void
 alloc_init_page(unsigned long virt, unsigned long phys, unsigned int prot_l1, pgprot_t prot)
 {
-	pmd_t *pmdp;
+	pmd_t *pmdp = pmd_off_k(virt);
 	pte_t *ptep;
 
-	pmdp = pmd_offset(pgd_offset_k(virt), virt);
-
 	if (pmd_none(*pmdp)) {
 		unsigned long pmdval;
 		ptep = alloc_bootmem_low_pages(2 * PTRS_PER_PTE *
@@ -310,7 +319,7 @@ alloc_init_page(unsigned long virt, unsigned long phys, unsigned int prot_l1, pg
  */
 static inline void clear_mapping(unsigned long virt)
 {
-	pmd_clear(pmd_offset(pgd_offset_k(virt), virt));
+	pmd_clear(pmd_off_k(virt));
 }
 
 struct mem_types {
@@ -578,7 +587,7 @@ void setup_mm_for_reboot(char mode)
 			 PMD_TYPE_SECT;
 		if (cpu_arch <= CPU_ARCH_ARMv5)
 			pmdval |= PMD_BIT4;
-		pmd = pmd_offset(pgd + i, i << PGDIR_SHIFT);
+		pmd = pmd_off(pgd, i << PGDIR_SHIFT);
 		pmd[0] = __pmd(pmdval);
 		pmd[1] = __pmd(pmdval + (1 << (PGDIR_SHIFT - 1)));
 		flush_pmd_entry(pmd);
@@ -675,6 +684,8 @@ void __init memtable_init(struct meminfo *mi)
 
 	flush_cache_all();
 	flush_tlb_all();
+
+	top_pmd = pmd_off_k(0xffff0000);
 }
 
 /*
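
top_pmd caches the kernel page-table slot for the top of the address space. Since an ARM Linux pte table spans 2MB of virtual space, the pmd looked up for 0xffff0000 (the vectors page) also covers the fixed aliases this series introduces; a hedged recap of the consumers seen in the other files (the "..." stand for the pte values built there):

#define TOP_PTE(x)	pte_offset_kernel(top_pmd, x)

	set_pte(TOP_PTE(0xffff4000), ...);	/* flush.c: VIPT alias flush window */
	set_pte(TOP_PTE(0xffff8000), ...);	/* copypage-v4mc.c: mini-cache alias */
	set_pte(TOP_PTE(0xffffc000), ...);	/* copypage-v6.c: copy/clear aliases */

Resolving the pmd once in memtable_init() avoids re-walking the pgd/pmd levels on every page copy, clear, or flush.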