author     Linus Torvalds <torvalds@ppc970.osdl.org>    2005-05-16 13:27:11 -0400
committer  Linus Torvalds <torvalds@ppc970.osdl.org>    2005-05-16 13:27:11 -0400
commit     2a4a7e02e27b4f542473772d588f81759c209fb3
tree       9b3f40337525c1bdec6b14936184c9794dcd2356
parent     85bcc13072c54592596c5b41d40d1c6a18b04e19
parent     bfd4e0709fb977e64e27d9255be6e7aeadf4fcd4

Automatic merge of master.kernel.org:/home/rmk/linux-2.6-rmk.git

-rw-r--r--  arch/arm/mach-s3c2410/clock.c                2
-rw-r--r--  arch/arm/mach-s3c2410/s3c2440.c              6
-rw-r--r--  arch/arm/mm/Kconfig                         21
-rw-r--r--  arch/arm/mm/copypage-v4mc.S                 80
-rw-r--r--  arch/arm/mm/copypage-v4mc.c                111
-rw-r--r--  arch/arm/mm/copypage-v6.c                   28
-rw-r--r--  arch/arm/mm/flush.c                         37
-rw-r--r--  arch/arm/mm/mm-armv.c                       27
-rw-r--r--  include/asm-arm/arch-imx/imx-regs.h         24
-rw-r--r--  include/asm-arm/arch-s3c2410/regs-nand.h    44
-rw-r--r--  include/asm-arm/page.h                      18

11 files changed, 256 insertions(+), 142 deletions(-)
diff --git a/arch/arm/mach-s3c2410/clock.c b/arch/arm/mach-s3c2410/clock.c
index e23f534d4e1d..8d986b8401c2 100644
--- a/arch/arm/mach-s3c2410/clock.c
+++ b/arch/arm/mach-s3c2410/clock.c
@@ -478,7 +478,7 @@ static int s3c2440_clk_add(struct sys_device *sysdev)
 {
 	unsigned long upllcon = __raw_readl(S3C2410_UPLLCON);
 
-	s3c2440_clk_upll.rate = s3c2410_get_pll(upllcon, clk_xtal.rate) * 2;
+	s3c2440_clk_upll.rate = s3c2410_get_pll(upllcon, clk_xtal.rate);
 
 	printk("S3C2440: Clock Support, UPLL %ld.%03ld MHz\n",
 		print_mhz(s3c2440_clk_upll.rate));
diff --git a/arch/arm/mach-s3c2410/s3c2440.c b/arch/arm/mach-s3c2410/s3c2440.c
index 9a8cc5ae2255..d4c8281b55f6 100644
--- a/arch/arm/mach-s3c2410/s3c2440.c
+++ b/arch/arm/mach-s3c2410/s3c2440.c
@@ -192,9 +192,11 @@ void __init s3c2440_map_io(struct map_desc *mach_desc, int size)
 
 	iotable_init(s3c2440_iodesc, ARRAY_SIZE(s3c2440_iodesc));
 	iotable_init(mach_desc, size);
+
 	/* rename any peripherals used differing from the s3c2410 */
 
 	s3c_device_i2c.name = "s3c2440-i2c";
+	s3c_device_nand.name = "s3c2440-nand";
 
 	/* change irq for watchdog */
 
@@ -225,7 +227,7 @@ void __init s3c2440_init_clocks(int xtal)
 		break;
 
 	case S3C2440_CLKDIVN_HDIVN_2:
-		hdiv = 1;
+		hdiv = 2;
 		break;
 
 	case S3C2440_CLKDIVN_HDIVN_4_8:
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index c4fc6be629de..48bac7da8c70 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -412,21 +412,20 @@ config CPU_BPREDICT_DISABLE
 
 config TLS_REG_EMUL
 	bool
-	default y if (SMP || CPU_32v6) && (CPU_32v5 || CPU_32v4 || CPU_32v3)
+	default y if SMP && (CPU_32v5 || CPU_32v4 || CPU_32v3)
 	help
-	  We might be running on an ARMv6+ processor which should have the TLS
-	  register but for some reason we can't use it, or maybe an SMP system
-	  using a pre-ARMv6 processor (there are apparently a few prototypes
-	  like that in existence) and therefore access to that register must
-	  be emulated.
+	  An SMP system using a pre-ARMv6 processor (there are apparently
+	  a few prototypes like that in existence) and therefore access to
+	  that required register must be emulated.
 
 config HAS_TLS_REG
 	bool
-	depends on CPU_32v6
-	default y if !TLS_REG_EMUL
+	depends on !TLS_REG_EMUL
+	default y if SMP || CPU_32v7
 	help
 	  This selects support for the CP15 thread register.
-	  It is defined to be available on ARMv6 or later. If a particular
-	  ARMv6 or later CPU doesn't support it then it must include "select
-	  TLS_REG_EMUL" along with its other caracteristics.
+	  It is defined to be available on some ARMv6 processors (including
+	  all SMP capable ARMv6's) or later processors. User space may
+	  assume directly accessing that register and always obtain the
+	  expected value only on ARMv7 and above.
 
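
The register these options describe is the CP15 c13 "user read-only thread ID" register (TPIDRURO in later ARM documentation). A minimal sketch, not part of this patch, of reading it on a CPU where HAS_TLS_REG holds; on TLS_REG_EMUL configurations the kernel traps and emulates the access instead:

/* Illustrative only, assuming an ARMv6+ CPU with the hardware TLS
 * register: read the user read-only thread ID register. */
static inline unsigned long read_tls(void)
{
	unsigned long tp;

	asm("mrc	p15, 0, %0, c13, c0, 3" : "=r" (tp));
	return tp;
}
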
diff --git a/arch/arm/mm/copypage-v4mc.S b/arch/arm/mm/copypage-v4mc.S
deleted file mode 100644
index 305af3dab3d8..000000000000
--- a/arch/arm/mm/copypage-v4mc.S
+++ /dev/null
@@ -1,80 +0,0 @@
-/*
- * linux/arch/arm/lib/copy_page-armv4mc.S
- *
- * Copyright (C) 1995-2001 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * ASM optimised string functions
- */
-#include <linux/linkage.h>
-#include <linux/init.h>
-#include <asm/constants.h>
-
-	.text
-	.align	5
-/*
- * ARMv4 mini-dcache optimised copy_user_page
- *
- * We flush the destination cache lines just before we write the data into the
- * corresponding address. Since the Dcache is read-allocate, this removes the
- * Dcache aliasing issue. The writes will be forwarded to the write buffer,
- * and merged as appropriate.
- *
- * Note: We rely on all ARMv4 processors implementing the "invalidate D line"
- * instruction. If your processor does not supply this, you have to write your
- * own copy_user_page that does the right thing.
- */
-ENTRY(v4_mc_copy_user_page)
-	stmfd	sp!, {r4, lr}			@ 2
-	mov	r4, r0
-	mov	r0, r1
-	bl	map_page_minicache
-	mov	r1, #PAGE_SZ/64			@ 1
-	ldmia	r0!, {r2, r3, ip, lr}		@ 4
-1:	mcr	p15, 0, r4, c7, c6, 1		@ 1   invalidate D line
-	stmia	r4!, {r2, r3, ip, lr}		@ 4
-	ldmia	r0!, {r2, r3, ip, lr}		@ 4+1
-	stmia	r4!, {r2, r3, ip, lr}		@ 4
-	ldmia	r0!, {r2, r3, ip, lr}		@ 4
-	mcr	p15, 0, r4, c7, c6, 1		@ 1   invalidate D line
-	stmia	r4!, {r2, r3, ip, lr}		@ 4
-	ldmia	r0!, {r2, r3, ip, lr}		@ 4
-	subs	r1, r1, #1			@ 1
-	stmia	r4!, {r2, r3, ip, lr}		@ 4
-	ldmneia	r0!, {r2, r3, ip, lr}		@ 4
-	bne	1b				@ 1
-	ldmfd	sp!, {r4, pc}			@ 3
-
-	.align	5
-/*
- * ARMv4 optimised clear_user_page
- *
- * Same story as above.
- */
-ENTRY(v4_mc_clear_user_page)
-	str	lr, [sp, #-4]!
-	mov	r1, #PAGE_SZ/64			@ 1
-	mov	r2, #0				@ 1
-	mov	r3, #0				@ 1
-	mov	ip, #0				@ 1
-	mov	lr, #0				@ 1
-1:	mcr	p15, 0, r0, c7, c6, 1		@ 1   invalidate D line
-	stmia	r0!, {r2, r3, ip, lr}		@ 4
-	stmia	r0!, {r2, r3, ip, lr}		@ 4
-	mcr	p15, 0, r0, c7, c6, 1		@ 1   invalidate D line
-	stmia	r0!, {r2, r3, ip, lr}		@ 4
-	stmia	r0!, {r2, r3, ip, lr}		@ 4
-	subs	r1, r1, #1			@ 1
-	bne	1b				@ 1
-	ldr	pc, [sp], #4
-
-	__INITDATA
-
-	.type	v4_mc_user_fns, #object
-ENTRY(v4_mc_user_fns)
-	.long	v4_mc_clear_user_page
-	.long	v4_mc_copy_user_page
-	.size	v4_mc_user_fns, . - v4_mc_user_fns
diff --git a/arch/arm/mm/copypage-v4mc.c b/arch/arm/mm/copypage-v4mc.c
new file mode 100644
index 000000000000..16384a7600a1
--- /dev/null
+++ b/arch/arm/mm/copypage-v4mc.c
@@ -0,0 +1,111 @@
+/*
+ * linux/arch/arm/lib/copypage-armv4mc.S
+ *
+ * Copyright (C) 1995-2005 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This handles the mini data cache, as found on SA11x0 and XScale
+ * processors. When we copy a user page page, we map it in such a way
+ * that accesses to this page will not touch the main data cache, but
+ * will be cached in the mini data cache. This prevents us thrashing
+ * the main data cache on page faults.
+ */
+#include <linux/init.h>
+#include <linux/mm.h>
+
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/tlbflush.h>
+
+/*
+ * 0xffff8000 to 0xffffffff is reserved for any ARM architecture
+ * specific hacks for copying pages efficiently.
+ */
+#define minicache_pgprot __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | \
+				  L_PTE_CACHEABLE)
+
+#define TOP_PTE(x)	pte_offset_kernel(top_pmd, x)
+
+static DEFINE_SPINLOCK(minicache_lock);
+
+/*
+ * ARMv4 mini-dcache optimised copy_user_page
+ *
+ * We flush the destination cache lines just before we write the data into the
+ * corresponding address. Since the Dcache is read-allocate, this removes the
+ * Dcache aliasing issue. The writes will be forwarded to the write buffer,
+ * and merged as appropriate.
+ *
+ * Note: We rely on all ARMv4 processors implementing the "invalidate D line"
+ * instruction. If your processor does not supply this, you have to write your
+ * own copy_user_page that does the right thing.
+ */
+static void __attribute__((naked))
+mc_copy_user_page(void *from, void *to)
+{
+	asm volatile(
+	"stmfd	sp!, {r4, lr}			@ 2\n\
+	mov	r4, %2				@ 1\n\
+	ldmia	%0!, {r2, r3, ip, lr}		@ 4\n\
+1:	mcr	p15, 0, %1, c7, c6, 1		@ 1   invalidate D line\n\
+	stmia	%1!, {r2, r3, ip, lr}		@ 4\n\
+	ldmia	%0!, {r2, r3, ip, lr}		@ 4+1\n\
+	stmia	%1!, {r2, r3, ip, lr}		@ 4\n\
+	ldmia	%0!, {r2, r3, ip, lr}		@ 4\n\
+	mcr	p15, 0, %1, c7, c6, 1		@ 1   invalidate D line\n\
+	stmia	%1!, {r2, r3, ip, lr}		@ 4\n\
+	ldmia	%0!, {r2, r3, ip, lr}		@ 4\n\
+	subs	r4, r4, #1			@ 1\n\
+	stmia	%1!, {r2, r3, ip, lr}		@ 4\n\
+	ldmneia	%0!, {r2, r3, ip, lr}		@ 4\n\
+	bne	1b				@ 1\n\
+	ldmfd	sp!, {r4, pc}			@ 3"
+	:
+	: "r" (from), "r" (to), "I" (PAGE_SIZE / 64));
+}
+
+void v4_mc_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr)
+{
+	spin_lock(&minicache_lock);
+
+	set_pte(TOP_PTE(COPYPAGE_MINICACHE), pfn_pte(__pa(kfrom) >> PAGE_SHIFT, minicache_pgprot));
+	flush_tlb_kernel_page(COPYPAGE_MINICACHE);
+
+	mc_copy_user_page((void *)COPYPAGE_MINICACHE, kto);
+
+	spin_unlock(&minicache_lock);
+}
+
+/*
+ * ARMv4 optimised clear_user_page
+ */
+void __attribute__((naked))
+v4_mc_clear_user_page(void *kaddr, unsigned long vaddr)
+{
+	asm volatile(
+	"str	lr, [sp, #-4]!\n\
+	mov	r1, %0				@ 1\n\
+	mov	r2, #0				@ 1\n\
+	mov	r3, #0				@ 1\n\
+	mov	ip, #0				@ 1\n\
+	mov	lr, #0				@ 1\n\
+1:	mcr	p15, 0, r0, c7, c6, 1		@ 1   invalidate D line\n\
+	stmia	r0!, {r2, r3, ip, lr}		@ 4\n\
+	stmia	r0!, {r2, r3, ip, lr}		@ 4\n\
+	mcr	p15, 0, r0, c7, c6, 1		@ 1   invalidate D line\n\
+	stmia	r0!, {r2, r3, ip, lr}		@ 4\n\
+	stmia	r0!, {r2, r3, ip, lr}		@ 4\n\
+	subs	r1, r1, #1			@ 1\n\
+	bne	1b				@ 1\n\
+	ldr	pc, [sp], #4"
+	:
+	: "I" (PAGE_SIZE / 64));
+}
+
+struct cpu_user_fns v4_mc_user_fns __initdata = {
+	.cpu_clear_user_page	= v4_mc_clear_user_page,
+	.cpu_copy_user_page	= v4_mc_copy_user_page,
+};
diff --git a/arch/arm/mm/copypage-v6.c b/arch/arm/mm/copypage-v6.c
index 694ac8208858..a8c00236bd3d 100644
--- a/arch/arm/mm/copypage-v6.c
+++ b/arch/arm/mm/copypage-v6.c
@@ -26,8 +26,8 @@
 #define to_address	(0xffffc000)
 #define to_pgprot	PAGE_KERNEL
 
-static pte_t *from_pte;
-static pte_t *to_pte;
+#define TOP_PTE(x)	pte_offset_kernel(top_pmd, x)
+
 static DEFINE_SPINLOCK(v6_lock);
 
 #define DCACHE_COLOUR(vaddr) ((vaddr & (SHMLBA - 1)) >> PAGE_SHIFT)
@@ -74,8 +74,8 @@ void v6_copy_user_page_aliasing(void *kto, const void *kfrom, unsigned long vadd
 	 */
 	spin_lock(&v6_lock);
 
-	set_pte(from_pte + offset, pfn_pte(__pa(kfrom) >> PAGE_SHIFT, from_pgprot));
-	set_pte(to_pte + offset, pfn_pte(__pa(kto) >> PAGE_SHIFT, to_pgprot));
+	set_pte(TOP_PTE(from_address) + offset, pfn_pte(__pa(kfrom) >> PAGE_SHIFT, from_pgprot));
+	set_pte(TOP_PTE(to_address) + offset, pfn_pte(__pa(kto) >> PAGE_SHIFT, to_pgprot));
 
 	from = from_address + (offset << PAGE_SHIFT);
 	to = to_address + (offset << PAGE_SHIFT);
@@ -114,7 +114,7 @@ void v6_clear_user_page_aliasing(void *kaddr, unsigned long vaddr)
 	 */
 	spin_lock(&v6_lock);
 
-	set_pte(to_pte + offset, pfn_pte(__pa(kaddr) >> PAGE_SHIFT, to_pgprot));
+	set_pte(TOP_PTE(to_address) + offset, pfn_pte(__pa(kaddr) >> PAGE_SHIFT, to_pgprot));
 	flush_tlb_kernel_page(to);
 	clear_page((void *)to);
 
@@ -129,21 +129,6 @@ struct cpu_user_fns v6_user_fns __initdata = {
 static int __init v6_userpage_init(void)
 {
 	if (cache_is_vipt_aliasing()) {
-		pgd_t *pgd;
-		pmd_t *pmd;
-
-		pgd = pgd_offset_k(from_address);
-		pmd = pmd_alloc(&init_mm, pgd, from_address);
-		if (!pmd)
-			BUG();
-		from_pte = pte_alloc_kernel(&init_mm, pmd, from_address);
-		if (!from_pte)
-			BUG();
-
-		to_pte = pte_alloc_kernel(&init_mm, pmd, to_address);
-		if (!to_pte)
-			BUG();
-
 		cpu_user.cpu_clear_user_page = v6_clear_user_page_aliasing;
 		cpu_user.cpu_copy_user_page = v6_copy_user_page_aliasing;
 	}
@@ -151,5 +136,4 @@ static int __init v6_userpage_init(void)
 	return 0;
 }
 
-__initcall(v6_userpage_init);
-
+core_initcall(v6_userpage_init);
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index c6de48d89503..4085ed983e46 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -13,6 +13,29 @@
 
 #include <asm/cacheflush.h>
 #include <asm/system.h>
+#include <asm/tlbflush.h>
+
+#ifdef CONFIG_CPU_CACHE_VIPT
+#define ALIAS_FLUSH_START	0xffff4000
+
+#define TOP_PTE(x)	pte_offset_kernel(top_pmd, x)
+
+static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
+{
+	unsigned long to = ALIAS_FLUSH_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
+
+	set_pte(TOP_PTE(to), pfn_pte(pfn, PAGE_KERNEL));
+	flush_tlb_kernel_page(to);
+
+	asm(	"mcrr	p15, 0, %1, %0, c14\n"
+	"	mcrr	p15, 0, %1, %0, c5\n"
+	    :
+	    : "r" (to), "r" (to + PAGE_SIZE - L1_CACHE_BYTES)
+	    : "cc");
+}
+#else
+#define flush_pfn_alias(pfn,vaddr)	do { } while (0)
+#endif
 
 static void __flush_dcache_page(struct address_space *mapping, struct page *page)
 {
@@ -37,6 +60,18 @@ static void __flush_dcache_page(struct address_space *mapping, struct page *page
 		return;
 
 	/*
+	 * This is a page cache page.  If we have a VIPT cache, we
+	 * only need to do one flush - which would be at the relevant
+	 * userspace colour, which is congruent with page->index.
+	 */
+	if (cache_is_vipt()) {
+		if (cache_is_vipt_aliasing())
+			flush_pfn_alias(page_to_pfn(page),
+					page->index << PAGE_CACHE_SHIFT);
+		return;
+	}
+
+	/*
 	 * There are possible user space mappings of this page:
 	 * - VIVT cache: we need to also write back and invalidate all user
 	 *   data in the current VM view associated with this page.
@@ -57,8 +92,6 @@ static void __flush_dcache_page(struct address_space *mapping, struct page *page
 			continue;
 		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
 		flush_cache_page(mpnt, mpnt->vm_start + offset, page_to_pfn(page));
-		if (cache_is_vipt())
-			break;
 	}
 	flush_dcache_mmap_unlock(mapping);
 }
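
For context, and not part of the patch: the two mcrr operations issued by flush_pfn_alias() above are the ARMv6 cache range operations, shown below as stand-alone helpers with assumed names.

/* Illustrative helpers, assuming an ARMv6 CPU implementing the cache
 * range operations; "start" and "end" are virtual addresses. */
static inline void dcache_clean_inv_range(unsigned long start, unsigned long end)
{
	/* c14: clean and invalidate data cache range */
	asm("mcrr	p15, 0, %1, %0, c14" : : "r" (start), "r" (end) : "cc");
}

static inline void icache_inv_range(unsigned long start, unsigned long end)
{
	/* c5: invalidate instruction cache range */
	asm("mcrr	p15, 0, %1, %0, c5" : : "r" (start), "r" (end) : "cc");
}
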
diff --git a/arch/arm/mm/mm-armv.c b/arch/arm/mm/mm-armv.c
index 585dfb8e20b9..fa60fd65fcf8 100644
--- a/arch/arm/mm/mm-armv.c
+++ b/arch/arm/mm/mm-armv.c
@@ -37,6 +37,8 @@ pgprot_t pgprot_kernel;
 
 EXPORT_SYMBOL(pgprot_kernel);
 
+pmd_t *top_pmd;
+
 struct cachepolicy {
 	const char	policy[16];
 	unsigned int	cr_mask;
@@ -142,6 +144,16 @@ __setup("noalign", noalign_setup);
 
 #define FIRST_KERNEL_PGD_NR	(FIRST_USER_PGD_NR + USER_PTRS_PER_PGD)
 
+static inline pmd_t *pmd_off(pgd_t *pgd, unsigned long virt)
+{
+	return pmd_offset(pgd, virt);
+}
+
+static inline pmd_t *pmd_off_k(unsigned long virt)
+{
+	return pmd_off(pgd_offset_k(virt), virt);
+}
+
 /*
  * need to get a 16k page for level 1
  */
@@ -220,7 +232,7 @@ void free_pgd_slow(pgd_t *pgd)
 		return;
 
 	/* pgd is always present and good */
-	pmd = (pmd_t *)pgd;
+	pmd = pmd_off(pgd, 0);
 	if (pmd_none(*pmd))
 		goto free;
 	if (pmd_bad(*pmd)) {
@@ -246,9 +258,8 @@ free:
 static inline void
 alloc_init_section(unsigned long virt, unsigned long phys, int prot)
 {
-	pmd_t *pmdp;
+	pmd_t *pmdp = pmd_off_k(virt);
 
-	pmdp = pmd_offset(pgd_offset_k(virt), virt);
 	if (virt & (1 << 20))
 		pmdp++;
 
@@ -283,11 +294,9 @@ alloc_init_supersection(unsigned long virt, unsigned long phys, int prot)
 static inline void
 alloc_init_page(unsigned long virt, unsigned long phys, unsigned int prot_l1, pgprot_t prot)
 {
-	pmd_t *pmdp;
+	pmd_t *pmdp = pmd_off_k(virt);
 	pte_t *ptep;
 
-	pmdp = pmd_offset(pgd_offset_k(virt), virt);
-
 	if (pmd_none(*pmdp)) {
 		unsigned long pmdval;
 		ptep = alloc_bootmem_low_pages(2 * PTRS_PER_PTE *
@@ -310,7 +319,7 @@ alloc_init_page(unsigned long virt, unsigned long phys, unsigned int prot_l1, pg
  */
 static inline void clear_mapping(unsigned long virt)
 {
-	pmd_clear(pmd_offset(pgd_offset_k(virt), virt));
+	pmd_clear(pmd_off_k(virt));
 }
 
 struct mem_types {
@@ -578,7 +587,7 @@ void setup_mm_for_reboot(char mode)
 			PMD_TYPE_SECT;
 		if (cpu_arch <= CPU_ARCH_ARMv5)
 			pmdval |= PMD_BIT4;
-		pmd = pmd_offset(pgd + i, i << PGDIR_SHIFT);
+		pmd = pmd_off(pgd, i << PGDIR_SHIFT);
 		pmd[0] = __pmd(pmdval);
 		pmd[1] = __pmd(pmdval + (1 << (PGDIR_SHIFT - 1)));
 		flush_pmd_entry(pmd);
@@ -675,6 +684,8 @@ void __init memtable_init(struct meminfo *mi)
 
 	flush_cache_all();
 	flush_tlb_all();
+
+	top_pmd = pmd_off_k(VECTORS_HIGH);
 }
 
 /*
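
Taken together with the TOP_PTE() users earlier in this merge, the pattern is: top_pmd is initialised once in memtable_init(), after which a fixed kernel alias near the top of the address space can be remapped with a single PTE write plus a TLB flush. A sketch of that pattern (the helper name is illustrative, not from the patch):

/* Sketch only: remap a fixed kernel alias such as ALIAS_FLUSH_START or
 * COPYPAGE_MINICACHE using the top_pmd pointer set up above. */
static void remap_top_alias(unsigned long alias, unsigned long pfn, pgprot_t prot)
{
	pte_t *pte = pte_offset_kernel(top_pmd, alias);

	set_pte(pte, pfn_pte(pfn, prot));	/* install the new mapping  */
	flush_tlb_kernel_page(alias);		/* drop any stale TLB entry */
}
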
diff --git a/include/asm-arm/arch-imx/imx-regs.h b/include/asm-arm/arch-imx/imx-regs.h
index f32c203952cf..93b840e8fa60 100644
--- a/include/asm-arm/arch-imx/imx-regs.h
+++ b/include/asm-arm/arch-imx/imx-regs.h
@@ -228,6 +228,30 @@
 #define PD31_BIN_SPI2_TXD	( GPIO_PORTD | GPIO_BIN | 31 )
 
 /*
+ * PWM controller
+ */
+#define PWMC	__REG(IMX_PWM_BASE + 0x00)	/* PWM Control Register */
+#define PWMS	__REG(IMX_PWM_BASE + 0x04)	/* PWM Sample Register */
+#define PWMP	__REG(IMX_PWM_BASE + 0x08)	/* PWM Period Register */
+#define PWMCNT	__REG(IMX_PWM_BASE + 0x0C)	/* PWM Counter Register */
+
+#define PWMC_HCTR		(0x01<<18)		/* Halfword FIFO Data Swapping */
+#define PWMC_BCTR		(0x01<<17)		/* Byte FIFO Data Swapping */
+#define PWMC_SWR		(0x01<<16)		/* Software Reset */
+#define PWMC_CLKSRC		(0x01<<15)		/* Clock Source */
+#define PWMC_PRESCALER(x)	(((x-1) & 0x7F) << 8)	/* PRESCALER */
+#define PWMC_IRQ		(0x01<< 7)		/* Interrupt Request */
+#define PWMC_IRQEN		(0x01<< 6)		/* Interrupt Request Enable */
+#define PWMC_FIFOAV		(0x01<< 5)		/* FIFO Available */
+#define PWMC_EN			(0x01<< 4)		/* Enables/Disables the PWM */
+#define PWMC_REPEAT(x)		(((x) & 0x03) << 2)	/* Sample Repeats */
+#define PWMC_CLKSEL(x)		(((x) & 0x03) << 0)	/* Clock Selection */
+
+#define PWMS_SAMPLE(x)		((x) & 0xFFFF)	/* Contains a two-sample word */
+#define PWMP_PERIOD(x)		((x) & 0xFFFF)	/* Represents the PWM's period */
+#define PWMC_COUNTER(x)		((x) & 0xFFFF)	/* Represents the current count value */
+
+/*
  * DMA Controller
  */
 #define DCR	__REG(IMX_DMAC_BASE +0x00)	/* DMA Control Register */
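
A usage sketch of the new PWM definitions (illustrative only; the prescaler, period and sample values are arbitrary examples, and the __REG() accessors make the register names directly assignable):

/* Illustrative only: one possible i.MX PWM bring-up with the macros
 * added above.  All numeric values are example assumptions. */
static void imx_pwm_example(void)
{
	PWMC = PWMC_SWR;			/* software-reset the controller */

	PWMP = PWMP_PERIOD(1024);		/* period, in prescaled clocks   */
	PWMS = PWMS_SAMPLE(512);		/* roughly 50% duty cycle        */

	PWMC = PWMC_PRESCALER(8) |		/* divide the input clock by 8   */
	       PWMC_CLKSEL(0) |			/* clock select field            */
	       PWMC_EN;				/* enable the PWM output         */
}
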
diff --git a/include/asm-arm/arch-s3c2410/regs-nand.h b/include/asm-arm/arch-s3c2410/regs-nand.h
index c443ac834698..7cff235e667a 100644
--- a/include/asm-arm/arch-s3c2410/regs-nand.h
+++ b/include/asm-arm/arch-s3c2410/regs-nand.h
@@ -1,16 +1,17 @@
 /* linux/include/asm-arm/arch-s3c2410/regs-nand.h
  *
- * Copyright (c) 2004 Simtec Electronics <linux@simtec.co.uk>
+ * Copyright (c) 2004,2005 Simtec Electronics <linux@simtec.co.uk>
  * http://www.simtec.co.uk/products/SWLINUX/
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  *
- * S3C2410 clock register definitions
+ * S3C2410 NAND register definitions
  *
  * Changelog:
  *    18-Aug-2004 BJD  Copied file from 2.4 and updated
+ *    01-May-2005 BJD  Added definitions for s3c2440 controller
 */
 
 #ifndef __ASM_ARM_REGS_NAND
@@ -26,6 +27,22 @@
 #define S3C2410_NFSTAT   S3C2410_NFREG(0x10)
 #define S3C2410_NFECC    S3C2410_NFREG(0x14)
 
+#define S3C2440_NFCONT   S3C2410_NFREG(0x04)
+#define S3C2440_NFCMD    S3C2410_NFREG(0x08)
+#define S3C2440_NFADDR   S3C2410_NFREG(0x0C)
+#define S3C2440_NFDATA   S3C2410_NFREG(0x10)
+#define S3C2440_NFECCD0  S3C2410_NFREG(0x14)
+#define S3C2440_NFECCD1  S3C2410_NFREG(0x18)
+#define S3C2440_NFECCD   S3C2410_NFREG(0x1C)
+#define S3C2440_NFSTAT   S3C2410_NFREG(0x20)
+#define S3C2440_NFESTAT0 S3C2410_NFREG(0x24)
+#define S3C2440_NFESTAT1 S3C2410_NFREG(0x28)
+#define S3C2440_NFMECC0  S3C2410_NFREG(0x2C)
+#define S3C2440_NFMECC1  S3C2410_NFREG(0x30)
+#define S3C2440_NFSECC   S3C2410_NFREG(0x34)
+#define S3C2440_NFSBLK   S3C2410_NFREG(0x38)
+#define S3C2440_NFEBLK   S3C2410_NFREG(0x3C)
+
 #define S3C2410_NFCONF_EN          (1<<15)
 #define S3C2410_NFCONF_512BYTE     (1<<14)
 #define S3C2410_NFCONF_4STEP       (1<<13)
@@ -37,7 +54,28 @@
 
 #define S3C2410_NFSTAT_BUSY        (1<<0)
 
-/* think ECC can only be 8bit read? */
+#define S3C2440_NFCONF_BUSWIDTH_8	(0<<0)
+#define S3C2440_NFCONF_BUSWIDTH_16	(1<<0)
+#define S3C2440_NFCONF_ADVFLASH		(1<<3)
+#define S3C2440_NFCONF_TACLS(x)		((x)<<12)
+#define S3C2440_NFCONF_TWRPH0(x)	((x)<<8)
+#define S3C2440_NFCONF_TWRPH1(x)	((x)<<4)
+
+#define S3C2440_NFCONT_LOCKTIGHT	(1<<13)
+#define S3C2440_NFCONT_SOFTLOCK		(1<<12)
+#define S3C2440_NFCONT_ILLEGALACC_EN	(1<<10)
+#define S3C2440_NFCONT_RNBINT_EN	(1<<9)
+#define S3C2440_NFCONT_RN_FALLING	(1<<8)
+#define S3C2440_NFCONT_SPARE_ECCLOCK	(1<<6)
+#define S3C2440_NFCONT_MAIN_ECCLOCK	(1<<5)
+#define S3C2440_NFCONT_INITECC		(1<<4)
+#define S3C2440_NFCONT_nFCE		(1<<1)
+#define S3C2440_NFCONT_ENABLE		(1<<0)
+
+#define S3C2440_NFSTAT_READY		(1<<0)
+#define S3C2440_NFSTAT_nCE		(1<<1)
+#define S3C2440_NFSTAT_RnB_CHANGE	(1<<2)
+#define S3C2440_NFSTAT_ILLEGAL_ACCESS	(1<<3)
 
 #endif /* __ASM_ARM_REGS_NAND */
 
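
A sketch of how the new s3c2440 definitions might be used to initialise the controller (illustrative only: nand_regs and nand_write() are hypothetical helpers, the timing values are arbitrary, and the register offsets are written out explicitly rather than relying on how S3C2410_NFREG() resolves on a given platform):

/* Illustrative only: s3c2440 NAND controller bring-up using the bit
 * definitions added above.  nand_regs is an assumed ioremap()'d base;
 * the TACLS/TWRPH0/TWRPH1 timings are example values. */
static void __iomem *nand_regs;

#define nand_write(val, off)	__raw_writel((val), nand_regs + (off))

static void s3c2440_nand_example_init(void)
{
	unsigned long cfg;

	cfg = S3C2440_NFCONF_TACLS(3) |		/* CLE/ALE setup time      */
	      S3C2440_NFCONF_TWRPH0(7) |	/* write pulse width       */
	      S3C2440_NFCONF_TWRPH1(7);		/* write pulse hold        */
	nand_write(cfg, 0x00);			/* NFCONF (offset assumed) */

	cfg = S3C2440_NFCONT_ENABLE |		/* enable the controller   */
	      S3C2440_NFCONT_INITECC;		/* reset the ECC generator */
	nand_write(cfg, 0x04);			/* NFCONT, per the header  */
}
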
diff --git a/include/asm-arm/page.h b/include/asm-arm/page.h
index 4ca3a8e9348f..019c45d75730 100644
--- a/include/asm-arm/page.h
+++ b/include/asm-arm/page.h
@@ -114,19 +114,8 @@ extern void __cpu_copy_user_page(void *to, const void *from,
 				 unsigned long user);
 #endif
 
-#define clear_user_page(addr,vaddr,pg)			\
-	do {						\
-		preempt_disable();			\
-		__cpu_clear_user_page(addr, vaddr);	\
-		preempt_enable();			\
-	} while (0)
-
-#define copy_user_page(to,from,vaddr,pg)		\
-	do {						\
-		preempt_disable();			\
-		__cpu_copy_user_page(to, from, vaddr);	\
-		preempt_enable();			\
-	} while (0)
+#define clear_user_page(addr,vaddr,pg)	 __cpu_clear_user_page(addr, vaddr)
+#define copy_user_page(to,from,vaddr,pg) __cpu_copy_user_page(to, from, vaddr)
 
 #define clear_page(page)	memzero((void *)(page), PAGE_SIZE)
 extern void copy_page(void *to, const void *from);
@@ -171,6 +160,9 @@ typedef unsigned long pgprot_t;
 
 #endif /* STRICT_MM_TYPECHECKS */
 
+/* the upper-most page table pointer */
+extern pmd_t *top_pmd;
+
 /* Pure 2^n version of get_order */
 static inline int get_order(unsigned long size)
 {
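
For context, not part of the patch: elsewhere in this header (outside the hunks shown), multi-processor-type builds route __cpu_clear_user_page() and __cpu_copy_user_page() through a boot-time selected function table, which is how structures like the v4_mc_user_fns and v6_user_fns added earlier in this merge are consumed. A hedged sketch of that indirection:

/* Sketch only, assuming the 2.6-era MULTI_USER configuration: the
 * clear_user_page()/copy_user_page() macros above end up calling
 * function pointers chosen at boot from the processor info list. */
struct cpu_user_fns {
	void (*cpu_clear_user_page)(void *p, unsigned long user);
	void (*cpu_copy_user_page)(void *to, const void *from,
				   unsigned long user);
};

extern struct cpu_user_fns cpu_user;

#define __cpu_clear_user_page	cpu_user.cpu_clear_user_page
#define __cpu_copy_user_page	cpu_user.cpu_copy_user_page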