author    Russell King <rmk@dyn-67.arm.linux.org.uk>  2008-11-28 10:39:02 -0500
committer Russell King <rmk+kernel@arm.linux.org.uk>  2008-11-28 10:39:02 -0500
commit    7ef4de17cc55a3c3b8d093743b1e3b845d8eba47 (patch)
tree      adf87c996affbb6c42850d55cb34c0e5a2f6d340
parent    f412b09f4ed7c57f5b8935ed7d6fc786f402a629 (diff)
parent    b5ee9002583fc14e6d45a04c18f208987a8fbced (diff)
Merge branch 'highmem' into devel
Conflicts: arch/arm/mach-clps7500/include/mach/memory.h
-rw-r--r--  arch/arm/include/asm/memory.h | 7
-rw-r--r--  arch/arm/include/asm/page.h | 30
-rw-r--r--  arch/arm/include/asm/setup.h | 6
-rw-r--r--  arch/arm/kernel/setup.c | 37
-rw-r--r--  arch/arm/mach-aaec2000/include/mach/memory.h | 3
-rw-r--r--  arch/arm/mach-at91/include/mach/memory.h | 11
-rw-r--r--  arch/arm/mach-clps711x/include/mach/memory.h | 20
-rw-r--r--  arch/arm/mach-davinci/include/mach/memory.h | 6
-rw-r--r--  arch/arm/mach-ebsa110/include/mach/memory.h | 7
-rw-r--r--  arch/arm/mach-ep93xx/include/mach/memory.h | 4
-rw-r--r--  arch/arm/mach-footbridge/include/mach/memory.h | 9
-rw-r--r--  arch/arm/mach-h720x/include/mach/memory.h | 17
-rw-r--r--  arch/arm/mach-imx/include/mach/memory.h | 10
-rw-r--r--  arch/arm/mach-integrator/include/mach/memory.h | 9
-rw-r--r--  arch/arm/mach-iop13xx/include/mach/memory.h | 16
-rw-r--r--  arch/arm/mach-iop32x/include/mach/memory.h | 11
-rw-r--r--  arch/arm/mach-iop33x/include/mach/memory.h | 11
-rw-r--r--  arch/arm/mach-ixp2000/include/mach/memory.h | 7
-rw-r--r--  arch/arm/mach-ixp23xx/include/mach/memory.h | 13
-rw-r--r--  arch/arm/mach-ixp4xx/include/mach/memory.h | 12
-rw-r--r--  arch/arm/mach-kirkwood/include/mach/memory.h | 4
-rw-r--r--  arch/arm/mach-ks8695/include/mach/memory.h | 5
-rw-r--r--  arch/arm/mach-l7200/include/mach/memory.h | 3
-rw-r--r--  arch/arm/mach-lh7a40x/include/mach/memory.h | 10
-rw-r--r--  arch/arm/mach-loki/include/mach/memory.h | 4
-rw-r--r--  arch/arm/mach-msm/include/mach/memory.h | 4
-rw-r--r--  arch/arm/mach-mv78xx0/include/mach/memory.h | 4
-rw-r--r--  arch/arm/mach-netx/include/mach/memory.h | 10
-rw-r--r--  arch/arm/mach-ns9xxx/include/mach/memory.h | 3
-rw-r--r--  arch/arm/mach-orion5x/include/mach/memory.h | 4
-rw-r--r--  arch/arm/mach-pnx4008/include/mach/memory.h | 3
-rw-r--r--  arch/arm/mach-pxa/include/mach/memory.h | 10
-rw-r--r--  arch/arm/mach-pxa/mioa701.c | 2
-rw-r--r--  arch/arm/mach-pxa/mioa701_bootresume.S | 1
-rw-r--r--  arch/arm/mach-realview/include/mach/memory.h | 10
-rw-r--r--  arch/arm/mach-rpc/include/mach/memory.h | 7
-rw-r--r--  arch/arm/mach-s3c2400/include/mach/memory.h | 3
-rw-r--r--  arch/arm/mach-s3c2410/include/mach/memory.h | 3
-rw-r--r--  arch/arm/mach-sa1100/include/mach/memory.h | 12
-rw-r--r--  arch/arm/mach-shark/include/mach/memory.h | 3
-rw-r--r--  arch/arm/mach-versatile/include/mach/memory.h | 10
-rw-r--r--  arch/arm/mm/copypage-feroceon.S | 95
-rw-r--r--  arch/arm/mm/copypage-feroceon.c | 111
-rw-r--r--  arch/arm/mm/copypage-v3.S | 67
-rw-r--r--  arch/arm/mm/copypage-v3.c | 81
-rw-r--r--  arch/arm/mm/copypage-v4mc.c | 53
-rw-r--r--  arch/arm/mm/copypage-v4wb.S | 79
-rw-r--r--  arch/arm/mm/copypage-v4wb.c | 94
-rw-r--r--  arch/arm/mm/copypage-v4wt.S | 73
-rw-r--r--  arch/arm/mm/copypage-v4wt.c | 88
-rw-r--r--  arch/arm/mm/copypage-v6.c | 84
-rw-r--r--  arch/arm/mm/copypage-xsc3.S | 97
-rw-r--r--  arch/arm/mm/copypage-xsc3.c | 113
-rw-r--r--  arch/arm/mm/copypage-xscale.c | 47
-rw-r--r--  arch/arm/mm/fault.c | 6
-rw-r--r--  arch/arm/mm/init.c | 57
-rw-r--r--  arch/arm/mm/mm.h | 2
-rw-r--r--  arch/arm/mm/mmu.c | 114
-rw-r--r--  arch/arm/mm/nommu.c | 18
-rw-r--r--  arch/arm/mm/proc-syms.c | 4
-rw-r--r--  arch/arm/plat-mxc/include/mach/memory.h | 13
-rw-r--r--  arch/arm/plat-omap/gpio.c | 5
-rw-r--r--  arch/arm/plat-omap/include/mach/memory.h | 17
-rw-r--r--  arch/arm/plat-omap/include/mach/pm.h | 2
-rw-r--r--  drivers/usb/gadget/pxa25x_udc.c | 14
-rw-r--r--  drivers/video/omap/Makefile | 1
-rw-r--r--  drivers/video/omap/lcd_sx1.c | 327
-rw-r--r--  include/linux/highmem.h | 2
68 files changed, 771 insertions, 1254 deletions
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index 77764301844b..0202a7c20e62 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -112,10 +112,8 @@
  * private definitions which should NOT be used outside memory.h
  * files.  Use virt_to_phys/phys_to_virt/__pa/__va instead.
  */
-#ifndef __virt_to_phys
 #define __virt_to_phys(x)	((x) - PAGE_OFFSET + PHYS_OFFSET)
 #define __phys_to_virt(x)	((x) - PHYS_OFFSET + PAGE_OFFSET)
-#endif
 
 /*
  * Convert a physical address to a Page Frame Number and back
@@ -180,6 +178,11 @@ static inline void *phys_to_virt(unsigned long x)
  * memory.  Use of these is *deprecated* (and that doesn't mean
  * use the __ prefixed forms instead.)  See dma-mapping.h.
  */
+#ifndef __virt_to_bus
+#define __virt_to_bus	__virt_to_phys
+#define __bus_to_virt	__phys_to_virt
+#endif
+
 static inline __deprecated unsigned long virt_to_bus(void *x)
 {
 	return __virt_to_bus((unsigned long)x);
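With __virt_to_bus/__bus_to_virt now defaulting to the physical translations, a mach/memory.h only needs to define them when the bus view genuinely differs; because the mach header is pulled in ahead of these defaults, its definitions take precedence. A minimal sketch of such an override for a hypothetical board whose RAM is visible at bus address 0x80000000 (the offset is illustrative, modelled on the integrator change further down this diff, not a real platform):

#ifndef __ASM_ARCH_MEMORY_H
#define __ASM_ARCH_MEMORY_H

#define PHYS_OFFSET	UL(0x00000000)
#define BUS_OFFSET	UL(0x80000000)		/* illustrative value */

/* RAM is 1:1 with physical memory but shifted on the bus */
#define __virt_to_bus(x)	((x) - PAGE_OFFSET + BUS_OFFSET)
#define __bus_to_virt(x)	((x) - BUS_OFFSET + PAGE_OFFSET)

#endif

Boards whose bus addresses simply equal physical addresses (the majority of the files below) can now delete their definitions entirely and rely on the defaults.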
diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
index 5fee45e23038..f341c9dbd662 100644
--- a/arch/arm/include/asm/page.h
+++ b/arch/arm/include/asm/page.h
@@ -108,30 +108,36 @@
 #error Unknown user operations model
 #endif
 
+struct page;
+
 struct cpu_user_fns {
-	void (*cpu_clear_user_page)(void *p, unsigned long user);
-	void (*cpu_copy_user_page)(void *to, const void *from,
-			unsigned long user);
+	void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr);
+	void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
+			unsigned long vaddr);
 };
 
 #ifdef MULTI_USER
 extern struct cpu_user_fns cpu_user;
 
-#define __cpu_clear_user_page		cpu_user.cpu_clear_user_page
-#define __cpu_copy_user_page		cpu_user.cpu_copy_user_page
+#define __cpu_clear_user_highpage	cpu_user.cpu_clear_user_highpage
+#define __cpu_copy_user_highpage	cpu_user.cpu_copy_user_highpage
 
 #else
 
-#define __cpu_clear_user_page		__glue(_USER,_clear_user_page)
-#define __cpu_copy_user_page		__glue(_USER,_copy_user_page)
+#define __cpu_clear_user_highpage	__glue(_USER,_clear_user_highpage)
+#define __cpu_copy_user_highpage	__glue(_USER,_copy_user_highpage)
 
-extern void __cpu_clear_user_page(void *p, unsigned long user);
-extern void __cpu_copy_user_page(void *to, const void *from,
-			unsigned long user);
+extern void __cpu_clear_user_highpage(struct page *page, unsigned long vaddr);
+extern void __cpu_copy_user_highpage(struct page *to, struct page *from,
+			unsigned long vaddr);
 #endif
 
-#define clear_user_page(addr,vaddr,pg)	__cpu_clear_user_page(addr, vaddr)
-#define copy_user_page(to,from,vaddr,pg) __cpu_copy_user_page(to, from, vaddr)
+#define clear_user_highpage(page,vaddr)		\
+	__cpu_clear_user_highpage(page, vaddr)
+
+#define __HAVE_ARCH_COPY_USER_HIGHPAGE
+#define copy_user_highpage(to,from,vaddr,vma)	\
+	__cpu_copy_user_highpage(to, from, vaddr)
 
 #define clear_page(page)	memset((void *)(page), 0, PAGE_SIZE)
 extern void copy_page(void *to, const void *from);
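The new interface hands whole struct page pointers to the per-CPU hooks so they can kmap highmem pages themselves. The pattern a provider follows is visible later in this series (copypage-feroceon.c); a stripped-down sketch of that pattern, using a hypothetical memcpy-based implementation rather than any real CPU's optimised routine:

#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/string.h>

/* Hypothetical "plain" provider for the new highpage hooks. */
static void plain_copy_user_highpage(struct page *to, struct page *from,
		unsigned long vaddr)
{
	void *kto = kmap_atomic(to, KM_USER0);
	void *kfrom = kmap_atomic(from, KM_USER1);

	memcpy(kto, kfrom, PAGE_SIZE);	/* real CPUs use cache-aware asm here */

	kunmap_atomic(kfrom, KM_USER1);
	kunmap_atomic(kto, KM_USER0);
}

static void plain_clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *kaddr = kmap_atomic(page, KM_USER0);

	memset(kaddr, 0, PAGE_SIZE);
	kunmap_atomic(kaddr, KM_USER0);
}

struct cpu_user_fns plain_user_fns __initdata = {
	.cpu_clear_user_highpage = plain_clear_user_highpage,
	.cpu_copy_user_highpage	 = plain_copy_user_highpage,
};

Generic code reaches this through clear_user_highpage()/copy_user_highpage(), which the macros above route to the selected cpu_user_fns entry.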
diff --git a/arch/arm/include/asm/setup.h b/arch/arm/include/asm/setup.h
index a65413ba121d..f2cd18a0932b 100644
--- a/arch/arm/include/asm/setup.h
+++ b/arch/arm/include/asm/setup.h
@@ -209,9 +209,11 @@ struct meminfo {
 	struct membank bank[NR_BANKS];
 };
 
+extern struct meminfo meminfo;
+
 #define for_each_nodebank(iter,mi,no)			\
-	for (iter = 0; iter < mi->nr_banks; iter++)	\
-		if (mi->bank[iter].node == no)
+	for (iter = 0; iter < (mi)->nr_banks; iter++)	\
+		if ((mi)->bank[iter].node == no)
 
 #define bank_pfn_start(bank)	__phys_to_pfn((bank)->start)
 #define bank_pfn_end(bank)	__phys_to_pfn((bank)->start + (bank)->size)
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index d4dae3e9b294..4f6ae06d0855 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -59,7 +59,7 @@ static int __init fpe_setup(char *line)
 __setup("fpe=", fpe_setup);
 #endif
 
-extern void paging_init(struct meminfo *, struct machine_desc *desc);
+extern void paging_init(struct machine_desc *desc);
 extern void reboot_setup(char *str);
 extern void _text, _etext, __data_start, _edata, _end;
 
@@ -112,7 +112,6 @@ static struct stack stacks[NR_CPUS];
 char elf_platform[ELF_PLATFORM_SIZE];
 EXPORT_SYMBOL(elf_platform);
 
-static struct meminfo meminfo __initdata = { 0, };
 static const char *cpu_name;
 static const char *machine_name;
 static char __initdata command_line[COMMAND_LINE_SIZE];
@@ -367,21 +366,34 @@ static struct machine_desc * __init setup_machine(unsigned int nr)
 	return list;
 }
 
-static void __init arm_add_memory(unsigned long start, unsigned long size)
+static int __init arm_add_memory(unsigned long start, unsigned long size)
 {
-	struct membank *bank;
+	struct membank *bank = &meminfo.bank[meminfo.nr_banks];
+
+	if (meminfo.nr_banks >= NR_BANKS) {
+		printk(KERN_CRIT "NR_BANKS too low, "
+			"ignoring memory at %#lx\n", start);
+		return -EINVAL;
+	}
 
 	/*
 	 * Ensure that start/size are aligned to a page boundary.
 	 * Size is appropriately rounded down, start is rounded up.
 	 */
 	size -= start & ~PAGE_MASK;
-
-	bank = &meminfo.bank[meminfo.nr_banks++];
-
 	bank->start = PAGE_ALIGN(start);
 	bank->size  = size & PAGE_MASK;
 	bank->node  = PHYS_TO_NID(start);
+
+	/*
+	 * Check whether this memory region has non-zero size or
+	 * invalid node number.
+	 */
+	if (bank->size == 0 || bank->node >= MAX_NUMNODES)
+		return -EINVAL;
+
+	meminfo.nr_banks++;
+	return 0;
 }
 
 /*
@@ -539,14 +551,7 @@ __tagtable(ATAG_CORE, parse_tag_core);
 
 static int __init parse_tag_mem32(const struct tag *tag)
 {
-	if (meminfo.nr_banks >= NR_BANKS) {
-		printk(KERN_WARNING
-		       "Ignoring memory bank 0x%08x size %dKB\n",
-			tag->u.mem.start, tag->u.mem.size / 1024);
-		return -EINVAL;
-	}
-	arm_add_memory(tag->u.mem.start, tag->u.mem.size);
-	return 0;
+	return arm_add_memory(tag->u.mem.start, tag->u.mem.size);
 }
 
 __tagtable(ATAG_MEM, parse_tag_mem32);
@@ -718,7 +723,7 @@ void __init setup_arch(char **cmdline_p)
 	memcpy(boot_command_line, from, COMMAND_LINE_SIZE);
 	boot_command_line[COMMAND_LINE_SIZE-1] = '\0';
 	parse_cmdline(cmdline_p, from);
-	paging_init(&meminfo, mdesc);
+	paging_init(mdesc);
 	request_standard_resources(&meminfo, mdesc);
 
 #ifdef CONFIG_SMP
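A quick way to sanity-check the rounding in arm_add_memory() above is to trace it with made-up numbers. The small userspace program below is purely illustrative (the values and the program itself are not part of the kernel); it reproduces the arithmetic for a bank reported at a misaligned address:

#include <stdio.h>

/* Stand-alone illustration of the arm_add_memory() rounding with 4 KiB pages. */
#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & PAGE_MASK)

int main(void)
{
	unsigned long start = 0x20000fffUL;	/* misaligned bank start */
	unsigned long size  = 0x2000UL;		/* 8 KiB reported        */

	size -= start & ~PAGE_MASK;		/* 0x1001: drop the unaligned head */
	printf("start %#lx size %#lx\n",
	       PAGE_ALIGN(start),		/* 0x20001000              */
	       size & PAGE_MASK);		/* 0x1000: one whole page  */
	return 0;
}

A bank smaller than a page (or one that straddles no page boundary) rounds down to size 0 and is rejected by the new size/node check, which is why the nr_banks increment now happens only after validation.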
diff --git a/arch/arm/mach-aaec2000/include/mach/memory.h b/arch/arm/mach-aaec2000/include/mach/memory.h
index 56ae900a482e..c00822543d9f 100644
--- a/arch/arm/mach-aaec2000/include/mach/memory.h
+++ b/arch/arm/mach-aaec2000/include/mach/memory.h
@@ -14,9 +14,6 @@
 
 #define PHYS_OFFSET	UL(0xf0000000)
 
-#define __virt_to_bus(x)	__virt_to_phys(x)
-#define __bus_to_virt(x)	__phys_to_virt(x)
-
 /*
  * The nodes are the followings:
  *
diff --git a/arch/arm/mach-at91/include/mach/memory.h b/arch/arm/mach-at91/include/mach/memory.h
index 9dd1b8c79b08..14f4ef4b6a9e 100644
--- a/arch/arm/mach-at91/include/mach/memory.h
+++ b/arch/arm/mach-at91/include/mach/memory.h
@@ -25,15 +25,4 @@
 
 #define PHYS_OFFSET	(AT91_SDRAM_BASE)
 
-
-/*
- * Virtual view <-> DMA view memory address translations
- * virt_to_bus: Used to translate the virtual address to an
- *              address suitable to be passed to set_dma_addr
- * bus_to_virt: Used to convert an address for DMA operations
- *              to an address that the kernel can use.
- */
-#define __virt_to_bus(x)	__virt_to_phys(x)
-#define __bus_to_virt(x)	__phys_to_virt(x)
-
 #endif
diff --git a/arch/arm/mach-clps711x/include/mach/memory.h b/arch/arm/mach-clps711x/include/mach/memory.h
index 98ec30c97bbe..e522b20bcbc2 100644
--- a/arch/arm/mach-clps711x/include/mach/memory.h
+++ b/arch/arm/mach-clps711x/include/mach/memory.h
@@ -26,25 +26,7 @@
26 */ 26 */
27#define PHYS_OFFSET UL(0xc0000000) 27#define PHYS_OFFSET UL(0xc0000000)
28 28
29/* 29#if !defined(CONFIG_ARCH_CDB89712) && !defined (CONFIG_ARCH_AUTCPU12)
30 * Virtual view <-> DMA view memory address translations
31 * virt_to_bus: Used to translate the virtual address to an
32 * address suitable to be passed to set_dma_addr
33 * bus_to_virt: Used to convert an address for DMA operations
34 * to an address that the kernel can use.
35 */
36
37#if defined(CONFIG_ARCH_CDB89712)
38
39#define __virt_to_bus(x) (x)
40#define __bus_to_virt(x) (x)
41
42#elif defined (CONFIG_ARCH_AUTCPU12)
43
44#define __virt_to_bus(x) (x)
45#define __bus_to_virt(x) (x)
46
47#else
48 30
49#define __virt_to_bus(x) ((x) - PAGE_OFFSET) 31#define __virt_to_bus(x) ((x) - PAGE_OFFSET)
50#define __bus_to_virt(x) ((x) + PAGE_OFFSET) 32#define __bus_to_virt(x) ((x) + PAGE_OFFSET)
diff --git a/arch/arm/mach-davinci/include/mach/memory.h b/arch/arm/mach-davinci/include/mach/memory.h
index dd1625c23cf4..3a3353357bac 100644
--- a/arch/arm/mach-davinci/include/mach/memory.h
+++ b/arch/arm/mach-davinci/include/mach/memory.h
@@ -55,10 +55,4 @@ __arch_adjust_zones(int node, unsigned long *size, unsigned long *holes)
55 55
56#endif 56#endif
57 57
58/*
59 * Bus address is physical address
60 */
61#define __virt_to_bus(x) __virt_to_phys(x)
62#define __bus_to_virt(x) __phys_to_virt(x)
63
64#endif /* __ASM_ARCH_MEMORY_H */ 58#endif /* __ASM_ARCH_MEMORY_H */
diff --git a/arch/arm/mach-ebsa110/include/mach/memory.h b/arch/arm/mach-ebsa110/include/mach/memory.h
index eea4b75b657b..0ca66d080c69 100644
--- a/arch/arm/mach-ebsa110/include/mach/memory.h
+++ b/arch/arm/mach-ebsa110/include/mach/memory.h
@@ -22,13 +22,6 @@
22#define PHYS_OFFSET UL(0x00000000) 22#define PHYS_OFFSET UL(0x00000000)
23 23
24/* 24/*
25 * We keep this 1:1 so that we don't interfere
26 * with the PCMCIA memory regions
27 */
28#define __virt_to_bus(x) (x)
29#define __bus_to_virt(x) (x)
30
31/*
32 * Cache flushing area - SRAM 25 * Cache flushing area - SRAM
33 */ 26 */
34#define FLUSH_BASE_PHYS 0x40000000 27#define FLUSH_BASE_PHYS 0x40000000
diff --git a/arch/arm/mach-ep93xx/include/mach/memory.h b/arch/arm/mach-ep93xx/include/mach/memory.h
index f1b633590752..5c80c3c8158d 100644
--- a/arch/arm/mach-ep93xx/include/mach/memory.h
+++ b/arch/arm/mach-ep93xx/include/mach/memory.h
@@ -7,8 +7,4 @@
7 7
8#define PHYS_OFFSET UL(0x00000000) 8#define PHYS_OFFSET UL(0x00000000)
9 9
10#define __bus_to_virt(x) __phys_to_virt(x)
11#define __virt_to_bus(x) __virt_to_phys(x)
12
13
14#endif 10#endif
diff --git a/arch/arm/mach-footbridge/include/mach/memory.h b/arch/arm/mach-footbridge/include/mach/memory.h
index 6ae2f1a07ab9..cb16e59d87b6 100644
--- a/arch/arm/mach-footbridge/include/mach/memory.h
+++ b/arch/arm/mach-footbridge/include/mach/memory.h
@@ -30,9 +30,18 @@
 extern unsigned long __virt_to_bus(unsigned long);
 extern unsigned long __bus_to_virt(unsigned long);
 #endif
+#define __virt_to_bus	__virt_to_bus
+#define __bus_to_virt	__bus_to_virt
 
 #elif defined(CONFIG_FOOTBRIDGE_HOST)
 
+/*
+ * The footbridge is programmed to expose the system RAM at the corresponding
+ * address.  So, if PAGE_OFFSET is 0xc0000000, RAM appears at 0xe0000000.
+ * If 0x80000000, then its exposed at 0xa0000000 on the bus. etc.
+ * The only requirement is that the RAM isn't placed at bus address 0 which
+ * would clash with VGA cards.
+ */
 #define __virt_to_bus(x)	((x) - 0xe0000000)
 #define __bus_to_virt(x)	((x) + 0xe0000000)
 
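To make the new comment concrete, the host-mode translation can be worked through with the comment's own example values; the throwaway program below (not kernel code, just the arithmetic in 32-bit modular form) confirms the numbers quoted above:

#include <stdio.h>

int main(void)
{
	unsigned int page_offset = 0xc0000000u;
	unsigned int virt = page_offset + 0x00100000u;	/* 1 MiB into RAM */

	/* __virt_to_bus(x) = (x) - 0xe0000000 in 32-bit arithmetic */
	printf("bus = %#x\n", virt - 0xe0000000u);	/* 0xe0100000 */

	page_offset = 0x80000000u;
	printf("bus = %#x\n", page_offset - 0xe0000000u);	/* 0xa0000000 */
	return 0;
}

Either way RAM lands well away from bus address 0, which is the clash with VGA cards the comment warns about.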
diff --git a/arch/arm/mach-h720x/include/mach/memory.h b/arch/arm/mach-h720x/include/mach/memory.h
index cb26f49cc4e1..83a2fa090e88 100644
--- a/arch/arm/mach-h720x/include/mach/memory.h
+++ b/arch/arm/mach-h720x/include/mach/memory.h
@@ -7,23 +7,6 @@
7#ifndef __ASM_ARCH_MEMORY_H 7#ifndef __ASM_ARCH_MEMORY_H
8#define __ASM_ARCH_MEMORY_H 8#define __ASM_ARCH_MEMORY_H
9 9
10/*
11 * Page offset:
12 * ( 0xc0000000UL )
13 */
14#define PHYS_OFFSET UL(0x40000000) 10#define PHYS_OFFSET UL(0x40000000)
15 11
16/*
17 * Virtual view <-> DMA view memory address translations
18 * virt_to_bus: Used to translate the virtual address to an
19 * address suitable to be passed to set_dma_addr
20 * bus_to_virt: Used to convert an address for DMA operations
21 * to an address that the kernel can use.
22 *
23 * There is something to do here later !, Mar 2000, Jungjun Kim
24 */
25
26#define __virt_to_bus(x) __virt_to_phys(x)
27#define __bus_to_virt(x) __phys_to_virt(x)
28
29#endif 12#endif
diff --git a/arch/arm/mach-imx/include/mach/memory.h b/arch/arm/mach-imx/include/mach/memory.h
index 5c453063c0ed..a93df7cba694 100644
--- a/arch/arm/mach-imx/include/mach/memory.h
+++ b/arch/arm/mach-imx/include/mach/memory.h
@@ -23,14 +23,4 @@
23 23
24#define PHYS_OFFSET UL(0x08000000) 24#define PHYS_OFFSET UL(0x08000000)
25 25
26/*
27 * Virtual view <-> DMA view memory address translations
28 * virt_to_bus: Used to translate the virtual address to an
29 * address suitable to be passed to set_dma_addr
30 * bus_to_virt: Used to convert an address for DMA operations
31 * to an address that the kernel can use.
32 */
33#define __virt_to_bus(x) (x - PAGE_OFFSET + PHYS_OFFSET)
34#define __bus_to_virt(x) (x - PHYS_OFFSET + PAGE_OFFSET)
35
36#endif 26#endif
diff --git a/arch/arm/mach-integrator/include/mach/memory.h b/arch/arm/mach-integrator/include/mach/memory.h
index be7e63c21d25..30d41d0e7d04 100644
--- a/arch/arm/mach-integrator/include/mach/memory.h
+++ b/arch/arm/mach-integrator/include/mach/memory.h
@@ -24,15 +24,8 @@
24 * Physical DRAM offset. 24 * Physical DRAM offset.
25 */ 25 */
26#define PHYS_OFFSET UL(0x00000000) 26#define PHYS_OFFSET UL(0x00000000)
27#define BUS_OFFSET UL(0x80000000)
28 27
29/* 28#define BUS_OFFSET UL(0x80000000)
30 * Virtual view <-> DMA view memory address translations
31 * virt_to_bus: Used to translate the virtual address to an
32 * address suitable to be passed to set_dma_addr
33 * bus_to_virt: Used to convert an address for DMA operations
34 * to an address that the kernel can use.
35 */
36#define __virt_to_bus(x) (x - PAGE_OFFSET + BUS_OFFSET) 29#define __virt_to_bus(x) (x - PAGE_OFFSET + BUS_OFFSET)
37#define __bus_to_virt(x) (x - BUS_OFFSET + PAGE_OFFSET) 30#define __bus_to_virt(x) (x - BUS_OFFSET + PAGE_OFFSET)
38 31
diff --git a/arch/arm/mach-iop13xx/include/mach/memory.h b/arch/arm/mach-iop13xx/include/mach/memory.h
index b82602d529bf..e012bf13c955 100644
--- a/arch/arm/mach-iop13xx/include/mach/memory.h
+++ b/arch/arm/mach-iop13xx/include/mach/memory.h
@@ -16,18 +16,6 @@
16#define IOP13XX_PMMR_P_START (IOP13XX_PMMR_PHYS_MEM_BASE) 16#define IOP13XX_PMMR_P_START (IOP13XX_PMMR_PHYS_MEM_BASE)
17#define IOP13XX_PMMR_P_END (IOP13XX_PMMR_PHYS_MEM_BASE + IOP13XX_PMMR_SIZE) 17#define IOP13XX_PMMR_P_END (IOP13XX_PMMR_PHYS_MEM_BASE + IOP13XX_PMMR_SIZE)
18 18
19/*
20 * Virtual view <-> PCI DMA view memory address translations
21 * virt_to_bus: Used to translate the virtual address to an
22 * address suitable to be passed to set_dma_addr
23 * bus_to_virt: Used to convert an address for DMA operations
24 * to an address that the kernel can use.
25 */
26
27/* RAM has 1:1 mapping on the PCIe/x Busses */
28#define __virt_to_bus(x) (__virt_to_phys(x))
29#define __bus_to_virt(x) (__phys_to_virt(x))
30
31static inline dma_addr_t __virt_to_lbus(unsigned long x) 19static inline dma_addr_t __virt_to_lbus(unsigned long x)
32{ 20{
33 return x + IOP13XX_PMMR_PHYS_MEM_BASE - IOP13XX_PMMR_VIRT_MEM_BASE; 21 return x + IOP13XX_PMMR_PHYS_MEM_BASE - IOP13XX_PMMR_VIRT_MEM_BASE;
@@ -55,7 +43,7 @@ static inline unsigned long __lbus_to_virt(dma_addr_t x)
55 if (is_lbus_device(dev) && __is_lbus_dma(__dma)) \ 43 if (is_lbus_device(dev) && __is_lbus_dma(__dma)) \
56 __virt = __lbus_to_virt(__dma); \ 44 __virt = __lbus_to_virt(__dma); \
57 else \ 45 else \
58 __virt = __bus_to_virt(__dma); \ 46 __virt = __phys_to_virt(__dma); \
59 (void *)__virt; \ 47 (void *)__virt; \
60 }) 48 })
61 49
@@ -66,7 +54,7 @@ static inline unsigned long __lbus_to_virt(dma_addr_t x)
66 if (is_lbus_device(dev) && __is_lbus_virt(__virt)) \ 54 if (is_lbus_device(dev) && __is_lbus_virt(__virt)) \
67 __dma = __virt_to_lbus(__virt); \ 55 __dma = __virt_to_lbus(__virt); \
68 else \ 56 else \
69 __dma = __virt_to_bus(__virt); \ 57 __dma = __virt_to_phys(__virt); \
70 __dma; \ 58 __dma; \
71 }) 59 })
72 60
diff --git a/arch/arm/mach-iop32x/include/mach/memory.h b/arch/arm/mach-iop32x/include/mach/memory.h
index 42cd4bf3148c..61c7bdb26a81 100644
--- a/arch/arm/mach-iop32x/include/mach/memory.h
+++ b/arch/arm/mach-iop32x/include/mach/memory.h
@@ -12,15 +12,4 @@
12 */ 12 */
13#define PHYS_OFFSET UL(0xa0000000) 13#define PHYS_OFFSET UL(0xa0000000)
14 14
15/*
16 * Virtual view <-> PCI DMA view memory address translations
17 * virt_to_bus: Used to translate the virtual address to an
18 * address suitable to be passed to set_dma_addr
19 * bus_to_virt: Used to convert an address for DMA operations
20 * to an address that the kernel can use.
21 */
22#define __virt_to_bus(x) (__virt_to_phys(x))
23#define __bus_to_virt(x) (__phys_to_virt(x))
24
25
26#endif 15#endif
diff --git a/arch/arm/mach-iop33x/include/mach/memory.h b/arch/arm/mach-iop33x/include/mach/memory.h
index 2cef0bbb354f..c46c6ba30186 100644
--- a/arch/arm/mach-iop33x/include/mach/memory.h
+++ b/arch/arm/mach-iop33x/include/mach/memory.h
@@ -12,15 +12,4 @@
12 */ 12 */
13#define PHYS_OFFSET UL(0x00000000) 13#define PHYS_OFFSET UL(0x00000000)
14 14
15/*
16 * Virtual view <-> PCI DMA view memory address translations
17 * virt_to_bus: Used to translate the virtual address to an
18 * address suitable to be passed to set_dma_addr
19 * bus_to_virt: Used to convert an address for DMA operations
20 * to an address that the kernel can use.
21 */
22#define __virt_to_bus(x) (__virt_to_phys(x))
23#define __bus_to_virt(x) (__phys_to_virt(x))
24
25
26#endif 15#endif
diff --git a/arch/arm/mach-ixp2000/include/mach/memory.h b/arch/arm/mach-ixp2000/include/mach/memory.h
index 241529a7c52d..aee7eb8a71b2 100644
--- a/arch/arm/mach-ixp2000/include/mach/memory.h
+++ b/arch/arm/mach-ixp2000/include/mach/memory.h
@@ -15,13 +15,6 @@
15 15
16#define PHYS_OFFSET UL(0x00000000) 16#define PHYS_OFFSET UL(0x00000000)
17 17
18/*
19 * Virtual view <-> DMA view memory address translations
20 * virt_to_bus: Used to translate the virtual address to an
21 * address suitable to be passed to set_dma_addr
22 * bus_to_virt: Used to convert an address for DMA operations
23 * to an address that the kernel can use.
24 */
25#include <mach/ixp2000-regs.h> 18#include <mach/ixp2000-regs.h>
26 19
27#define __virt_to_bus(v) \ 20#define __virt_to_bus(v) \
diff --git a/arch/arm/mach-ixp23xx/include/mach/memory.h b/arch/arm/mach-ixp23xx/include/mach/memory.h
index 9d40115f7ebe..fdd138706c70 100644
--- a/arch/arm/mach-ixp23xx/include/mach/memory.h
+++ b/arch/arm/mach-ixp23xx/include/mach/memory.h
@@ -19,16 +19,6 @@
19 */ 19 */
20#define PHYS_OFFSET (0x00000000) 20#define PHYS_OFFSET (0x00000000)
21 21
22
23/*
24 * Virtual view <-> DMA view memory address translations
25 * virt_to_bus: Used to translate the virtual address to an
26 * address suitable to be passed to set_dma_addr
27 * bus_to_virt: Used to convert an address for DMA operations
28 * to an address that the kernel can use.
29 */
30#ifndef __ASSEMBLY__
31
32#define __virt_to_bus(v) \ 22#define __virt_to_bus(v) \
33 ({ unsigned int ret; \ 23 ({ unsigned int ret; \
34 ret = ((__virt_to_phys(v) - 0x00000000) + \ 24 ret = ((__virt_to_phys(v) - 0x00000000) + \
@@ -43,6 +33,3 @@
43#define arch_is_coherent() 1 33#define arch_is_coherent() 1
44 34
45#endif 35#endif
46
47
48#endif
diff --git a/arch/arm/mach-ixp4xx/include/mach/memory.h b/arch/arm/mach-ixp4xx/include/mach/memory.h
index c4d2830ac987..2e481db0ca58 100644
--- a/arch/arm/mach-ixp4xx/include/mach/memory.h
+++ b/arch/arm/mach-ixp4xx/include/mach/memory.h
@@ -25,16 +25,4 @@ void ixp4xx_adjust_zones(int node, unsigned long *size, unsigned long *holes);
25 25
26#endif 26#endif
27 27
28/*
29 * Virtual view <-> DMA view memory address translations
30 * virt_to_bus: Used to translate the virtual address to an
31 * address suitable to be passed to set_dma_addr
32 * bus_to_virt: Used to convert an address for DMA operations
33 * to an address that the kernel can use.
34 *
35 * These are dummies for now.
36 */
37#define __virt_to_bus(x) __virt_to_phys(x)
38#define __bus_to_virt(x) __phys_to_virt(x)
39
40#endif 28#endif
diff --git a/arch/arm/mach-kirkwood/include/mach/memory.h b/arch/arm/mach-kirkwood/include/mach/memory.h
index b5fb34bdccd5..45431e131465 100644
--- a/arch/arm/mach-kirkwood/include/mach/memory.h
+++ b/arch/arm/mach-kirkwood/include/mach/memory.h
@@ -7,8 +7,4 @@
7 7
8#define PHYS_OFFSET UL(0x00000000) 8#define PHYS_OFFSET UL(0x00000000)
9 9
10#define __virt_to_bus(x) __virt_to_phys(x)
11#define __bus_to_virt(x) __phys_to_virt(x)
12
13
14#endif 10#endif
diff --git a/arch/arm/mach-ks8695/include/mach/memory.h b/arch/arm/mach-ks8695/include/mach/memory.h
index 8fbc4c76c38b..6d5887cf5742 100644
--- a/arch/arm/mach-ks8695/include/mach/memory.h
+++ b/arch/arm/mach-ks8695/include/mach/memory.h
@@ -37,11 +37,6 @@ extern struct bus_type platform_bus_type;
37 (dma_addr_t)__virt_to_phys(x) : (dma_addr_t)__virt_to_bus(x); }) 37 (dma_addr_t)__virt_to_phys(x) : (dma_addr_t)__virt_to_bus(x); })
38#define __arch_page_to_dma(dev, x) __arch_virt_to_dma(dev, page_address(x)) 38#define __arch_page_to_dma(dev, x) __arch_virt_to_dma(dev, page_address(x))
39 39
40#else
41
42#define __virt_to_bus(x) __virt_to_phys(x)
43#define __bus_to_virt(x) __phys_to_virt(x)
44
45#endif 40#endif
46 41
47#endif 42#endif
diff --git a/arch/arm/mach-l7200/include/mach/memory.h b/arch/arm/mach-l7200/include/mach/memory.h
index f338cf3ffd93..9fb40ed2f03b 100644
--- a/arch/arm/mach-l7200/include/mach/memory.h
+++ b/arch/arm/mach-l7200/include/mach/memory.h
@@ -17,9 +17,6 @@
17 */ 17 */
18#define PHYS_OFFSET UL(0xf0000000) 18#define PHYS_OFFSET UL(0xf0000000)
19 19
20#define __virt_to_bus(x) __virt_to_phys(x)
21#define __bus_to_virt(x) __phys_to_virt(x)
22
23/* 20/*
24 * Cache flushing area - ROM 21 * Cache flushing area - ROM
25 */ 22 */
diff --git a/arch/arm/mach-lh7a40x/include/mach/memory.h b/arch/arm/mach-lh7a40x/include/mach/memory.h
index 1da14ff66c93..189d20e543e7 100644
--- a/arch/arm/mach-lh7a40x/include/mach/memory.h
+++ b/arch/arm/mach-lh7a40x/include/mach/memory.h
@@ -19,16 +19,6 @@
19 */ 19 */
20#define PHYS_OFFSET UL(0xc0000000) 20#define PHYS_OFFSET UL(0xc0000000)
21 21
22/*
23 * Virtual view <-> DMA view memory address translations
24 * virt_to_bus: Used to translate the virtual address to an
25 * address suitable to be passed to set_dma_addr
26 * bus_to_virt: Used to convert an address for DMA operations
27 * to an address that the kernel can use.
28 */
29#define __virt_to_bus(x) __virt_to_phys(x)
30#define __bus_to_virt(x) __phys_to_virt(x)
31
32#ifdef CONFIG_DISCONTIGMEM 22#ifdef CONFIG_DISCONTIGMEM
33 23
34/* 24/*
diff --git a/arch/arm/mach-loki/include/mach/memory.h b/arch/arm/mach-loki/include/mach/memory.h
index a39533ab489d..2ed7e6e732c2 100644
--- a/arch/arm/mach-loki/include/mach/memory.h
+++ b/arch/arm/mach-loki/include/mach/memory.h
@@ -7,8 +7,4 @@
7 7
8#define PHYS_OFFSET UL(0x00000000) 8#define PHYS_OFFSET UL(0x00000000)
9 9
10#define __virt_to_bus(x) __virt_to_phys(x)
11#define __bus_to_virt(x) __phys_to_virt(x)
12
13
14#endif 10#endif
diff --git a/arch/arm/mach-msm/include/mach/memory.h b/arch/arm/mach-msm/include/mach/memory.h
index 63fd47f2e62e..f4698baec976 100644
--- a/arch/arm/mach-msm/include/mach/memory.h
+++ b/arch/arm/mach-msm/include/mach/memory.h
@@ -19,9 +19,5 @@
19/* physical offset of RAM */ 19/* physical offset of RAM */
20#define PHYS_OFFSET UL(0x10000000) 20#define PHYS_OFFSET UL(0x10000000)
21 21
22/* bus address and physical addresses are identical */
23#define __virt_to_bus(x) __virt_to_phys(x)
24#define __bus_to_virt(x) __phys_to_virt(x)
25
26#endif 22#endif
27 23
diff --git a/arch/arm/mach-mv78xx0/include/mach/memory.h b/arch/arm/mach-mv78xx0/include/mach/memory.h
index 9e47a140ff7a..e663042d307f 100644
--- a/arch/arm/mach-mv78xx0/include/mach/memory.h
+++ b/arch/arm/mach-mv78xx0/include/mach/memory.h
@@ -7,8 +7,4 @@
7 7
8#define PHYS_OFFSET UL(0x00000000) 8#define PHYS_OFFSET UL(0x00000000)
9 9
10#define __virt_to_bus(x) __virt_to_phys(x)
11#define __bus_to_virt(x) __phys_to_virt(x)
12
13
14#endif 10#endif
diff --git a/arch/arm/mach-netx/include/mach/memory.h b/arch/arm/mach-netx/include/mach/memory.h
index 53745a1378de..9a363f297f90 100644
--- a/arch/arm/mach-netx/include/mach/memory.h
+++ b/arch/arm/mach-netx/include/mach/memory.h
@@ -22,15 +22,5 @@
22 22
23#define PHYS_OFFSET UL(0x80000000) 23#define PHYS_OFFSET UL(0x80000000)
24 24
25/*
26 * Virtual view <-> DMA view memory address translations
27 * virt_to_bus: Used to translate the virtual address to an
28 * address suitable to be passed to set_dma_addr
29 * bus_to_virt: Used to convert an address for DMA operations
30 * to an address that the kernel can use.
31 */
32#define __virt_to_bus(x) __virt_to_phys(x)
33#define __bus_to_virt(x) __phys_to_virt(x)
34
35#endif 25#endif
36 26
diff --git a/arch/arm/mach-ns9xxx/include/mach/memory.h b/arch/arm/mach-ns9xxx/include/mach/memory.h
index 649ee6235b94..6107193adbfe 100644
--- a/arch/arm/mach-ns9xxx/include/mach/memory.h
+++ b/arch/arm/mach-ns9xxx/include/mach/memory.h
@@ -21,7 +21,4 @@
21 21
22#define PHYS_OFFSET UL(0x00000000) 22#define PHYS_OFFSET UL(0x00000000)
23 23
24#define __virt_to_bus(x) __virt_to_phys(x)
25#define __bus_to_virt(x) __phys_to_virt(x)
26
27#endif 24#endif
diff --git a/arch/arm/mach-orion5x/include/mach/memory.h b/arch/arm/mach-orion5x/include/mach/memory.h
index 54dd76b013f2..52a2955d0f87 100644
--- a/arch/arm/mach-orion5x/include/mach/memory.h
+++ b/arch/arm/mach-orion5x/include/mach/memory.h
@@ -9,8 +9,4 @@
9 9
10#define PHYS_OFFSET UL(0x00000000) 10#define PHYS_OFFSET UL(0x00000000)
11 11
12#define __virt_to_bus(x) __virt_to_phys(x)
13#define __bus_to_virt(x) __phys_to_virt(x)
14
15
16#endif 12#endif
diff --git a/arch/arm/mach-pnx4008/include/mach/memory.h b/arch/arm/mach-pnx4008/include/mach/memory.h
index 5789a2d16f5a..b38d50c156c4 100644
--- a/arch/arm/mach-pnx4008/include/mach/memory.h
+++ b/arch/arm/mach-pnx4008/include/mach/memory.h
@@ -18,7 +18,4 @@
18 */ 18 */
19#define PHYS_OFFSET (0x80000000) 19#define PHYS_OFFSET (0x80000000)
20 20
21#define __virt_to_bus(x) ((x) - PAGE_OFFSET + PHYS_OFFSET)
22#define __bus_to_virt(x) ((x) + PAGE_OFFSET - PHYS_OFFSET)
23
24#endif 21#endif
diff --git a/arch/arm/mach-pxa/include/mach/memory.h b/arch/arm/mach-pxa/include/mach/memory.h
index 59aef89808d6..eac491c2d741 100644
--- a/arch/arm/mach-pxa/include/mach/memory.h
+++ b/arch/arm/mach-pxa/include/mach/memory.h
@@ -18,16 +18,6 @@
18#define PHYS_OFFSET UL(0xa0000000) 18#define PHYS_OFFSET UL(0xa0000000)
19 19
20/* 20/*
21 * Virtual view <-> DMA view memory address translations
22 * virt_to_bus: Used to translate the virtual address to an
23 * address suitable to be passed to set_dma_addr
24 * bus_to_virt: Used to convert an address for DMA operations
25 * to an address that the kernel can use.
26 */
27#define __virt_to_bus(x) __virt_to_phys(x)
28#define __bus_to_virt(x) __phys_to_virt(x)
29
30/*
31 * The nodes are matched with the physical SDRAM banks as follows: 21 * The nodes are matched with the physical SDRAM banks as follows:
32 * 22 *
33 * node 0: 0xa0000000-0xa3ffffff --> 0xc0000000-0xc3ffffff 23 * node 0: 0xa0000000-0xa3ffffff --> 0xc0000000-0xc3ffffff
diff --git a/arch/arm/mach-pxa/mioa701.c b/arch/arm/mach-pxa/mioa701.c
index 0842c531ee4d..782903fe9c6c 100644
--- a/arch/arm/mach-pxa/mioa701.c
+++ b/arch/arm/mach-pxa/mioa701.c
@@ -565,7 +565,7 @@ static int mioa701_sys_suspend(struct sys_device *sysdev, pm_message_t state)
565 u32 *mem_resume_unknown = phys_to_virt(RESUME_UNKNOWN_ADDR); 565 u32 *mem_resume_unknown = phys_to_virt(RESUME_UNKNOWN_ADDR);
566 566
567 /* Devices prepare suspend */ 567 /* Devices prepare suspend */
568 is_bt_on = gpio_get_value(GPIO83_BT_ON); 568 is_bt_on = !!gpio_get_value(GPIO83_BT_ON);
569 pxa2xx_mfp_set_lpm(GPIO83_BT_ON, 569 pxa2xx_mfp_set_lpm(GPIO83_BT_ON,
570 is_bt_on ? MFP_LPM_DRIVE_HIGH : MFP_LPM_DRIVE_LOW); 570 is_bt_on ? MFP_LPM_DRIVE_HIGH : MFP_LPM_DRIVE_LOW);
571 571
diff --git a/arch/arm/mach-pxa/mioa701_bootresume.S b/arch/arm/mach-pxa/mioa701_bootresume.S
index a647693d9856..324d25a48c85 100644
--- a/arch/arm/mach-pxa/mioa701_bootresume.S
+++ b/arch/arm/mach-pxa/mioa701_bootresume.S
@@ -24,6 +24,7 @@ ENTRY(mioa701_jumpaddr)
241: 241:
25 mov r0, #0xa0000000 @ Don't suppose memory access works 25 mov r0, #0xa0000000 @ Don't suppose memory access works
26 orr r0, r0, #0x00200000 @ even if it's supposed to 26 orr r0, r0, #0x00200000 @ even if it's supposed to
27 orr r0, r0, #0x0000b000
27 mov r1, #0 28 mov r1, #0
28 str r1, [r0] @ Early disable resume for next boot 29 str r1, [r0] @ Early disable resume for next boot
29 ldr r0, mioa701_jumpaddr @ (Murphy's Law) 30 ldr r0, mioa701_jumpaddr @ (Murphy's Law)
diff --git a/arch/arm/mach-realview/include/mach/memory.h b/arch/arm/mach-realview/include/mach/memory.h
index 0e673483a141..65a0742094f7 100644
--- a/arch/arm/mach-realview/include/mach/memory.h
+++ b/arch/arm/mach-realview/include/mach/memory.h
@@ -25,14 +25,4 @@
25 */ 25 */
26#define PHYS_OFFSET UL(0x00000000) 26#define PHYS_OFFSET UL(0x00000000)
27 27
28/*
29 * Virtual view <-> DMA view memory address translations
30 * virt_to_bus: Used to translate the virtual address to an
31 * address suitable to be passed to set_dma_addr
32 * bus_to_virt: Used to convert an address for DMA operations
33 * to an address that the kernel can use.
34 */
35#define __virt_to_bus(x) ((x) - PAGE_OFFSET)
36#define __bus_to_virt(x) ((x) + PAGE_OFFSET)
37
38#endif 28#endif
diff --git a/arch/arm/mach-rpc/include/mach/memory.h b/arch/arm/mach-rpc/include/mach/memory.h
index 9bf7e43e2863..78191bf25192 100644
--- a/arch/arm/mach-rpc/include/mach/memory.h
+++ b/arch/arm/mach-rpc/include/mach/memory.h
@@ -24,13 +24,6 @@
24#define PHYS_OFFSET UL(0x10000000) 24#define PHYS_OFFSET UL(0x10000000)
25 25
26/* 26/*
27 * These are exactly the same on the RiscPC as the
28 * physical memory view.
29 */
30#define __virt_to_bus(x) __virt_to_phys(x)
31#define __bus_to_virt(x) __phys_to_virt(x)
32
33/*
34 * Cache flushing area - ROM 27 * Cache flushing area - ROM
35 */ 28 */
36#define FLUSH_BASE_PHYS 0x00000000 29#define FLUSH_BASE_PHYS 0x00000000
diff --git a/arch/arm/mach-s3c2400/include/mach/memory.h b/arch/arm/mach-s3c2400/include/mach/memory.h
index 8f4878e4f591..cf5901ffd385 100644
--- a/arch/arm/mach-s3c2400/include/mach/memory.h
+++ b/arch/arm/mach-s3c2400/include/mach/memory.h
@@ -17,7 +17,4 @@
17 17
18#define PHYS_OFFSET UL(0x0C000000) 18#define PHYS_OFFSET UL(0x0C000000)
19 19
20#define __virt_to_bus(x) __virt_to_phys(x)
21#define __bus_to_virt(x) __phys_to_virt(x)
22
23#endif 20#endif
diff --git a/arch/arm/mach-s3c2410/include/mach/memory.h b/arch/arm/mach-s3c2410/include/mach/memory.h
index 93782628a786..6f1e5871ae4b 100644
--- a/arch/arm/mach-s3c2410/include/mach/memory.h
+++ b/arch/arm/mach-s3c2410/include/mach/memory.h
@@ -13,7 +13,4 @@
13 13
14#define PHYS_OFFSET UL(0x30000000) 14#define PHYS_OFFSET UL(0x30000000)
15 15
16#define __virt_to_bus(x) __virt_to_phys(x)
17#define __bus_to_virt(x) __phys_to_virt(x)
18
19#endif 16#endif
diff --git a/arch/arm/mach-sa1100/include/mach/memory.h b/arch/arm/mach-sa1100/include/mach/memory.h
index 1c127b68581d..6984034f6958 100644
--- a/arch/arm/mach-sa1100/include/mach/memory.h
+++ b/arch/arm/mach-sa1100/include/mach/memory.h
@@ -28,18 +28,6 @@ void sa1111_adjust_zones(int node, unsigned long *size, unsigned long *holes);
28#endif 28#endif
29 29
30/* 30/*
31 * Virtual view <-> DMA view memory address translations
32 * virt_to_bus: Used to translate the virtual address to an
33 * address suitable to be passed to set_dma_addr
34 * bus_to_virt: Used to convert an address for DMA operations
35 * to an address that the kernel can use.
36 *
37 * On the SA1100, bus addresses are equivalent to physical addresses.
38 */
39#define __virt_to_bus(x) __virt_to_phys(x)
40#define __bus_to_virt(x) __phys_to_virt(x)
41
42/*
43 * Because of the wide memory address space between physical RAM banks on the 31 * Because of the wide memory address space between physical RAM banks on the
44 * SA1100, it's much convenient to use Linux's SparseMEM support to implement 32 * SA1100, it's much convenient to use Linux's SparseMEM support to implement
45 * our memory map representation. Assuming all memory nodes have equal access 33 * our memory map representation. Assuming all memory nodes have equal access
diff --git a/arch/arm/mach-shark/include/mach/memory.h b/arch/arm/mach-shark/include/mach/memory.h
index b7874ad9f9f6..d00c05eabd52 100644
--- a/arch/arm/mach-shark/include/mach/memory.h
+++ b/arch/arm/mach-shark/include/mach/memory.h
@@ -36,9 +36,6 @@ static inline void __arch_adjust_zones(int node, unsigned long *zone_size, unsig
36 36
37#endif 37#endif
38 38
39#define __virt_to_bus(x) __virt_to_phys(x)
40#define __bus_to_virt(x) __phys_to_virt(x)
41
42/* 39/*
43 * Cache flushing area 40 * Cache flushing area
44 */ 41 */
diff --git a/arch/arm/mach-versatile/include/mach/memory.h b/arch/arm/mach-versatile/include/mach/memory.h
index b6315c0602ac..79aeab86b903 100644
--- a/arch/arm/mach-versatile/include/mach/memory.h
+++ b/arch/arm/mach-versatile/include/mach/memory.h
@@ -25,14 +25,4 @@
25 */ 25 */
26#define PHYS_OFFSET UL(0x00000000) 26#define PHYS_OFFSET UL(0x00000000)
27 27
28/*
29 * Virtual view <-> DMA view memory address translations
30 * virt_to_bus: Used to translate the virtual address to an
31 * address suitable to be passed to set_dma_addr
32 * bus_to_virt: Used to convert an address for DMA operations
33 * to an address that the kernel can use.
34 */
35#define __virt_to_bus(x) ((x) - PAGE_OFFSET)
36#define __bus_to_virt(x) ((x) + PAGE_OFFSET)
37
38#endif 28#endif
diff --git a/arch/arm/mm/copypage-feroceon.S b/arch/arm/mm/copypage-feroceon.S
deleted file mode 100644
index 7eb0d320d240..000000000000
--- a/arch/arm/mm/copypage-feroceon.S
+++ /dev/null
@@ -1,95 +0,0 @@
1/*
2 * linux/arch/arm/lib/copypage-feroceon.S
3 *
4 * Copyright (C) 2008 Marvell Semiconductors
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * This handles copy_user_page and clear_user_page on Feroceon
11 * more optimally than the generic implementations.
12 */
13#include <linux/linkage.h>
14#include <linux/init.h>
15#include <asm/asm-offsets.h>
16
17 .text
18 .align 5
19
20ENTRY(feroceon_copy_user_page)
21 stmfd sp!, {r4-r9, lr}
22 mov ip, #PAGE_SZ
231: mov lr, r1
24 ldmia r1!, {r2 - r9}
25 pld [lr, #32]
26 pld [lr, #64]
27 pld [lr, #96]
28 pld [lr, #128]
29 pld [lr, #160]
30 pld [lr, #192]
31 pld [lr, #224]
32 stmia r0, {r2 - r9}
33 ldmia r1!, {r2 - r9}
34 mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D line
35 add r0, r0, #32
36 stmia r0, {r2 - r9}
37 ldmia r1!, {r2 - r9}
38 mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D line
39 add r0, r0, #32
40 stmia r0, {r2 - r9}
41 ldmia r1!, {r2 - r9}
42 mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D line
43 add r0, r0, #32
44 stmia r0, {r2 - r9}
45 ldmia r1!, {r2 - r9}
46 mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D line
47 add r0, r0, #32
48 stmia r0, {r2 - r9}
49 ldmia r1!, {r2 - r9}
50 mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D line
51 add r0, r0, #32
52 stmia r0, {r2 - r9}
53 ldmia r1!, {r2 - r9}
54 mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D line
55 add r0, r0, #32
56 stmia r0, {r2 - r9}
57 ldmia r1!, {r2 - r9}
58 mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D line
59 add r0, r0, #32
60 stmia r0, {r2 - r9}
61 subs ip, ip, #(32 * 8)
62 mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D line
63 add r0, r0, #32
64 bne 1b
65 mcr p15, 0, ip, c7, c10, 4 @ drain WB
66 ldmfd sp!, {r4-r9, pc}
67
68 .align 5
69
70ENTRY(feroceon_clear_user_page)
71 stmfd sp!, {r4-r7, lr}
72 mov r1, #PAGE_SZ/32
73 mov r2, #0
74 mov r3, #0
75 mov r4, #0
76 mov r5, #0
77 mov r6, #0
78 mov r7, #0
79 mov ip, #0
80 mov lr, #0
811: stmia r0, {r2-r7, ip, lr}
82 subs r1, r1, #1
83 mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D line
84 add r0, r0, #32
85 bne 1b
86 mcr p15, 0, r1, c7, c10, 4 @ drain WB
87 ldmfd sp!, {r4-r7, pc}
88
89 __INITDATA
90
91 .type feroceon_user_fns, #object
92ENTRY(feroceon_user_fns)
93 .long feroceon_clear_user_page
94 .long feroceon_copy_user_page
95 .size feroceon_user_fns, . - feroceon_user_fns
diff --git a/arch/arm/mm/copypage-feroceon.c b/arch/arm/mm/copypage-feroceon.c
new file mode 100644
index 000000000000..c3ba6a94da0c
--- /dev/null
+++ b/arch/arm/mm/copypage-feroceon.c
@@ -0,0 +1,111 @@
1/*
2 * linux/arch/arm/mm/copypage-feroceon.S
3 *
4 * Copyright (C) 2008 Marvell Semiconductors
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * This handles copy_user_highpage and clear_user_page on Feroceon
11 * more optimally than the generic implementations.
12 */
13#include <linux/init.h>
14#include <linux/highmem.h>
15
16static void __attribute__((naked))
17feroceon_copy_user_page(void *kto, const void *kfrom)
18{
19 asm("\
20 stmfd sp!, {r4-r9, lr} \n\
21 mov ip, %0 \n\
221: mov lr, r1 \n\
23 ldmia r1!, {r2 - r9} \n\
24 pld [lr, #32] \n\
25 pld [lr, #64] \n\
26 pld [lr, #96] \n\
27 pld [lr, #128] \n\
28 pld [lr, #160] \n\
29 pld [lr, #192] \n\
30 pld [lr, #224] \n\
31 stmia r0, {r2 - r9} \n\
32 ldmia r1!, {r2 - r9} \n\
33 mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D line\n\
34 add r0, r0, #32 \n\
35 stmia r0, {r2 - r9} \n\
36 ldmia r1!, {r2 - r9} \n\
37 mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D line\n\
38 add r0, r0, #32 \n\
39 stmia r0, {r2 - r9} \n\
40 ldmia r1!, {r2 - r9} \n\
41 mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D line\n\
42 add r0, r0, #32 \n\
43 stmia r0, {r2 - r9} \n\
44 ldmia r1!, {r2 - r9} \n\
45 mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D line\n\
46 add r0, r0, #32 \n\
47 stmia r0, {r2 - r9} \n\
48 ldmia r1!, {r2 - r9} \n\
49 mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D line\n\
50 add r0, r0, #32 \n\
51 stmia r0, {r2 - r9} \n\
52 ldmia r1!, {r2 - r9} \n\
53 mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D line\n\
54 add r0, r0, #32 \n\
55 stmia r0, {r2 - r9} \n\
56 ldmia r1!, {r2 - r9} \n\
57 mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D line\n\
58 add r0, r0, #32 \n\
59 stmia r0, {r2 - r9} \n\
60 subs ip, ip, #(32 * 8) \n\
61 mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D line\n\
62 add r0, r0, #32 \n\
63 bne 1b \n\
64 mcr p15, 0, ip, c7, c10, 4 @ drain WB\n\
65 ldmfd sp!, {r4-r9, pc}"
66 :
67 : "I" (PAGE_SIZE));
68}
69
70void feroceon_copy_user_highpage(struct page *to, struct page *from,
71 unsigned long vaddr)
72{
73 void *kto, *kfrom;
74
75 kto = kmap_atomic(to, KM_USER0);
76 kfrom = kmap_atomic(from, KM_USER1);
77 feroceon_copy_user_page(kto, kfrom);
78 kunmap_atomic(kfrom, KM_USER1);
79 kunmap_atomic(kto, KM_USER0);
80}
81
82void feroceon_clear_user_highpage(struct page *page, unsigned long vaddr)
83{
84 void *ptr, *kaddr = kmap_atomic(page, KM_USER0);
85 asm volatile ("\
86 mov r1, %2 \n\
87 mov r2, #0 \n\
88 mov r3, #0 \n\
89 mov r4, #0 \n\
90 mov r5, #0 \n\
91 mov r6, #0 \n\
92 mov r7, #0 \n\
93 mov ip, #0 \n\
94 mov lr, #0 \n\
951: stmia %0, {r2-r7, ip, lr} \n\
96 subs r1, r1, #1 \n\
97 mcr p15, 0, %0, c7, c14, 1 @ clean and invalidate D line\n\
98 add %0, %0, #32 \n\
99 bne 1b \n\
100 mcr p15, 0, r1, c7, c10, 4 @ drain WB"
101 : "=r" (ptr)
102 : "0" (kaddr), "I" (PAGE_SIZE / 32)
103 : "r1", "r2", "r3", "r4", "r5", "r6", "r7", "ip", "lr");
104 kunmap_atomic(kaddr, KM_USER0);
105}
106
107struct cpu_user_fns feroceon_user_fns __initdata = {
108 .cpu_clear_user_highpage = feroceon_clear_user_highpage,
109 .cpu_copy_user_highpage = feroceon_copy_user_highpage,
110};
111
diff --git a/arch/arm/mm/copypage-v3.S b/arch/arm/mm/copypage-v3.S
deleted file mode 100644
index 2ee394b11bcb..000000000000
--- a/arch/arm/mm/copypage-v3.S
+++ /dev/null
@@ -1,67 +0,0 @@
1/*
2 * linux/arch/arm/lib/copypage.S
3 *
4 * Copyright (C) 1995-1999 Russell King
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * ASM optimised string functions
11 */
12#include <linux/linkage.h>
13#include <linux/init.h>
14#include <asm/assembler.h>
15#include <asm/asm-offsets.h>
16
17 .text
18 .align 5
19/*
20 * ARMv3 optimised copy_user_page
21 *
22 * FIXME: do we need to handle cache stuff...
23 */
24ENTRY(v3_copy_user_page)
25 stmfd sp!, {r4, lr} @ 2
26 mov r2, #PAGE_SZ/64 @ 1
27 ldmia r1!, {r3, r4, ip, lr} @ 4+1
281: stmia r0!, {r3, r4, ip, lr} @ 4
29 ldmia r1!, {r3, r4, ip, lr} @ 4+1
30 stmia r0!, {r3, r4, ip, lr} @ 4
31 ldmia r1!, {r3, r4, ip, lr} @ 4+1
32 stmia r0!, {r3, r4, ip, lr} @ 4
33 ldmia r1!, {r3, r4, ip, lr} @ 4
34 subs r2, r2, #1 @ 1
35 stmia r0!, {r3, r4, ip, lr} @ 4
36 ldmneia r1!, {r3, r4, ip, lr} @ 4
37 bne 1b @ 1
38 ldmfd sp!, {r4, pc} @ 3
39
40 .align 5
41/*
42 * ARMv3 optimised clear_user_page
43 *
44 * FIXME: do we need to handle cache stuff...
45 */
46ENTRY(v3_clear_user_page)
47 str lr, [sp, #-4]!
48 mov r1, #PAGE_SZ/64 @ 1
49 mov r2, #0 @ 1
50 mov r3, #0 @ 1
51 mov ip, #0 @ 1
52 mov lr, #0 @ 1
531: stmia r0!, {r2, r3, ip, lr} @ 4
54 stmia r0!, {r2, r3, ip, lr} @ 4
55 stmia r0!, {r2, r3, ip, lr} @ 4
56 stmia r0!, {r2, r3, ip, lr} @ 4
57 subs r1, r1, #1 @ 1
58 bne 1b @ 1
59 ldr pc, [sp], #4
60
61 __INITDATA
62
63 .type v3_user_fns, #object
64ENTRY(v3_user_fns)
65 .long v3_clear_user_page
66 .long v3_copy_user_page
67 .size v3_user_fns, . - v3_user_fns
diff --git a/arch/arm/mm/copypage-v3.c b/arch/arm/mm/copypage-v3.c
new file mode 100644
index 000000000000..70ed96c8af8e
--- /dev/null
+++ b/arch/arm/mm/copypage-v3.c
@@ -0,0 +1,81 @@
1/*
2 * linux/arch/arm/mm/copypage-v3.c
3 *
4 * Copyright (C) 1995-1999 Russell King
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10#include <linux/init.h>
11#include <linux/highmem.h>
12
13/*
14 * ARMv3 optimised copy_user_highpage
15 *
16 * FIXME: do we need to handle cache stuff...
17 */
18static void __attribute__((naked))
19v3_copy_user_page(void *kto, const void *kfrom)
20{
21 asm("\n\
22 stmfd sp!, {r4, lr} @ 2\n\
23 mov r2, %2 @ 1\n\
24 ldmia %0!, {r3, r4, ip, lr} @ 4+1\n\
251: stmia %1!, {r3, r4, ip, lr} @ 4\n\
26 ldmia %0!, {r3, r4, ip, lr} @ 4+1\n\
27 stmia %1!, {r3, r4, ip, lr} @ 4\n\
28 ldmia %0!, {r3, r4, ip, lr} @ 4+1\n\
29 stmia %1!, {r3, r4, ip, lr} @ 4\n\
30 ldmia %0!, {r3, r4, ip, lr} @ 4\n\
31 subs r2, r2, #1 @ 1\n\
32 stmia %1!, {r3, r4, ip, lr} @ 4\n\
33 ldmneia %0!, {r3, r4, ip, lr} @ 4\n\
34 bne 1b @ 1\n\
35 ldmfd sp!, {r4, pc} @ 3"
36 :
37 : "r" (kfrom), "r" (kto), "I" (PAGE_SIZE / 64));
38}
39
40void v3_copy_user_highpage(struct page *to, struct page *from,
41 unsigned long vaddr)
42{
43 void *kto, *kfrom;
44
45 kto = kmap_atomic(to, KM_USER0);
46 kfrom = kmap_atomic(from, KM_USER1);
47 v3_copy_user_page(kto, kfrom);
48 kunmap_atomic(kfrom, KM_USER1);
49 kunmap_atomic(kto, KM_USER0);
50}
51
52/*
53 * ARMv3 optimised clear_user_page
54 *
55 * FIXME: do we need to handle cache stuff...
56 */
57void v3_clear_user_highpage(struct page *page, unsigned long vaddr)
58{
59 void *ptr, *kaddr = kmap_atomic(page, KM_USER0);
60 asm volatile("\n\
61 mov r1, %2 @ 1\n\
62 mov r2, #0 @ 1\n\
63 mov r3, #0 @ 1\n\
64 mov ip, #0 @ 1\n\
65 mov lr, #0 @ 1\n\
661: stmia %0!, {r2, r3, ip, lr} @ 4\n\
67 stmia %0!, {r2, r3, ip, lr} @ 4\n\
68 stmia %0!, {r2, r3, ip, lr} @ 4\n\
69 stmia %0!, {r2, r3, ip, lr} @ 4\n\
70 subs r1, r1, #1 @ 1\n\
71 bne 1b @ 1"
72 : "=r" (ptr)
73 : "0" (kaddr), "I" (PAGE_SIZE / 64)
74 : "r1", "r2", "r3", "ip", "lr");
75 kunmap_atomic(kaddr, KM_USER0);
76}
77
78struct cpu_user_fns v3_user_fns __initdata = {
79 .cpu_clear_user_highpage = v3_clear_user_highpage,
80 .cpu_copy_user_highpage = v3_copy_user_highpage,
81};
diff --git a/arch/arm/mm/copypage-v4mc.c b/arch/arm/mm/copypage-v4mc.c
index 8d33e2549344..bdb5fd983b15 100644
--- a/arch/arm/mm/copypage-v4mc.c
+++ b/arch/arm/mm/copypage-v4mc.c
@@ -15,8 +15,8 @@
15 */ 15 */
16#include <linux/init.h> 16#include <linux/init.h>
17#include <linux/mm.h> 17#include <linux/mm.h>
18#include <linux/highmem.h>
18 19
19#include <asm/page.h>
20#include <asm/pgtable.h> 20#include <asm/pgtable.h>
21#include <asm/tlbflush.h> 21#include <asm/tlbflush.h>
22#include <asm/cacheflush.h> 22#include <asm/cacheflush.h>
@@ -33,7 +33,7 @@
33static DEFINE_SPINLOCK(minicache_lock); 33static DEFINE_SPINLOCK(minicache_lock);
34 34
35/* 35/*
36 * ARMv4 mini-dcache optimised copy_user_page 36 * ARMv4 mini-dcache optimised copy_user_highpage
37 * 37 *
38 * We flush the destination cache lines just before we write the data into the 38 * We flush the destination cache lines just before we write the data into the
39 * corresponding address. Since the Dcache is read-allocate, this removes the 39 * corresponding address. Since the Dcache is read-allocate, this removes the
@@ -42,7 +42,7 @@ static DEFINE_SPINLOCK(minicache_lock);
42 * 42 *
43 * Note: We rely on all ARMv4 processors implementing the "invalidate D line" 43 * Note: We rely on all ARMv4 processors implementing the "invalidate D line"
44 * instruction. If your processor does not supply this, you have to write your 44 * instruction. If your processor does not supply this, you have to write your
45 * own copy_user_page that does the right thing. 45 * own copy_user_highpage that does the right thing.
46 */ 46 */
47static void __attribute__((naked)) 47static void __attribute__((naked))
48mc_copy_user_page(void *from, void *to) 48mc_copy_user_page(void *from, void *to)
@@ -68,50 +68,53 @@ mc_copy_user_page(void *from, void *to)
68 : "r" (from), "r" (to), "I" (PAGE_SIZE / 64)); 68 : "r" (from), "r" (to), "I" (PAGE_SIZE / 64));
69} 69}
70 70
71void v4_mc_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr) 71void v4_mc_copy_user_highpage(struct page *from, struct page *to,
72 unsigned long vaddr)
72{ 73{
73 struct page *page = virt_to_page(kfrom); 74 void *kto = kmap_atomic(to, KM_USER1);
74 75
75 if (test_and_clear_bit(PG_dcache_dirty, &page->flags)) 76 if (test_and_clear_bit(PG_dcache_dirty, &from->flags))
76 __flush_dcache_page(page_mapping(page), page); 77 __flush_dcache_page(page_mapping(from), from);
77 78
78 spin_lock(&minicache_lock); 79 spin_lock(&minicache_lock);
79 80
80 set_pte_ext(TOP_PTE(0xffff8000), pfn_pte(__pa(kfrom) >> PAGE_SHIFT, minicache_pgprot), 0); 81 set_pte_ext(TOP_PTE(0xffff8000), pfn_pte(page_to_pfn(from), minicache_pgprot), 0);
81 flush_tlb_kernel_page(0xffff8000); 82 flush_tlb_kernel_page(0xffff8000);
82 83
83 mc_copy_user_page((void *)0xffff8000, kto); 84 mc_copy_user_page((void *)0xffff8000, kto);
84 85
85 spin_unlock(&minicache_lock); 86 spin_unlock(&minicache_lock);
87
88 kunmap_atomic(kto, KM_USER1);
86} 89}
87 90
88/* 91/*
89 * ARMv4 optimised clear_user_page 92 * ARMv4 optimised clear_user_page
90 */ 93 */
91void __attribute__((naked)) 94void v4_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
92v4_mc_clear_user_page(void *kaddr, unsigned long vaddr)
93{ 95{
94 asm volatile( 96 void *ptr, *kaddr = kmap_atomic(page, KM_USER0);
95 "str lr, [sp, #-4]!\n\ 97 asm volatile("\
96 mov r1, %0 @ 1\n\ 98 mov r1, %2 @ 1\n\
97 mov r2, #0 @ 1\n\ 99 mov r2, #0 @ 1\n\
98 mov r3, #0 @ 1\n\ 100 mov r3, #0 @ 1\n\
99 mov ip, #0 @ 1\n\ 101 mov ip, #0 @ 1\n\
100 mov lr, #0 @ 1\n\ 102 mov lr, #0 @ 1\n\
1011: mcr p15, 0, r0, c7, c6, 1 @ 1 invalidate D line\n\ 1031: mcr p15, 0, %0, c7, c6, 1 @ 1 invalidate D line\n\
102 stmia r0!, {r2, r3, ip, lr} @ 4\n\ 104 stmia %0!, {r2, r3, ip, lr} @ 4\n\
103 stmia r0!, {r2, r3, ip, lr} @ 4\n\ 105 stmia %0!, {r2, r3, ip, lr} @ 4\n\
104 mcr p15, 0, r0, c7, c6, 1 @ 1 invalidate D line\n\ 106 mcr p15, 0, %0, c7, c6, 1 @ 1 invalidate D line\n\
105 stmia r0!, {r2, r3, ip, lr} @ 4\n\ 107 stmia %0!, {r2, r3, ip, lr} @ 4\n\
106 stmia r0!, {r2, r3, ip, lr} @ 4\n\ 108 stmia %0!, {r2, r3, ip, lr} @ 4\n\
107 subs r1, r1, #1 @ 1\n\ 109 subs r1, r1, #1 @ 1\n\
108 bne 1b @ 1\n\ 110 bne 1b @ 1"
109 ldr pc, [sp], #4" 111 : "=r" (ptr)
110 : 112 : "0" (kaddr), "I" (PAGE_SIZE / 64)
111 : "I" (PAGE_SIZE / 64)); 113 : "r1", "r2", "r3", "ip", "lr");
114 kunmap_atomic(kaddr, KM_USER0);
112} 115}
113 116
114struct cpu_user_fns v4_mc_user_fns __initdata = { 117struct cpu_user_fns v4_mc_user_fns __initdata = {
115 .cpu_clear_user_page = v4_mc_clear_user_page, 118 .cpu_clear_user_highpage = v4_mc_clear_user_highpage,
116 .cpu_copy_user_page = v4_mc_copy_user_page, 119 .cpu_copy_user_highpage = v4_mc_copy_user_highpage,
117}; 120};
diff --git a/arch/arm/mm/copypage-v4wb.S b/arch/arm/mm/copypage-v4wb.S
deleted file mode 100644
index 83117354b1cd..000000000000
--- a/arch/arm/mm/copypage-v4wb.S
+++ /dev/null
@@ -1,79 +0,0 @@
1/*
2 * linux/arch/arm/lib/copypage.S
3 *
4 * Copyright (C) 1995-1999 Russell King
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * ASM optimised string functions
11 */
12#include <linux/linkage.h>
13#include <linux/init.h>
14#include <asm/asm-offsets.h>
15
16 .text
17 .align 5
18/*
19 * ARMv4 optimised copy_user_page
20 *
21 * We flush the destination cache lines just before we write the data into the
22 * corresponding address. Since the Dcache is read-allocate, this removes the
23 * Dcache aliasing issue. The writes will be forwarded to the write buffer,
24 * and merged as appropriate.
25 *
26 * Note: We rely on all ARMv4 processors implementing the "invalidate D line"
27 * instruction. If your processor does not supply this, you have to write your
28 * own copy_user_page that does the right thing.
29 */
30ENTRY(v4wb_copy_user_page)
31 stmfd sp!, {r4, lr} @ 2
32 mov r2, #PAGE_SZ/64 @ 1
33 ldmia r1!, {r3, r4, ip, lr} @ 4
341: mcr p15, 0, r0, c7, c6, 1 @ 1 invalidate D line
35 stmia r0!, {r3, r4, ip, lr} @ 4
36 ldmia r1!, {r3, r4, ip, lr} @ 4+1
37 stmia r0!, {r3, r4, ip, lr} @ 4
38 ldmia r1!, {r3, r4, ip, lr} @ 4
39 mcr p15, 0, r0, c7, c6, 1 @ 1 invalidate D line
40 stmia r0!, {r3, r4, ip, lr} @ 4
41 ldmia r1!, {r3, r4, ip, lr} @ 4
42 subs r2, r2, #1 @ 1
43 stmia r0!, {r3, r4, ip, lr} @ 4
44 ldmneia r1!, {r3, r4, ip, lr} @ 4
45 bne 1b @ 1
46 mcr p15, 0, r1, c7, c10, 4 @ 1 drain WB
47 ldmfd sp!, {r4, pc} @ 3
48
49 .align 5
50/*
51 * ARMv4 optimised clear_user_page
52 *
53 * Same story as above.
54 */
55ENTRY(v4wb_clear_user_page)
56 str lr, [sp, #-4]!
57 mov r1, #PAGE_SZ/64 @ 1
58 mov r2, #0 @ 1
59 mov r3, #0 @ 1
60 mov ip, #0 @ 1
61 mov lr, #0 @ 1
621: mcr p15, 0, r0, c7, c6, 1 @ 1 invalidate D line
63 stmia r0!, {r2, r3, ip, lr} @ 4
64 stmia r0!, {r2, r3, ip, lr} @ 4
65 mcr p15, 0, r0, c7, c6, 1 @ 1 invalidate D line
66 stmia r0!, {r2, r3, ip, lr} @ 4
67 stmia r0!, {r2, r3, ip, lr} @ 4
68 subs r1, r1, #1 @ 1
69 bne 1b @ 1
70 mcr p15, 0, r1, c7, c10, 4 @ 1 drain WB
71 ldr pc, [sp], #4
72
73 __INITDATA
74
75 .type v4wb_user_fns, #object
76ENTRY(v4wb_user_fns)
77 .long v4wb_clear_user_page
78 .long v4wb_copy_user_page
79 .size v4wb_user_fns, . - v4wb_user_fns
diff --git a/arch/arm/mm/copypage-v4wb.c b/arch/arm/mm/copypage-v4wb.c
new file mode 100644
index 000000000000..3ec93dab7656
--- /dev/null
+++ b/arch/arm/mm/copypage-v4wb.c
@@ -0,0 +1,94 @@
1/*
2 * linux/arch/arm/mm/copypage-v4wb.c
3 *
4 * Copyright (C) 1995-1999 Russell King
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10#include <linux/init.h>
11#include <linux/highmem.h>
12
13/*
14 * ARMv4 optimised copy_user_highpage
15 *
16 * We flush the destination cache lines just before we write the data into the
17 * corresponding address. Since the Dcache is read-allocate, this removes the
18 * Dcache aliasing issue. The writes will be forwarded to the write buffer,
19 * and merged as appropriate.
20 *
21 * Note: We rely on all ARMv4 processors implementing the "invalidate D line"
22 * instruction. If your processor does not supply this, you have to write your
23 * own copy_user_highpage that does the right thing.
24 */
25static void __attribute__((naked))
26v4wb_copy_user_page(void *kto, const void *kfrom)
27{
28 asm("\
29 stmfd sp!, {r4, lr} @ 2\n\
30 mov r2, %0 @ 1\n\
31 ldmia r1!, {r3, r4, ip, lr} @ 4\n\
321: mcr p15, 0, r0, c7, c6, 1 @ 1 invalidate D line\n\
33 stmia r0!, {r3, r4, ip, lr} @ 4\n\
34 ldmia r1!, {r3, r4, ip, lr} @ 4+1\n\
35 stmia r0!, {r3, r4, ip, lr} @ 4\n\
36 ldmia r1!, {r3, r4, ip, lr} @ 4\n\
37 mcr p15, 0, r0, c7, c6, 1 @ 1 invalidate D line\n\
38 stmia r0!, {r3, r4, ip, lr} @ 4\n\
39 ldmia r1!, {r3, r4, ip, lr} @ 4\n\
40 subs r2, r2, #1 @ 1\n\
41 stmia r0!, {r3, r4, ip, lr} @ 4\n\
42 ldmneia r1!, {r3, r4, ip, lr} @ 4\n\
43 bne 1b @ 1\n\
44 mcr p15, 0, r1, c7, c10, 4 @ 1 drain WB\n\
45 ldmfd sp!, {r4, pc} @ 3"
46 :
47 : "I" (PAGE_SIZE / 64));
48}
49
50void v4wb_copy_user_highpage(struct page *to, struct page *from,
51 unsigned long vaddr)
52{
53 void *kto, *kfrom;
54
55 kto = kmap_atomic(to, KM_USER0);
56 kfrom = kmap_atomic(from, KM_USER1);
57 v4wb_copy_user_page(kto, kfrom);
58 kunmap_atomic(kfrom, KM_USER1);
59 kunmap_atomic(kto, KM_USER0);
60}
61
62/*
63 * ARMv4 optimised clear_user_page
64 *
65 * Same story as above.
66 */
67void v4wb_clear_user_highpage(struct page *page, unsigned long vaddr)
68{
69 void *ptr, *kaddr = kmap_atomic(page, KM_USER0);
70 asm volatile("\
71 mov r1, %2 @ 1\n\
72 mov r2, #0 @ 1\n\
73 mov r3, #0 @ 1\n\
74 mov ip, #0 @ 1\n\
75 mov lr, #0 @ 1\n\
761: mcr p15, 0, %0, c7, c6, 1 @ 1 invalidate D line\n\
77 stmia %0!, {r2, r3, ip, lr} @ 4\n\
78 stmia %0!, {r2, r3, ip, lr} @ 4\n\
79 mcr p15, 0, %0, c7, c6, 1 @ 1 invalidate D line\n\
80 stmia %0!, {r2, r3, ip, lr} @ 4\n\
81 stmia %0!, {r2, r3, ip, lr} @ 4\n\
82 subs r1, r1, #1 @ 1\n\
83 bne 1b @ 1\n\
84 mcr p15, 0, r1, c7, c10, 4 @ 1 drain WB"
85 : "=r" (ptr)
86 : "0" (kaddr), "I" (PAGE_SIZE / 64)
87 : "r1", "r2", "r3", "ip", "lr");
88 kunmap_atomic(kaddr, KM_USER0);
89}
90
91struct cpu_user_fns v4wb_user_fns __initdata = {
92 .cpu_clear_user_highpage = v4wb_clear_user_highpage,
93 .cpu_copy_user_highpage = v4wb_copy_user_highpage,
94};
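Moving the clear routines from standalone assembly files into C also changes how the assembly talks to the compiler: the page pointer becomes a tied input/output operand and the scratch registers move into an explicit clobber list, because the function now has a normal prologue and epilogue around the asm. A reduced, self-contained sketch of that constraint pattern, using plain word stores rather than the cache-maintenance loop above:

	/*
	 * Illustrative only: the operand/clobber pattern used by the converted
	 * clear_user_highpage routines. The "0" constraint ties the input
	 * address to the %0 output, telling the compiler the register is
	 * advanced by the loop rather than left untouched.
	 */
	static void example_zero_page(void *kaddr)
	{
		void *ptr;

		asm volatile(
		"	mov	r1, %2			@ number of words\n"
		"	mov	r2, #0\n"
		"1:	str	r2, [%0], #4\n"
		"	subs	r1, r1, #1\n"
		"	bne	1b"
		: "=r" (ptr)
		: "0" (kaddr), "I" (PAGE_SIZE / 4)
		: "r1", "r2", "cc", "memory");
	}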
diff --git a/arch/arm/mm/copypage-v4wt.S b/arch/arm/mm/copypage-v4wt.S
deleted file mode 100644
index e1f2af28d549..000000000000
--- a/arch/arm/mm/copypage-v4wt.S
+++ /dev/null
@@ -1,73 +0,0 @@
1/*
2 * linux/arch/arm/lib/copypage-v4.S
3 *
4 * Copyright (C) 1995-1999 Russell King
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * ASM optimised string functions
11 *
12 * This is for CPUs with a writethrough cache and 'flush ID cache' is
13 * the only supported cache operation.
14 */
15#include <linux/linkage.h>
16#include <linux/init.h>
17#include <asm/asm-offsets.h>
18
19 .text
20 .align 5
21/*
22 * ARMv4 optimised copy_user_page
23 *
24 * Since we have writethrough caches, we don't have to worry about
25 * dirty data in the cache. However, we do have to ensure that
26 * subsequent reads are up to date.
27 */
28ENTRY(v4wt_copy_user_page)
29 stmfd sp!, {r4, lr} @ 2
30 mov r2, #PAGE_SZ/64 @ 1
31 ldmia r1!, {r3, r4, ip, lr} @ 4
321: stmia r0!, {r3, r4, ip, lr} @ 4
33 ldmia r1!, {r3, r4, ip, lr} @ 4+1
34 stmia r0!, {r3, r4, ip, lr} @ 4
35 ldmia r1!, {r3, r4, ip, lr} @ 4
36 stmia r0!, {r3, r4, ip, lr} @ 4
37 ldmia r1!, {r3, r4, ip, lr} @ 4
38 subs r2, r2, #1 @ 1
39 stmia r0!, {r3, r4, ip, lr} @ 4
40 ldmneia r1!, {r3, r4, ip, lr} @ 4
41 bne 1b @ 1
42 mcr p15, 0, r2, c7, c7, 0 @ flush ID cache
43 ldmfd sp!, {r4, pc} @ 3
44
45 .align 5
46/*
47 * ARMv4 optimised clear_user_page
48 *
49 * Same story as above.
50 */
51ENTRY(v4wt_clear_user_page)
52 str lr, [sp, #-4]!
53 mov r1, #PAGE_SZ/64 @ 1
54 mov r2, #0 @ 1
55 mov r3, #0 @ 1
56 mov ip, #0 @ 1
57 mov lr, #0 @ 1
581: stmia r0!, {r2, r3, ip, lr} @ 4
59 stmia r0!, {r2, r3, ip, lr} @ 4
60 stmia r0!, {r2, r3, ip, lr} @ 4
61 stmia r0!, {r2, r3, ip, lr} @ 4
62 subs r1, r1, #1 @ 1
63 bne 1b @ 1
64 mcr p15, 0, r2, c7, c7, 0 @ flush ID cache
65 ldr pc, [sp], #4
66
67 __INITDATA
68
69 .type v4wt_user_fns, #object
70ENTRY(v4wt_user_fns)
71 .long v4wt_clear_user_page
72 .long v4wt_copy_user_page
73 .size v4wt_user_fns, . - v4wt_user_fns
diff --git a/arch/arm/mm/copypage-v4wt.c b/arch/arm/mm/copypage-v4wt.c
new file mode 100644
index 000000000000..0f1188efae45
--- /dev/null
+++ b/arch/arm/mm/copypage-v4wt.c
@@ -0,0 +1,88 @@
1/*
2 * linux/arch/arm/mm/copypage-v4wt.S
3 *
4 * Copyright (C) 1995-1999 Russell King
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * This is for CPUs with a writethrough cache and 'flush ID cache' is
11 * the only supported cache operation.
12 */
13#include <linux/init.h>
14#include <linux/highmem.h>
15
16/*
17 * ARMv4 optimised copy_user_highpage
18 *
19 * Since we have writethrough caches, we don't have to worry about
20 * dirty data in the cache. However, we do have to ensure that
21 * subsequent reads are up to date.
22 */
23static void __attribute__((naked))
24v4wt_copy_user_page(void *kto, const void *kfrom)
25{
26 asm("\
27 stmfd sp!, {r4, lr} @ 2\n\
28 mov r2, %0 @ 1\n\
29 ldmia r1!, {r3, r4, ip, lr} @ 4\n\
301: stmia r0!, {r3, r4, ip, lr} @ 4\n\
31 ldmia r1!, {r3, r4, ip, lr} @ 4+1\n\
32 stmia r0!, {r3, r4, ip, lr} @ 4\n\
33 ldmia r1!, {r3, r4, ip, lr} @ 4\n\
34 stmia r0!, {r3, r4, ip, lr} @ 4\n\
35 ldmia r1!, {r3, r4, ip, lr} @ 4\n\
36 subs r2, r2, #1 @ 1\n\
37 stmia r0!, {r3, r4, ip, lr} @ 4\n\
38 ldmneia r1!, {r3, r4, ip, lr} @ 4\n\
39 bne 1b @ 1\n\
40 mcr p15, 0, r2, c7, c7, 0 @ flush ID cache\n\
41 ldmfd sp!, {r4, pc} @ 3"
42 :
43 : "I" (PAGE_SIZE / 64));
44}
45
46void v4wt_copy_user_highpage(struct page *to, struct page *from,
47 unsigned long vaddr)
48{
49 void *kto, *kfrom;
50
51 kto = kmap_atomic(to, KM_USER0);
52 kfrom = kmap_atomic(from, KM_USER1);
53 v4wt_copy_user_page(kto, kfrom);
54 kunmap_atomic(kfrom, KM_USER1);
55 kunmap_atomic(kto, KM_USER0);
56}
57
58/*
59 * ARMv4 optimised clear_user_page
60 *
61 * Same story as above.
62 */
63void v4wt_clear_user_highpage(struct page *page, unsigned long vaddr)
64{
65 void *ptr, *kaddr = kmap_atomic(page, KM_USER0);
66 asm volatile("\
67 mov r1, %2 @ 1\n\
68 mov r2, #0 @ 1\n\
69 mov r3, #0 @ 1\n\
70 mov ip, #0 @ 1\n\
71 mov lr, #0 @ 1\n\
721: stmia %0!, {r2, r3, ip, lr} @ 4\n\
73 stmia %0!, {r2, r3, ip, lr} @ 4\n\
74 stmia %0!, {r2, r3, ip, lr} @ 4\n\
75 stmia %0!, {r2, r3, ip, lr} @ 4\n\
76 subs r1, r1, #1 @ 1\n\
77 bne 1b @ 1\n\
78 mcr p15, 0, r2, c7, c7, 0 @ flush ID cache"
79 : "=r" (ptr)
80 : "0" (kaddr), "I" (PAGE_SIZE / 64)
81 : "r1", "r2", "r3", "ip", "lr");
82 kunmap_atomic(kaddr, KM_USER0);
83}
84
85struct cpu_user_fns v4wt_user_fns __initdata = {
86 .cpu_clear_user_highpage = v4wt_clear_user_highpage,
87 .cpu_copy_user_highpage = v4wt_copy_user_highpage,
88};
diff --git a/arch/arm/mm/copypage-v6.c b/arch/arm/mm/copypage-v6.c
index 0e21c0767580..4127a7bddfe5 100644
--- a/arch/arm/mm/copypage-v6.c
+++ b/arch/arm/mm/copypage-v6.c
@@ -10,8 +10,8 @@
10#include <linux/init.h> 10#include <linux/init.h>
11#include <linux/spinlock.h> 11#include <linux/spinlock.h>
12#include <linux/mm.h> 12#include <linux/mm.h>
13#include <linux/highmem.h>
13 14
14#include <asm/page.h>
15#include <asm/pgtable.h> 15#include <asm/pgtable.h>
16#include <asm/shmparam.h> 16#include <asm/shmparam.h>
17#include <asm/tlbflush.h> 17#include <asm/tlbflush.h>
@@ -33,41 +33,56 @@ static DEFINE_SPINLOCK(v6_lock);
33 * Copy the user page. No aliasing to deal with so we can just 33 * Copy the user page. No aliasing to deal with so we can just
34 * attack the kernel's existing mapping of these pages. 34 * attack the kernel's existing mapping of these pages.
35 */ 35 */
36static void v6_copy_user_page_nonaliasing(void *kto, const void *kfrom, unsigned long vaddr) 36static void v6_copy_user_highpage_nonaliasing(struct page *to,
37 struct page *from, unsigned long vaddr)
37{ 38{
39 void *kto, *kfrom;
40
41 kfrom = kmap_atomic(from, KM_USER0);
42 kto = kmap_atomic(to, KM_USER1);
38 copy_page(kto, kfrom); 43 copy_page(kto, kfrom);
44 kunmap_atomic(kto, KM_USER1);
45 kunmap_atomic(kfrom, KM_USER0);
39} 46}
40 47
41/* 48/*
42 * Clear the user page. No aliasing to deal with so we can just 49 * Clear the user page. No aliasing to deal with so we can just
43 * attack the kernel's existing mapping of this page. 50 * attack the kernel's existing mapping of this page.
44 */ 51 */
45static void v6_clear_user_page_nonaliasing(void *kaddr, unsigned long vaddr) 52static void v6_clear_user_highpage_nonaliasing(struct page *page, unsigned long vaddr)
46{ 53{
54 void *kaddr = kmap_atomic(page, KM_USER0);
47 clear_page(kaddr); 55 clear_page(kaddr);
56 kunmap_atomic(kaddr, KM_USER0);
48} 57}
49 58
50/* 59/*
51 * Copy the page, taking account of the cache colour. 60 * Discard data in the kernel mapping for the new page.
61 * FIXME: needs this MCRR to be supported.
52 */ 62 */
53static void v6_copy_user_page_aliasing(void *kto, const void *kfrom, unsigned long vaddr) 63static void discard_old_kernel_data(void *kto)
54{ 64{
55 unsigned int offset = CACHE_COLOUR(vaddr);
56 unsigned long from, to;
57 struct page *page = virt_to_page(kfrom);
58
59 if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
60 __flush_dcache_page(page_mapping(page), page);
61
62 /*
63 * Discard data in the kernel mapping for the new page.
64 * FIXME: needs this MCRR to be supported.
65 */
66 __asm__("mcrr p15, 0, %1, %0, c6 @ 0xec401f06" 65 __asm__("mcrr p15, 0, %1, %0, c6 @ 0xec401f06"
67 : 66 :
68 : "r" (kto), 67 : "r" (kto),
69 "r" ((unsigned long)kto + PAGE_SIZE - L1_CACHE_BYTES) 68 "r" ((unsigned long)kto + PAGE_SIZE - L1_CACHE_BYTES)
70 : "cc"); 69 : "cc");
70}
71
72/*
73 * Copy the page, taking account of the cache colour.
74 */
75static void v6_copy_user_highpage_aliasing(struct page *to,
76 struct page *from, unsigned long vaddr)
77{
78 unsigned int offset = CACHE_COLOUR(vaddr);
79 unsigned long kfrom, kto;
80
81 if (test_and_clear_bit(PG_dcache_dirty, &from->flags))
82 __flush_dcache_page(page_mapping(from), from);
83
84 /* FIXME: not highmem safe */
85 discard_old_kernel_data(page_address(to));
71 86
72 /* 87 /*
73 * Now copy the page using the same cache colour as the 88 * Now copy the page using the same cache colour as the
@@ -75,16 +90,16 @@ static void v6_copy_user_page_aliasing(void *kto, const void *kfrom, unsigned lo
75 */ 90 */
76 spin_lock(&v6_lock); 91 spin_lock(&v6_lock);
77 92
78 set_pte_ext(TOP_PTE(from_address) + offset, pfn_pte(__pa(kfrom) >> PAGE_SHIFT, PAGE_KERNEL), 0); 93 set_pte_ext(TOP_PTE(from_address) + offset, pfn_pte(page_to_pfn(from), PAGE_KERNEL), 0);
79 set_pte_ext(TOP_PTE(to_address) + offset, pfn_pte(__pa(kto) >> PAGE_SHIFT, PAGE_KERNEL), 0); 94 set_pte_ext(TOP_PTE(to_address) + offset, pfn_pte(page_to_pfn(to), PAGE_KERNEL), 0);
80 95
81 from = from_address + (offset << PAGE_SHIFT); 96 kfrom = from_address + (offset << PAGE_SHIFT);
82 to = to_address + (offset << PAGE_SHIFT); 97 kto = to_address + (offset << PAGE_SHIFT);
83 98
84 flush_tlb_kernel_page(from); 99 flush_tlb_kernel_page(kfrom);
85 flush_tlb_kernel_page(to); 100 flush_tlb_kernel_page(kto);
86 101
87 copy_page((void *)to, (void *)from); 102 copy_page((void *)kto, (void *)kfrom);
88 103
89 spin_unlock(&v6_lock); 104 spin_unlock(&v6_lock);
90} 105}
@@ -94,20 +109,13 @@ static void v6_copy_user_page_aliasing(void *kto, const void *kfrom, unsigned lo
94 * so remap the kernel page into the same cache colour as the user 109 * so remap the kernel page into the same cache colour as the user
95 * page. 110 * page.
96 */ 111 */
97static void v6_clear_user_page_aliasing(void *kaddr, unsigned long vaddr) 112static void v6_clear_user_highpage_aliasing(struct page *page, unsigned long vaddr)
98{ 113{
99 unsigned int offset = CACHE_COLOUR(vaddr); 114 unsigned int offset = CACHE_COLOUR(vaddr);
100 unsigned long to = to_address + (offset << PAGE_SHIFT); 115 unsigned long to = to_address + (offset << PAGE_SHIFT);
101 116
102 /* 117 /* FIXME: not highmem safe */
103 * Discard data in the kernel mapping for the new page 118 discard_old_kernel_data(page_address(page));
104 * FIXME: needs this MCRR to be supported.
105 */
106 __asm__("mcrr p15, 0, %1, %0, c6 @ 0xec401f06"
107 :
108 : "r" (kaddr),
109 "r" ((unsigned long)kaddr + PAGE_SIZE - L1_CACHE_BYTES)
110 : "cc");
111 119
112 /* 120 /*
113 * Now clear the page using the same cache colour as 121 * Now clear the page using the same cache colour as
@@ -115,7 +123,7 @@ static void v6_clear_user_page_aliasing(void *kaddr, unsigned long vaddr)
115 */ 123 */
116 spin_lock(&v6_lock); 124 spin_lock(&v6_lock);
117 125
118 set_pte_ext(TOP_PTE(to_address) + offset, pfn_pte(__pa(kaddr) >> PAGE_SHIFT, PAGE_KERNEL), 0); 126 set_pte_ext(TOP_PTE(to_address) + offset, pfn_pte(page_to_pfn(page), PAGE_KERNEL), 0);
119 flush_tlb_kernel_page(to); 127 flush_tlb_kernel_page(to);
120 clear_page((void *)to); 128 clear_page((void *)to);
121 129
@@ -123,15 +131,15 @@ static void v6_clear_user_page_aliasing(void *kaddr, unsigned long vaddr)
123} 131}
124 132
125struct cpu_user_fns v6_user_fns __initdata = { 133struct cpu_user_fns v6_user_fns __initdata = {
126 .cpu_clear_user_page = v6_clear_user_page_nonaliasing, 134 .cpu_clear_user_highpage = v6_clear_user_highpage_nonaliasing,
127 .cpu_copy_user_page = v6_copy_user_page_nonaliasing, 135 .cpu_copy_user_highpage = v6_copy_user_highpage_nonaliasing,
128}; 136};
129 137
130static int __init v6_userpage_init(void) 138static int __init v6_userpage_init(void)
131{ 139{
132 if (cache_is_vipt_aliasing()) { 140 if (cache_is_vipt_aliasing()) {
133 cpu_user.cpu_clear_user_page = v6_clear_user_page_aliasing; 141 cpu_user.cpu_clear_user_highpage = v6_clear_user_highpage_aliasing;
134 cpu_user.cpu_copy_user_page = v6_copy_user_page_aliasing; 142 cpu_user.cpu_copy_user_highpage = v6_copy_user_highpage_aliasing;
135 } 143 }
136 144
137 return 0; 145 return 0;
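The aliasing paths above work by giving the temporary kernel mapping the same cache colour as the user mapping, so the VIPT data cache indexes both through the same set. Roughly, with illustrative constants (the real CACHE_COLOUR() and the from/to window addresses come from the kernel headers and the file's own definitions):

	/* Illustrative sketch of the colour-matched mapping target used above. */
	#define EXAMPLE_SHMLBA			(4 * PAGE_SIZE)	/* assumed 4 colours */
	#define EXAMPLE_CACHE_COLOUR(vaddr)	\
		(((vaddr) & (EXAMPLE_SHMLBA - 1)) >> PAGE_SHIFT)

	static unsigned long example_coloured_slot(unsigned long window_base,
			unsigned long user_vaddr)
	{
		unsigned int offset = EXAMPLE_CACHE_COLOUR(user_vaddr);

		/* pick the page slot in the window that aliases with user_vaddr */
		return window_base + ((unsigned long)offset << PAGE_SHIFT);
	}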
diff --git a/arch/arm/mm/copypage-xsc3.S b/arch/arm/mm/copypage-xsc3.S
deleted file mode 100644
index 9a2cb4332b4c..000000000000
--- a/arch/arm/mm/copypage-xsc3.S
+++ /dev/null
@@ -1,97 +0,0 @@
1/*
2 * linux/arch/arm/lib/copypage-xsc3.S
3 *
4 * Copyright (C) 2004 Intel Corp.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * Adapted for 3rd gen XScale core, no more mini-dcache
11 * Author: Matt Gilbert (matthew.m.gilbert@intel.com)
12 */
13
14#include <linux/linkage.h>
15#include <linux/init.h>
16#include <asm/asm-offsets.h>
17
18/*
19 * General note:
20 * We don't really want write-allocate cache behaviour for these functions
21 * since that will just eat through 8K of the cache.
22 */
23
24 .text
25 .align 5
26/*
27 * XSC3 optimised copy_user_page
28 * r0 = destination
29 * r1 = source
30 * r2 = virtual user address of ultimate destination page
31 *
32 * The source page may have some clean entries in the cache already, but we
33 * can safely ignore them - break_cow() will flush them out of the cache
34 * if we eventually end up using our copied page.
35 *
36 */
37ENTRY(xsc3_mc_copy_user_page)
38 stmfd sp!, {r4, r5, lr}
39 mov lr, #PAGE_SZ/64-1
40
41 pld [r1, #0]
42 pld [r1, #32]
431: pld [r1, #64]
44 pld [r1, #96]
45
462: ldrd r2, [r1], #8
47 mov ip, r0
48 ldrd r4, [r1], #8
49 mcr p15, 0, ip, c7, c6, 1 @ invalidate
50 strd r2, [r0], #8
51 ldrd r2, [r1], #8
52 strd r4, [r0], #8
53 ldrd r4, [r1], #8
54 strd r2, [r0], #8
55 strd r4, [r0], #8
56 ldrd r2, [r1], #8
57 mov ip, r0
58 ldrd r4, [r1], #8
59 mcr p15, 0, ip, c7, c6, 1 @ invalidate
60 strd r2, [r0], #8
61 ldrd r2, [r1], #8
62 subs lr, lr, #1
63 strd r4, [r0], #8
64 ldrd r4, [r1], #8
65 strd r2, [r0], #8
66 strd r4, [r0], #8
67 bgt 1b
68 beq 2b
69
70 ldmfd sp!, {r4, r5, pc}
71
72 .align 5
73/*
74 * XScale optimised clear_user_page
75 * r0 = destination
76 * r1 = virtual user address of ultimate destination page
77 */
78ENTRY(xsc3_mc_clear_user_page)
79 mov r1, #PAGE_SZ/32
80 mov r2, #0
81 mov r3, #0
821: mcr p15, 0, r0, c7, c6, 1 @ invalidate line
83 strd r2, [r0], #8
84 strd r2, [r0], #8
85 strd r2, [r0], #8
86 strd r2, [r0], #8
87 subs r1, r1, #1
88 bne 1b
89 mov pc, lr
90
91 __INITDATA
92
93 .type xsc3_mc_user_fns, #object
94ENTRY(xsc3_mc_user_fns)
95 .long xsc3_mc_clear_user_page
96 .long xsc3_mc_copy_user_page
97 .size xsc3_mc_user_fns, . - xsc3_mc_user_fns
diff --git a/arch/arm/mm/copypage-xsc3.c b/arch/arm/mm/copypage-xsc3.c
new file mode 100644
index 000000000000..39a994542cad
--- /dev/null
+++ b/arch/arm/mm/copypage-xsc3.c
@@ -0,0 +1,113 @@
1/*
2 * linux/arch/arm/mm/copypage-xsc3.S
3 *
4 * Copyright (C) 2004 Intel Corp.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * Adapted for 3rd gen XScale core, no more mini-dcache
11 * Author: Matt Gilbert (matthew.m.gilbert@intel.com)
12 */
13#include <linux/init.h>
14#include <linux/highmem.h>
15
16/*
17 * General note:
18 * We don't really want write-allocate cache behaviour for these functions
19 * since that will just eat through 8K of the cache.
20 */
21
22/*
23 * XSC3 optimised copy_user_highpage
24 * r0 = destination
25 * r1 = source
26 *
27 * The source page may have some clean entries in the cache already, but we
28 * can safely ignore them - break_cow() will flush them out of the cache
29 * if we eventually end up using our copied page.
30 *
31 */
32static void __attribute__((naked))
33xsc3_mc_copy_user_page(void *kto, const void *kfrom)
34{
35 asm("\
36 stmfd sp!, {r4, r5, lr} \n\
37 mov lr, %0 \n\
38 \n\
39 pld [r1, #0] \n\
40 pld [r1, #32] \n\
411: pld [r1, #64] \n\
42 pld [r1, #96] \n\
43 \n\
442: ldrd r2, [r1], #8 \n\
45 mov ip, r0 \n\
46 ldrd r4, [r1], #8 \n\
47 mcr p15, 0, ip, c7, c6, 1 @ invalidate\n\
48 strd r2, [r0], #8 \n\
49 ldrd r2, [r1], #8 \n\
50 strd r4, [r0], #8 \n\
51 ldrd r4, [r1], #8 \n\
52 strd r2, [r0], #8 \n\
53 strd r4, [r0], #8 \n\
54 ldrd r2, [r1], #8 \n\
55 mov ip, r0 \n\
56 ldrd r4, [r1], #8 \n\
57 mcr p15, 0, ip, c7, c6, 1 @ invalidate\n\
58 strd r2, [r0], #8 \n\
59 ldrd r2, [r1], #8 \n\
60 subs lr, lr, #1 \n\
61 strd r4, [r0], #8 \n\
62 ldrd r4, [r1], #8 \n\
63 strd r2, [r0], #8 \n\
64 strd r4, [r0], #8 \n\
65 bgt 1b \n\
66 beq 2b \n\
67 \n\
68 ldmfd sp!, {r4, r5, pc}"
69 :
70 : "I" (PAGE_SIZE / 64 - 1));
71}
72
73void xsc3_mc_copy_user_highpage(struct page *to, struct page *from,
74 unsigned long vaddr)
75{
76 void *kto, *kfrom;
77
78 kto = kmap_atomic(to, KM_USER0);
79 kfrom = kmap_atomic(from, KM_USER1);
80 xsc3_mc_copy_user_page(kto, kfrom);
81 kunmap_atomic(kfrom, KM_USER1);
82 kunmap_atomic(kto, KM_USER0);
83}
84
85/*
86 * XScale optimised clear_user_page
87 * r0 = destination
88 * r1 = virtual user address of ultimate destination page
89 */
90void xsc3_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
91{
92 void *ptr, *kaddr = kmap_atomic(page, KM_USER0);
93 asm volatile ("\
94 mov r1, %2 \n\
95 mov r2, #0 \n\
96 mov r3, #0 \n\
971: mcr p15, 0, %0, c7, c6, 1 @ invalidate line\n\
98 strd r2, [%0], #8 \n\
99 strd r2, [%0], #8 \n\
100 strd r2, [%0], #8 \n\
101 strd r2, [%0], #8 \n\
102 subs r1, r1, #1 \n\
103 bne 1b"
104 : "=r" (ptr)
105 : "0" (kaddr), "I" (PAGE_SIZE / 32)
106 : "r1", "r2", "r3");
107 kunmap_atomic(kaddr, KM_USER0);
108}
109
110struct cpu_user_fns xsc3_mc_user_fns __initdata = {
111 .cpu_clear_user_highpage = xsc3_mc_clear_user_highpage,
112 .cpu_copy_user_highpage = xsc3_mc_copy_user_highpage,
113};
diff --git a/arch/arm/mm/copypage-xscale.c b/arch/arm/mm/copypage-xscale.c
index bad49331bbf9..d18f2397ee2d 100644
--- a/arch/arm/mm/copypage-xscale.c
+++ b/arch/arm/mm/copypage-xscale.c
@@ -15,8 +15,8 @@
15 */ 15 */
16#include <linux/init.h> 16#include <linux/init.h>
17#include <linux/mm.h> 17#include <linux/mm.h>
18#include <linux/highmem.h>
18 19
19#include <asm/page.h>
20#include <asm/pgtable.h> 20#include <asm/pgtable.h>
21#include <asm/tlbflush.h> 21#include <asm/tlbflush.h>
22#include <asm/cacheflush.h> 22#include <asm/cacheflush.h>
@@ -35,7 +35,7 @@
35static DEFINE_SPINLOCK(minicache_lock); 35static DEFINE_SPINLOCK(minicache_lock);
36 36
37/* 37/*
38 * XScale mini-dcache optimised copy_user_page 38 * XScale mini-dcache optimised copy_user_highpage
39 * 39 *
40 * We flush the destination cache lines just before we write the data into the 40 * We flush the destination cache lines just before we write the data into the
41 * corresponding address. Since the Dcache is read-allocate, this removes the 41 * corresponding address. Since the Dcache is read-allocate, this removes the
@@ -90,48 +90,53 @@ mc_copy_user_page(void *from, void *to)
90 : "r" (from), "r" (to), "I" (PAGE_SIZE / 64 - 1)); 90 : "r" (from), "r" (to), "I" (PAGE_SIZE / 64 - 1));
91} 91}
92 92
93void xscale_mc_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr) 93void xscale_mc_copy_user_highpage(struct page *to, struct page *from,
94 unsigned long vaddr)
94{ 95{
95 struct page *page = virt_to_page(kfrom); 96 void *kto = kmap_atomic(to, KM_USER1);
96 97
97 if (test_and_clear_bit(PG_dcache_dirty, &page->flags)) 98 if (test_and_clear_bit(PG_dcache_dirty, &from->flags))
98 __flush_dcache_page(page_mapping(page), page); 99 __flush_dcache_page(page_mapping(from), from);
99 100
100 spin_lock(&minicache_lock); 101 spin_lock(&minicache_lock);
101 102
102 set_pte_ext(TOP_PTE(COPYPAGE_MINICACHE), pfn_pte(__pa(kfrom) >> PAGE_SHIFT, minicache_pgprot), 0); 103 set_pte_ext(TOP_PTE(COPYPAGE_MINICACHE), pfn_pte(page_to_pfn(from), minicache_pgprot), 0);
103 flush_tlb_kernel_page(COPYPAGE_MINICACHE); 104 flush_tlb_kernel_page(COPYPAGE_MINICACHE);
104 105
105 mc_copy_user_page((void *)COPYPAGE_MINICACHE, kto); 106 mc_copy_user_page((void *)COPYPAGE_MINICACHE, kto);
106 107
107 spin_unlock(&minicache_lock); 108 spin_unlock(&minicache_lock);
109
110 kunmap_atomic(kto, KM_USER1);
108} 111}
109 112
110/* 113/*
111 * XScale optimised clear_user_page 114 * XScale optimised clear_user_page
112 */ 115 */
113void __attribute__((naked)) 116void
114xscale_mc_clear_user_page(void *kaddr, unsigned long vaddr) 117xscale_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
115{ 118{
119 void *ptr, *kaddr = kmap_atomic(page, KM_USER0);
116 asm volatile( 120 asm volatile(
117 "mov r1, %0 \n\ 121 "mov r1, %2 \n\
118 mov r2, #0 \n\ 122 mov r2, #0 \n\
119 mov r3, #0 \n\ 123 mov r3, #0 \n\
1201: mov ip, r0 \n\ 1241: mov ip, %0 \n\
121 strd r2, [r0], #8 \n\ 125 strd r2, [%0], #8 \n\
122 strd r2, [r0], #8 \n\ 126 strd r2, [%0], #8 \n\
123 strd r2, [r0], #8 \n\ 127 strd r2, [%0], #8 \n\
124 strd r2, [r0], #8 \n\ 128 strd r2, [%0], #8 \n\
125 mcr p15, 0, ip, c7, c10, 1 @ clean D line\n\ 129 mcr p15, 0, ip, c7, c10, 1 @ clean D line\n\
126 subs r1, r1, #1 \n\ 130 subs r1, r1, #1 \n\
127 mcr p15, 0, ip, c7, c6, 1 @ invalidate D line\n\ 131 mcr p15, 0, ip, c7, c6, 1 @ invalidate D line\n\
128 bne 1b \n\ 132 bne 1b"
129 mov pc, lr" 133 : "=r" (ptr)
130 : 134 : "0" (kaddr), "I" (PAGE_SIZE / 32)
131 : "I" (PAGE_SIZE / 32)); 135 : "r1", "r2", "r3", "ip");
136 kunmap_atomic(kaddr, KM_USER0);
132} 137}
133 138
134struct cpu_user_fns xscale_mc_user_fns __initdata = { 139struct cpu_user_fns xscale_mc_user_fns __initdata = {
135 .cpu_clear_user_page = xscale_mc_clear_user_page, 140 .cpu_clear_user_highpage = xscale_mc_clear_user_highpage,
136 .cpu_copy_user_page = xscale_mc_copy_user_page, 141 .cpu_copy_user_highpage = xscale_mc_copy_user_highpage,
137}; 142};
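As with the v4 mini-cache variant, the XScale copy path does not kmap the source page at all; it installs the source pfn into a fixed kernel PTE slot and copies through that window. In outline (the helper below is illustrative; the real code uses TOP_PTE() and COPYPAGE_MINICACHE as shown above):

	/*
	 * Illustrative outline of the dedicated-window mapping used by the
	 * mini-cache copy routines. Not a drop-in implementation.
	 */
	static void *example_map_source(struct page *from, pte_t *window_pte,
					unsigned long window_addr, pgprot_t prot)
	{
		/* point the fixed window PTE at the source page... */
		set_pte_ext(window_pte, pfn_pte(page_to_pfn(from), prot), 0);
		/* ...and drop any stale TLB entry for the window address */
		flush_tlb_kernel_page(window_addr);

		return (void *)window_addr;
	}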
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index 2df8d9facf57..ffd8b228a139 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -14,6 +14,7 @@
14#include <linux/init.h> 14#include <linux/init.h>
15#include <linux/kprobes.h> 15#include <linux/kprobes.h>
16#include <linux/uaccess.h> 16#include <linux/uaccess.h>
17#include <linux/page-flags.h>
17 18
18#include <asm/system.h> 19#include <asm/system.h>
19#include <asm/pgtable.h> 20#include <asm/pgtable.h>
@@ -83,13 +84,14 @@ void show_pte(struct mm_struct *mm, unsigned long addr)
83 break; 84 break;
84 } 85 }
85 86
86#ifndef CONFIG_HIGHMEM
87 /* We must not map this if we have highmem enabled */ 87 /* We must not map this if we have highmem enabled */
88 if (PageHighMem(pfn_to_page(pmd_val(*pmd) >> PAGE_SHIFT)))
89 break;
90
88 pte = pte_offset_map(pmd, addr); 91 pte = pte_offset_map(pmd, addr);
89 printk(", *pte=%08lx", pte_val(*pte)); 92 printk(", *pte=%08lx", pte_val(*pte));
90 printk(", *ppte=%08lx", pte_val(pte[-PTRS_PER_PTE])); 93 printk(", *ppte=%08lx", pte_val(pte[-PTRS_PER_PTE]));
91 pte_unmap(pte); 94 pte_unmap(pte);
92#endif
93 } while(0); 95 } while(0);
94 96
95 printk("\n"); 97 printk("\n");
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 82c4b4217989..ab5c9abd5c34 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -64,10 +64,11 @@ static int __init parse_tag_initrd2(const struct tag *tag)
64__tagtable(ATAG_INITRD2, parse_tag_initrd2); 64__tagtable(ATAG_INITRD2, parse_tag_initrd2);
65 65
66/* 66/*
67 * This is used to pass memory configuration data from paging_init 67 * This keeps memory configuration data used by a couple memory
68 * to mem_init, and by show_mem() to skip holes in the memory map. 68 * initialization functions, as well as show_mem() for the skipping
69 * of holes in the memory map. It is populated by arm_add_memory().
69 */ 70 */
70static struct meminfo meminfo = { 0, }; 71struct meminfo meminfo;
71 72
72void show_mem(void) 73void show_mem(void)
73{ 74{
@@ -331,13 +332,12 @@ static void __init bootmem_free_node(int node, struct meminfo *mi)
331 free_area_init_node(node, zone_size, start_pfn, zhole_size); 332 free_area_init_node(node, zone_size, start_pfn, zhole_size);
332} 333}
333 334
334void __init bootmem_init(struct meminfo *mi) 335void __init bootmem_init(void)
335{ 336{
337 struct meminfo *mi = &meminfo;
336 unsigned long memend_pfn = 0; 338 unsigned long memend_pfn = 0;
337 int node, initrd_node; 339 int node, initrd_node;
338 340
339 memcpy(&meminfo, mi, sizeof(meminfo));
340
341 /* 341 /*
342 * Locate which node contains the ramdisk image, if any. 342 * Locate which node contains the ramdisk image, if any.
343 */ 343 */
@@ -394,20 +394,22 @@ void __init bootmem_init(struct meminfo *mi)
394 max_pfn = max_low_pfn = memend_pfn - PHYS_PFN_OFFSET; 394 max_pfn = max_low_pfn = memend_pfn - PHYS_PFN_OFFSET;
395} 395}
396 396
397static inline void free_area(unsigned long addr, unsigned long end, char *s) 397static inline int free_area(unsigned long pfn, unsigned long end, char *s)
398{ 398{
399 unsigned int size = (end - addr) >> 10; 399 unsigned int pages = 0, size = (end - pfn) << (PAGE_SHIFT - 10);
400 400
401 for (; addr < end; addr += PAGE_SIZE) { 401 for (; pfn < end; pfn++) {
402 struct page *page = virt_to_page(addr); 402 struct page *page = pfn_to_page(pfn);
403 ClearPageReserved(page); 403 ClearPageReserved(page);
404 init_page_count(page); 404 init_page_count(page);
405 free_page(addr); 405 __free_page(page);
406 totalram_pages++; 406 pages++;
407 } 407 }
408 408
409 if (size && s) 409 if (size && s)
410 printk(KERN_INFO "Freeing %s memory: %dK\n", s, size); 410 printk(KERN_INFO "Freeing %s memory: %dK\n", s, size);
411
412 return pages;
411} 413}
412 414
413static inline void 415static inline void
@@ -478,13 +480,9 @@ static void __init free_unused_memmap_node(int node, struct meminfo *mi)
478 */ 480 */
479void __init mem_init(void) 481void __init mem_init(void)
480{ 482{
481 unsigned int codepages, datapages, initpages; 483 unsigned int codesize, datasize, initsize;
482 int i, node; 484 int i, node;
483 485
484 codepages = &_etext - &_text;
485 datapages = &_end - &__data_start;
486 initpages = &__init_end - &__init_begin;
487
488#ifndef CONFIG_DISCONTIGMEM 486#ifndef CONFIG_DISCONTIGMEM
489 max_mapnr = virt_to_page(high_memory) - mem_map; 487 max_mapnr = virt_to_page(high_memory) - mem_map;
490#endif 488#endif
@@ -501,7 +499,8 @@ void __init mem_init(void)
501 499
502#ifdef CONFIG_SA1111 500#ifdef CONFIG_SA1111
503 /* now that our DMA memory is actually so designated, we can free it */ 501 /* now that our DMA memory is actually so designated, we can free it */
504 free_area(PAGE_OFFSET, (unsigned long)swapper_pg_dir, NULL); 502 totalram_pages += free_area(PHYS_PFN_OFFSET,
503 __phys_to_pfn(__pa(swapper_pg_dir)), NULL);
505#endif 504#endif
506 505
507 /* 506 /*
@@ -509,18 +508,21 @@ void __init mem_init(void)
509 * real number of pages we have in this system 508 * real number of pages we have in this system
510 */ 509 */
511 printk(KERN_INFO "Memory:"); 510 printk(KERN_INFO "Memory:");
512
513 num_physpages = 0; 511 num_physpages = 0;
514 for (i = 0; i < meminfo.nr_banks; i++) { 512 for (i = 0; i < meminfo.nr_banks; i++) {
515 num_physpages += bank_pfn_size(&meminfo.bank[i]); 513 num_physpages += bank_pfn_size(&meminfo.bank[i]);
516 printk(" %ldMB", bank_phys_size(&meminfo.bank[i]) >> 20); 514 printk(" %ldMB", bank_phys_size(&meminfo.bank[i]) >> 20);
517 } 515 }
518
519 printk(" = %luMB total\n", num_physpages >> (20 - PAGE_SHIFT)); 516 printk(" = %luMB total\n", num_physpages >> (20 - PAGE_SHIFT));
517
518 codesize = &_etext - &_text;
519 datasize = &_end - &__data_start;
520 initsize = &__init_end - &__init_begin;
521
520 printk(KERN_NOTICE "Memory: %luKB available (%dK code, " 522 printk(KERN_NOTICE "Memory: %luKB available (%dK code, "
521 "%dK data, %dK init)\n", 523 "%dK data, %dK init)\n",
522 (unsigned long) nr_free_pages() << (PAGE_SHIFT-10), 524 (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
523 codepages >> 10, datapages >> 10, initpages >> 10); 525 codesize >> 10, datasize >> 10, initsize >> 10);
524 526
525 if (PAGE_SIZE >= 16384 && num_physpages <= 128) { 527 if (PAGE_SIZE >= 16384 && num_physpages <= 128) {
526 extern int sysctl_overcommit_memory; 528 extern int sysctl_overcommit_memory;
@@ -535,11 +537,10 @@ void __init mem_init(void)
535 537
536void free_initmem(void) 538void free_initmem(void)
537{ 539{
538 if (!machine_is_integrator() && !machine_is_cintegrator()) { 540 if (!machine_is_integrator() && !machine_is_cintegrator())
539 free_area((unsigned long)(&__init_begin), 541 totalram_pages += free_area(__phys_to_pfn(__pa(&__init_begin)),
540 (unsigned long)(&__init_end), 542 __phys_to_pfn(__pa(&__init_end)),
541 "init"); 543 "init");
542 }
543} 544}
544 545
545#ifdef CONFIG_BLK_DEV_INITRD 546#ifdef CONFIG_BLK_DEV_INITRD
@@ -549,7 +550,9 @@ static int keep_initrd;
549void free_initrd_mem(unsigned long start, unsigned long end) 550void free_initrd_mem(unsigned long start, unsigned long end)
550{ 551{
551 if (!keep_initrd) 552 if (!keep_initrd)
552 free_area(start, end, "initrd"); 553 totalram_pages += free_area(__phys_to_pfn(__pa(start)),
554 __phys_to_pfn(__pa(end)),
555 "initrd");
553} 556}
554 557
555static int __init keepinitrd_setup(char *__unused) 558static int __init keepinitrd_setup(char *__unused)
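free_area() now walks page frame numbers rather than kernel virtual addresses, which keeps it usable for memory that has no permanent kernel mapping, and it returns the number of pages released so each caller folds the result into totalram_pages itself. A small usage sketch with an illustrative helper name:

	/* Illustrative only: releasing a physical range via the new free_area(). */
	static void example_free_phys_range(unsigned long phys_start,
					    unsigned long phys_end, char *what)
	{
		totalram_pages += free_area(__phys_to_pfn(phys_start),
					    __phys_to_pfn(phys_end), what);
	}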
diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h
index 5d9f53907b4e..94367bdbb5a8 100644
--- a/arch/arm/mm/mm.h
+++ b/arch/arm/mm/mm.h
@@ -32,7 +32,7 @@ struct meminfo;
32struct pglist_data; 32struct pglist_data;
33 33
34void __init create_mapping(struct map_desc *md); 34void __init create_mapping(struct map_desc *md);
35void __init bootmem_init(struct meminfo *mi); 35void __init bootmem_init(void);
36void reserve_node_zero(struct pglist_data *pgdat); 36void reserve_node_zero(struct pglist_data *pgdat);
37 37
38extern void _text, _stext, _etext, __data_start, _end, __init_begin, __init_end; 38extern void _text, _stext, _etext, __data_start, _end, __init_begin, __init_end;
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index f24803c1fb0b..c0b9a78d7b87 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -646,61 +646,79 @@ static void __init early_vmalloc(char **arg)
646 "vmalloc area too small, limiting to %luMB\n", 646 "vmalloc area too small, limiting to %luMB\n",
647 vmalloc_reserve >> 20); 647 vmalloc_reserve >> 20);
648 } 648 }
649
650 if (vmalloc_reserve > VMALLOC_END - (PAGE_OFFSET + SZ_32M)) {
651 vmalloc_reserve = VMALLOC_END - (PAGE_OFFSET + SZ_32M);
652 printk(KERN_WARNING
653 "vmalloc area is too big, limiting to %luMB\n",
654 vmalloc_reserve >> 20);
655 }
649} 656}
650__early_param("vmalloc=", early_vmalloc); 657__early_param("vmalloc=", early_vmalloc);
651 658
652#define VMALLOC_MIN (void *)(VMALLOC_END - vmalloc_reserve) 659#define VMALLOC_MIN (void *)(VMALLOC_END - vmalloc_reserve)
653 660
654static int __init check_membank_valid(struct membank *mb) 661static void __init sanity_check_meminfo(void)
655{ 662{
656 /* 663 int i, j;
657 * Check whether this memory region has non-zero size or
658 * invalid node number.
659 */
660 if (mb->size == 0 || mb->node >= MAX_NUMNODES)
661 return 0;
662
663 /*
664 * Check whether this memory region would entirely overlap
665 * the vmalloc area.
666 */
667 if (phys_to_virt(mb->start) >= VMALLOC_MIN) {
668 printk(KERN_NOTICE "Ignoring RAM at %.8lx-%.8lx "
669 "(vmalloc region overlap).\n",
670 mb->start, mb->start + mb->size - 1);
671 return 0;
672 }
673
674 /*
675 * Check whether this memory region would partially overlap
676 * the vmalloc area.
677 */
678 if (phys_to_virt(mb->start + mb->size) < phys_to_virt(mb->start) ||
679 phys_to_virt(mb->start + mb->size) > VMALLOC_MIN) {
680 unsigned long newsize = VMALLOC_MIN - phys_to_virt(mb->start);
681
682 printk(KERN_NOTICE "Truncating RAM at %.8lx-%.8lx "
683 "to -%.8lx (vmalloc region overlap).\n",
684 mb->start, mb->start + mb->size - 1,
685 mb->start + newsize - 1);
686 mb->size = newsize;
687 }
688 664
689 return 1; 665 for (i = 0, j = 0; i < meminfo.nr_banks; i++) {
690} 666 struct membank *bank = &meminfo.bank[j];
667 *bank = meminfo.bank[i];
691 668
692static void __init sanity_check_meminfo(struct meminfo *mi) 669#ifdef CONFIG_HIGHMEM
693{ 670 /*
694 int i, j; 671 * Split those memory banks which are partially overlapping
672 * the vmalloc area greatly simplifying things later.
673 */
674 if (__va(bank->start) < VMALLOC_MIN &&
675 bank->size > VMALLOC_MIN - __va(bank->start)) {
676 if (meminfo.nr_banks >= NR_BANKS) {
677 printk(KERN_CRIT "NR_BANKS too low, "
678 "ignoring high memory\n");
679 } else {
680 memmove(bank + 1, bank,
681 (meminfo.nr_banks - i) * sizeof(*bank));
682 meminfo.nr_banks++;
683 i++;
684 bank[1].size -= VMALLOC_MIN - __va(bank->start);
685 bank[1].start = __pa(VMALLOC_MIN - 1) + 1;
686 j++;
687 }
688 bank->size = VMALLOC_MIN - __va(bank->start);
689 }
690#else
691 /*
692 * Check whether this memory bank would entirely overlap
693 * the vmalloc area.
694 */
695 if (__va(bank->start) >= VMALLOC_MIN) {
696 printk(KERN_NOTICE "Ignoring RAM at %.8lx-%.8lx "
697 "(vmalloc region overlap).\n",
698 bank->start, bank->start + bank->size - 1);
699 continue;
700 }
695 701
696 for (i = 0, j = 0; i < mi->nr_banks; i++) { 702 /*
697 if (check_membank_valid(&mi->bank[i])) 703 * Check whether this memory bank would partially overlap
698 mi->bank[j++] = mi->bank[i]; 704 * the vmalloc area.
705 */
706 if (__va(bank->start + bank->size) > VMALLOC_MIN ||
707 __va(bank->start + bank->size) < __va(bank->start)) {
708 unsigned long newsize = VMALLOC_MIN - __va(bank->start);
709 printk(KERN_NOTICE "Truncating RAM at %.8lx-%.8lx "
710 "to -%.8lx (vmalloc region overlap).\n",
711 bank->start, bank->start + bank->size - 1,
712 bank->start + newsize - 1);
713 bank->size = newsize;
714 }
715#endif
716 j++;
699 } 717 }
700 mi->nr_banks = j; 718 meminfo.nr_banks = j;
701} 719}
702 720
703static inline void prepare_page_table(struct meminfo *mi) 721static inline void prepare_page_table(void)
704{ 722{
705 unsigned long addr; 723 unsigned long addr;
706 724
@@ -721,7 +739,7 @@ static inline void prepare_page_table(struct meminfo *mi)
721 * Clear out all the kernel space mappings, except for the first 739 * Clear out all the kernel space mappings, except for the first
722 * memory bank, up to the end of the vmalloc region. 740 * memory bank, up to the end of the vmalloc region.
723 */ 741 */
724 for (addr = __phys_to_virt(mi->bank[0].start + mi->bank[0].size); 742 for (addr = __phys_to_virt(bank_phys_end(&meminfo.bank[0]));
725 addr < VMALLOC_END; addr += PGDIR_SIZE) 743 addr < VMALLOC_END; addr += PGDIR_SIZE)
726 pmd_clear(pmd_off_k(addr)); 744 pmd_clear(pmd_off_k(addr));
727} 745}
@@ -880,14 +898,14 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
880 * paging_init() sets up the page tables, initialises the zone memory 898 * paging_init() sets up the page tables, initialises the zone memory
881 * maps, and sets up the zero page, bad page and bad page tables. 899 * maps, and sets up the zero page, bad page and bad page tables.
882 */ 900 */
883void __init paging_init(struct meminfo *mi, struct machine_desc *mdesc) 901void __init paging_init(struct machine_desc *mdesc)
884{ 902{
885 void *zero_page; 903 void *zero_page;
886 904
887 build_mem_type_table(); 905 build_mem_type_table();
888 sanity_check_meminfo(mi); 906 sanity_check_meminfo();
889 prepare_page_table(mi); 907 prepare_page_table();
890 bootmem_init(mi); 908 bootmem_init();
891 devicemaps_init(mdesc); 909 devicemaps_init(mdesc);
892 910
893 top_pmd = pmd_off_k(0xffff0000); 911 top_pmd = pmd_off_k(0xffff0000);
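Under CONFIG_HIGHMEM the new sanity_check_meminfo() no longer truncates a bank that crosses the lowmem/vmalloc boundary; it splits the bank so the upper part survives as highmem. A simplified, standalone sketch of that split (the real code works on struct membank in place and compares virtual addresses against VMALLOC_MIN; the physical lowmem_limit here is an illustrative stand-in):

	struct example_bank {
		unsigned long start;	/* physical base */
		unsigned long size;
	};

	/*
	 * Illustrative sketch: split one bank at the physical address backing
	 * the bottom of the vmalloc area. Returns 1 if a highmem part results.
	 */
	static int example_split_bank(struct example_bank *bank,
				      struct example_bank *high,
				      unsigned long lowmem_limit)
	{
		if (bank->start >= lowmem_limit)
			return 0;	/* bank lies entirely above lowmem */
		if (bank->start + bank->size <= lowmem_limit)
			return 0;	/* bank fits entirely below the boundary */

		/*
		 * Example: start = 0x20000000, size = 512MB, lowmem_limit =
		 * 0x30000000: the low bank keeps 256MB, the high bank gets 256MB.
		 */
		high->start = lowmem_limit;
		high->size  = bank->start + bank->size - lowmem_limit;
		bank->size  = lowmem_limit - bank->start;
		return 1;
	}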
diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c
index 07b62b238979..c085f4e8248b 100644
--- a/arch/arm/mm/nommu.c
+++ b/arch/arm/mm/nommu.c
@@ -41,27 +41,13 @@ void __init reserve_node_zero(pg_data_t *pgdat)
41 BOOTMEM_DEFAULT); 41 BOOTMEM_DEFAULT);
42} 42}
43 43
44static void __init sanity_check_meminfo(struct meminfo *mi)
45{
46 int i, j;
47
48 for (i = 0, j = 0; i < mi->nr_banks; i++) {
49 struct membank *mb = &mi->bank[i];
50
51 if (mb->size != 0 && mb->node < MAX_NUMNODES)
52 mi->bank[j++] = mi->bank[i];
53 }
54 mi->nr_banks = j;
55}
56
57/* 44/*
58 * paging_init() sets up the page tables, initialises the zone memory 45 * paging_init() sets up the page tables, initialises the zone memory
59 * maps, and sets up the zero page, bad page and bad page tables. 46 * maps, and sets up the zero page, bad page and bad page tables.
60 */ 47 */
61void __init paging_init(struct meminfo *mi, struct machine_desc *mdesc) 48void __init paging_init(struct machine_desc *mdesc)
62{ 49{
63 sanity_check_meminfo(mi); 50 bootmem_init();
64 bootmem_init(mi);
65} 51}
66 52
67/* 53/*
diff --git a/arch/arm/mm/proc-syms.c b/arch/arm/mm/proc-syms.c
index 2b5ba396e3a6..4ad3bf291ad3 100644
--- a/arch/arm/mm/proc-syms.c
+++ b/arch/arm/mm/proc-syms.c
@@ -33,8 +33,8 @@ EXPORT_SYMBOL(cpu_cache);
33 33
34#ifdef CONFIG_MMU 34#ifdef CONFIG_MMU
35#ifndef MULTI_USER 35#ifndef MULTI_USER
36EXPORT_SYMBOL(__cpu_clear_user_page); 36EXPORT_SYMBOL(__cpu_clear_user_highpage);
37EXPORT_SYMBOL(__cpu_copy_user_page); 37EXPORT_SYMBOL(__cpu_copy_user_highpage);
38#else 38#else
39EXPORT_SYMBOL(cpu_user); 39EXPORT_SYMBOL(cpu_user);
40#endif 40#endif
diff --git a/arch/arm/plat-mxc/include/mach/memory.h b/arch/arm/plat-mxc/include/mach/memory.h
index d7a8d3ebed57..203688e6164e 100644
--- a/arch/arm/plat-mxc/include/mach/memory.h
+++ b/arch/arm/plat-mxc/include/mach/memory.h
@@ -13,17 +13,4 @@
13 13
14#include <mach/hardware.h> 14#include <mach/hardware.h>
15 15
16/*
17 * Virtual view <-> DMA view memory address translations
18 * This macro is used to translate the virtual address to an address
19 * suitable to be passed to set_dma_addr()
20 */
21#define __virt_to_bus(a) __virt_to_phys(a)
22
23/*
24 * Used to convert an address for DMA operations to an address that the
25 * kernel can use.
26 */
27#define __bus_to_virt(a) __phys_to_virt(a)
28
29#endif /* __ASM_ARCH_MXC_MEMORY_H__ */ 16#endif /* __ASM_ARCH_MXC_MEMORY_H__ */
diff --git a/arch/arm/plat-omap/gpio.c b/arch/arm/plat-omap/gpio.c
index 8679fbca6bbe..424049d83fbe 100644
--- a/arch/arm/plat-omap/gpio.c
+++ b/arch/arm/plat-omap/gpio.c
@@ -101,6 +101,7 @@
101#define OMAP24XX_GPIO_IRQSTATUS2 0x0028 101#define OMAP24XX_GPIO_IRQSTATUS2 0x0028
102#define OMAP24XX_GPIO_IRQENABLE2 0x002c 102#define OMAP24XX_GPIO_IRQENABLE2 0x002c
103#define OMAP24XX_GPIO_IRQENABLE1 0x001c 103#define OMAP24XX_GPIO_IRQENABLE1 0x001c
104#define OMAP24XX_GPIO_WAKE_EN 0x0020
104#define OMAP24XX_GPIO_CTRL 0x0030 105#define OMAP24XX_GPIO_CTRL 0x0030
105#define OMAP24XX_GPIO_OE 0x0034 106#define OMAP24XX_GPIO_OE 0x0034
106#define OMAP24XX_GPIO_DATAIN 0x0038 107#define OMAP24XX_GPIO_DATAIN 0x0038
@@ -1551,7 +1552,7 @@ static int omap_gpio_suspend(struct sys_device *dev, pm_message_t mesg)
1551#endif 1552#endif
1552#if defined(CONFIG_ARCH_OMAP24XX) || defined(CONFIG_ARCH_OMAP34XX) 1553#if defined(CONFIG_ARCH_OMAP24XX) || defined(CONFIG_ARCH_OMAP34XX)
1553 case METHOD_GPIO_24XX: 1554 case METHOD_GPIO_24XX:
1554 wake_status = bank->base + OMAP24XX_GPIO_SETWKUENA; 1555 wake_status = bank->base + OMAP24XX_GPIO_WAKE_EN;
1555 wake_clear = bank->base + OMAP24XX_GPIO_CLEARWKUENA; 1556 wake_clear = bank->base + OMAP24XX_GPIO_CLEARWKUENA;
1556 wake_set = bank->base + OMAP24XX_GPIO_SETWKUENA; 1557 wake_set = bank->base + OMAP24XX_GPIO_SETWKUENA;
1557 break; 1558 break;
@@ -1574,7 +1575,7 @@ static int omap_gpio_resume(struct sys_device *dev)
1574{ 1575{
1575 int i; 1576 int i;
1576 1577
1577 if (!cpu_is_omap24xx() && !cpu_is_omap16xx()) 1578 if (!cpu_class_is_omap2() && !cpu_is_omap16xx())
1578 return 0; 1579 return 0;
1579 1580
1580 for (i = 0; i < gpio_bank_count; i++) { 1581 for (i = 0; i < gpio_bank_count; i++) {
diff --git a/arch/arm/plat-omap/include/mach/memory.h b/arch/arm/plat-omap/include/mach/memory.h
index d40cac60b959..211c9f6619e9 100644
--- a/arch/arm/plat-omap/include/mach/memory.h
+++ b/arch/arm/plat-omap/include/mach/memory.h
@@ -43,18 +43,7 @@
43#endif 43#endif
44 44
45/* 45/*
46 * Conversion between SDRAM and fake PCI bus, used by USB
47 * NOTE: Physical address must be converted to Local Bus address
48 * on OMAP-1510 only
49 */
50
51/*
52 * Bus address is physical address, except for OMAP-1510 Local Bus. 46 * Bus address is physical address, except for OMAP-1510 Local Bus.
53 */
54#define __virt_to_bus(x) __virt_to_phys(x)
55#define __bus_to_virt(x) __phys_to_virt(x)
56
57/*
58 * OMAP-1510 bus address is translated into a Local Bus address if the 47 * OMAP-1510 bus address is translated into a Local Bus address if the
59 * OMAP bus type is lbus. We do the address translation based on the 48 * OMAP bus type is lbus. We do the address translation based on the
60 * device overriding the defaults used in the dma-mapping API. 49 * device overriding the defaults used in the dma-mapping API.
@@ -74,16 +63,16 @@
74 63
75#define __arch_page_to_dma(dev, page) ({is_lbus_device(dev) ? \ 64#define __arch_page_to_dma(dev, page) ({is_lbus_device(dev) ? \
76 (dma_addr_t)virt_to_lbus(page_address(page)) : \ 65 (dma_addr_t)virt_to_lbus(page_address(page)) : \
77 (dma_addr_t)__virt_to_bus(page_address(page));}) 66 (dma_addr_t)__virt_to_phys(page_address(page));})
78 67
79#define __arch_dma_to_virt(dev, addr) ({ (void *) (is_lbus_device(dev) ? \ 68#define __arch_dma_to_virt(dev, addr) ({ (void *) (is_lbus_device(dev) ? \
80 lbus_to_virt(addr) : \ 69 lbus_to_virt(addr) : \
81 __bus_to_virt(addr)); }) 70 __phys_to_virt(addr)); })
82 71
83#define __arch_virt_to_dma(dev, addr) ({ unsigned long __addr = (unsigned long)(addr); \ 72#define __arch_virt_to_dma(dev, addr) ({ unsigned long __addr = (unsigned long)(addr); \
84 (dma_addr_t) (is_lbus_device(dev) ? \ 73 (dma_addr_t) (is_lbus_device(dev) ? \
85 virt_to_lbus(__addr) : \ 74 virt_to_lbus(__addr) : \
86 __virt_to_bus(__addr)); }) 75 __virt_to_phys(__addr)); })
87 76
88#endif /* CONFIG_ARCH_OMAP15XX */ 77#endif /* CONFIG_ARCH_OMAP15XX */
89 78
diff --git a/arch/arm/plat-omap/include/mach/pm.h b/arch/arm/plat-omap/include/mach/pm.h
index 768eb6e7abcf..2a9c27ad4c37 100644
--- a/arch/arm/plat-omap/include/mach/pm.h
+++ b/arch/arm/plat-omap/include/mach/pm.h
@@ -128,7 +128,7 @@ void clk_deny_idle(struct clk *clk);
128 * clk_allow_idle - Counters previous clk_deny_idle 128 * clk_allow_idle - Counters previous clk_deny_idle
129 * @clk: clock signal handle 129 * @clk: clock signal handle
130 */ 130 */
131void clk_deny_idle(struct clk *clk); 131void clk_allow_idle(struct clk *clk);
132 132
133extern void omap_pm_idle(void); 133extern void omap_pm_idle(void);
134extern void omap_pm_suspend(void); 134extern void omap_pm_suspend(void);
diff --git a/drivers/usb/gadget/pxa25x_udc.c b/drivers/usb/gadget/pxa25x_udc.c
index a4790f3c7cd3..8c5026be79d4 100644
--- a/drivers/usb/gadget/pxa25x_udc.c
+++ b/drivers/usb/gadget/pxa25x_udc.c
@@ -141,7 +141,11 @@ static int is_vbus_present(void)
141 141
142 if (mach->gpio_vbus) { 142 if (mach->gpio_vbus) {
143 int value = gpio_get_value(mach->gpio_vbus); 143 int value = gpio_get_value(mach->gpio_vbus);
144 return mach->gpio_vbus_inverted ? !value : value; 144
145 if (mach->gpio_vbus_inverted)
146 return !value;
147 else
148 return !!value;
145 } 149 }
146 if (mach->udc_is_connected) 150 if (mach->udc_is_connected)
147 return mach->udc_is_connected(); 151 return mach->udc_is_connected();
@@ -982,7 +986,7 @@ static int pxa25x_udc_vbus_session(struct usb_gadget *_gadget, int is_active)
982 struct pxa25x_udc *udc; 986 struct pxa25x_udc *udc;
983 987
984 udc = container_of(_gadget, struct pxa25x_udc, gadget); 988 udc = container_of(_gadget, struct pxa25x_udc, gadget);
985 udc->vbus = (is_active != 0); 989 udc->vbus = is_active;
986 DMSG("vbus %s\n", is_active ? "supplied" : "inactive"); 990 DMSG("vbus %s\n", is_active ? "supplied" : "inactive");
987 pullup(udc); 991 pullup(udc);
988 return 0; 992 return 0;
@@ -1399,12 +1403,8 @@ lubbock_vbus_irq(int irq, void *_dev)
1399static irqreturn_t udc_vbus_irq(int irq, void *_dev) 1403static irqreturn_t udc_vbus_irq(int irq, void *_dev)
1400{ 1404{
1401 struct pxa25x_udc *dev = _dev; 1405 struct pxa25x_udc *dev = _dev;
1402 int vbus = gpio_get_value(dev->mach->gpio_vbus);
1403 1406
1404 if (dev->mach->gpio_vbus_inverted) 1407 pxa25x_udc_vbus_session(&dev->gadget, is_vbus_present());
1405 vbus = !vbus;
1406
1407 pxa25x_udc_vbus_session(&dev->gadget, vbus);
1408 return IRQ_HANDLED; 1408 return IRQ_HANDLED;
1409} 1409}
1410 1410
diff --git a/drivers/video/omap/Makefile b/drivers/video/omap/Makefile
index 99da8b6d2c36..ed13889c1162 100644
--- a/drivers/video/omap/Makefile
+++ b/drivers/video/omap/Makefile
@@ -23,7 +23,6 @@ objs-y$(CONFIG_MACH_OMAP_PALMZ71) += lcd_palmz71.o
23objs-$(CONFIG_ARCH_OMAP16XX)$(CONFIG_MACH_OMAP_INNOVATOR) += lcd_inn1610.o 23objs-$(CONFIG_ARCH_OMAP16XX)$(CONFIG_MACH_OMAP_INNOVATOR) += lcd_inn1610.o
24objs-$(CONFIG_ARCH_OMAP15XX)$(CONFIG_MACH_OMAP_INNOVATOR) += lcd_inn1510.o 24objs-$(CONFIG_ARCH_OMAP15XX)$(CONFIG_MACH_OMAP_INNOVATOR) += lcd_inn1510.o
25objs-y$(CONFIG_MACH_OMAP_OSK) += lcd_osk.o 25objs-y$(CONFIG_MACH_OMAP_OSK) += lcd_osk.o
26objs-y$(CONFIG_MACH_SX1) += lcd_sx1.o
27 26
28omapfb-objs := $(objs-yy) 27omapfb-objs := $(objs-yy)
29 28
diff --git a/drivers/video/omap/lcd_sx1.c b/drivers/video/omap/lcd_sx1.c
deleted file mode 100644
index e55de201b8ff..000000000000
--- a/drivers/video/omap/lcd_sx1.c
+++ /dev/null
@@ -1,327 +0,0 @@
1/*
2 * LCD panel support for the Siemens SX1 mobile phone
3 *
4 * Current version : Vovan888@gmail.com, great help from FCA00000
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; either version 2 of the License, or (at your
9 * option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License along
17 * with this program; if not, write to the Free Software Foundation, Inc.,
18 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 */
20
21#include <linux/module.h>
22#include <linux/platform_device.h>
23#include <linux/delay.h>
24#include <linux/io.h>
25
26#include <mach/gpio.h>
27#include <mach/omapfb.h>
28#include <mach/mcbsp.h>
29#include <mach/mux.h>
30
31/*
32 * OMAP310 GPIO registers
33 */
34#define GPIO_DATA_INPUT 0xfffce000
35#define GPIO_DATA_OUTPUT 0xfffce004
36#define GPIO_DIR_CONTROL 0xfffce008
37#define GPIO_INT_CONTROL 0xfffce00c
38#define GPIO_INT_MASK 0xfffce010
39#define GPIO_INT_STATUS 0xfffce014
40#define GPIO_PIN_CONTROL 0xfffce018
41
42
43#define A_LCD_SSC_RD 3
44#define A_LCD_SSC_SD 7
45#define _A_LCD_RESET 9
46#define _A_LCD_SSC_CS 12
47#define _A_LCD_SSC_A0 13
48
49#define DSP_REG 0xE1017024
50
51const unsigned char INIT_1[12] = {
52 0x1C, 0x02, 0x88, 0x00, 0x1E, 0xE0, 0x00, 0xDC, 0x00, 0x02, 0x00
53};
54
55const unsigned char INIT_2[127] = {
56 0x15, 0x00, 0x29, 0x00, 0x3E, 0x00, 0x51, 0x00,
57 0x65, 0x00, 0x7A, 0x00, 0x8D, 0x00, 0xA1, 0x00,
58 0xB6, 0x00, 0xC7, 0x00, 0xD8, 0x00, 0xEB, 0x00,
59 0xFB, 0x00, 0x0B, 0x01, 0x1B, 0x01, 0x27, 0x01,
60 0x34, 0x01, 0x41, 0x01, 0x4C, 0x01, 0x55, 0x01,
61 0x5F, 0x01, 0x68, 0x01, 0x70, 0x01, 0x78, 0x01,
62 0x7E, 0x01, 0x86, 0x01, 0x8C, 0x01, 0x94, 0x01,
63 0x9B, 0x01, 0xA1, 0x01, 0xA4, 0x01, 0xA9, 0x01,
64 0xAD, 0x01, 0xB2, 0x01, 0xB7, 0x01, 0xBC, 0x01,
65 0xC0, 0x01, 0xC4, 0x01, 0xC8, 0x01, 0xCB, 0x01,
66 0xCF, 0x01, 0xD2, 0x01, 0xD5, 0x01, 0xD8, 0x01,
67 0xDB, 0x01, 0xE0, 0x01, 0xE3, 0x01, 0xE6, 0x01,
68 0xE8, 0x01, 0xEB, 0x01, 0xEE, 0x01, 0xF1, 0x01,
69 0xF3, 0x01, 0xF8, 0x01, 0xF9, 0x01, 0xFC, 0x01,
70 0x00, 0x02, 0x03, 0x02, 0x07, 0x02, 0x09, 0x02,
71 0x0E, 0x02, 0x13, 0x02, 0x1C, 0x02, 0x00
72};
73
74const unsigned char INIT_3[15] = {
75 0x14, 0x26, 0x33, 0x3D, 0x45, 0x4D, 0x53, 0x59,
76 0x5E, 0x63, 0x67, 0x6D, 0x71, 0x78, 0xFF
77};
78
79static void epson_sendbyte(int flag, unsigned char byte)
80{
81 int i, shifter = 0x80;
82
83 if (!flag)
84 gpio_set_value(_A_LCD_SSC_A0, 0);
85 mdelay(2);
86 gpio_set_value(A_LCD_SSC_RD, 1);
87
88 gpio_set_value(A_LCD_SSC_SD, flag);
89
90 OMAP_MCBSP_WRITE(OMAP1510_MCBSP3_BASE, PCR0, 0x2200);
91 OMAP_MCBSP_WRITE(OMAP1510_MCBSP3_BASE, PCR0, 0x2202);
92 for (i = 0; i < 8; i++) {
93 OMAP_MCBSP_WRITE(OMAP1510_MCBSP3_BASE, PCR0, 0x2200);
94 gpio_set_value(A_LCD_SSC_SD, shifter & byte);
95 OMAP_MCBSP_WRITE(OMAP1510_MCBSP3_BASE, PCR0, 0x2202);
96 shifter >>= 1;
97 }
98 gpio_set_value(_A_LCD_SSC_A0, 1);
99}
100
101static void init_system(void)
102{
103 omap_mcbsp_request(OMAP_MCBSP3);
104 omap_mcbsp_stop(OMAP_MCBSP3);
105}
106
107static void setup_GPIO(void)
108{
109 /* new wave */
110 gpio_request(A_LCD_SSC_RD, "lcd_ssc_rd");
111 gpio_request(A_LCD_SSC_SD, "lcd_ssc_sd");
112 gpio_request(_A_LCD_RESET, "lcd_reset");
113 gpio_request(_A_LCD_SSC_CS, "lcd_ssc_cs");
114 gpio_request(_A_LCD_SSC_A0, "lcd_ssc_a0");
115
116 /* set GPIOs to output, with initial data */
117 gpio_direction_output(A_LCD_SSC_RD, 1);
118 gpio_direction_output(A_LCD_SSC_SD, 0);
119 gpio_direction_output(_A_LCD_RESET, 0);
120 gpio_direction_output(_A_LCD_SSC_CS, 1);
121 gpio_direction_output(_A_LCD_SSC_A0, 1);
122}
123
124static void display_init(void)
125{
126 int i;
127
128 omap_cfg_reg(MCBSP3_CLKX);
129
130 mdelay(2);
131 setup_GPIO();
132 mdelay(2);
133
134 /* reset LCD */
135 gpio_set_value(A_LCD_SSC_SD, 1);
136 epson_sendbyte(0, 0x25);
137
138 gpio_set_value(_A_LCD_RESET, 0);
139 mdelay(10);
140 gpio_set_value(_A_LCD_RESET, 1);
141
142 gpio_set_value(_A_LCD_SSC_CS, 1);
143 mdelay(2);
144 gpio_set_value(_A_LCD_SSC_CS, 0);
145
146 /* init LCD, phase 1 */
147 epson_sendbyte(0, 0xCA);
148 for (i = 0; i < 10; i++)
149 epson_sendbyte(1, INIT_1[i]);
150 gpio_set_value(_A_LCD_SSC_CS, 1);
151 gpio_set_value(_A_LCD_SSC_CS, 0);
152
153 /* init LCD phase 2 */
154 epson_sendbyte(0, 0xCB);
155 for (i = 0; i < 125; i++)
156 epson_sendbyte(1, INIT_2[i]);
157 gpio_set_value(_A_LCD_SSC_CS, 1);
158 gpio_set_value(_A_LCD_SSC_CS, 0);
159
160 /* init LCD phase 2a */
161 epson_sendbyte(0, 0xCC);
162 for (i = 0; i < 14; i++)
163 epson_sendbyte(1, INIT_3[i]);
164 gpio_set_value(_A_LCD_SSC_CS, 1);
165 gpio_set_value(_A_LCD_SSC_CS, 0);
166
167 /* init LCD phase 3 */
168 epson_sendbyte(0, 0xBC);
169 epson_sendbyte(1, 0x08);
170 gpio_set_value(_A_LCD_SSC_CS, 1);
171 gpio_set_value(_A_LCD_SSC_CS, 0);
172
173 /* init LCD phase 4 */
174 epson_sendbyte(0, 0x07);
175 epson_sendbyte(1, 0x05);
176 gpio_set_value(_A_LCD_SSC_CS, 1);
177 gpio_set_value(_A_LCD_SSC_CS, 0);
178
179 /* init LCD phase 5 */
180 epson_sendbyte(0, 0x94);
181 gpio_set_value(_A_LCD_SSC_CS, 1);
182 gpio_set_value(_A_LCD_SSC_CS, 0);
183
184 /* init LCD phase 6 */
185 epson_sendbyte(0, 0xC6);
186 epson_sendbyte(1, 0x80);
187 gpio_set_value(_A_LCD_SSC_CS, 1);
188 mdelay(100); /* used to be 1000 */
189 gpio_set_value(_A_LCD_SSC_CS, 0);
190
191 /* init LCD phase 7 */
192 epson_sendbyte(0, 0x16);
193 epson_sendbyte(1, 0x02);
194 epson_sendbyte(1, 0x00);
195 epson_sendbyte(1, 0xB1);
196 epson_sendbyte(1, 0x00);
197 gpio_set_value(_A_LCD_SSC_CS, 1);
198 gpio_set_value(_A_LCD_SSC_CS, 0);
199
200 /* init LCD phase 8 */
201 epson_sendbyte(0, 0x76);
202 epson_sendbyte(1, 0x00);
203 epson_sendbyte(1, 0x00);
204 epson_sendbyte(1, 0xDB);
205 epson_sendbyte(1, 0x00);
206 gpio_set_value(_A_LCD_SSC_CS, 1);
207 gpio_set_value(_A_LCD_SSC_CS, 0);
208
209 /* init LCD phase 9 */
210 epson_sendbyte(0, 0xAF);
211 gpio_set_value(_A_LCD_SSC_CS, 1);
212}
213
214static int sx1_panel_init(struct lcd_panel *panel, struct omapfb_device *fbdev)
215{
216 return 0;
217}
218
219static void sx1_panel_cleanup(struct lcd_panel *panel)
220{
221}
222
223static void sx1_panel_disable(struct lcd_panel *panel)
224{
225 printk(KERN_INFO "SX1: LCD panel disable\n");
226 sx1_setmmipower(0);
227 gpio_set_value(_A_LCD_SSC_CS, 1);
228
229 epson_sendbyte(0, 0x25);
230 gpio_set_value(_A_LCD_SSC_CS, 0);
231
232 epson_sendbyte(0, 0xAE);
233 gpio_set_value(_A_LCD_SSC_CS, 1);
234 mdelay(100);
235 gpio_set_value(_A_LCD_SSC_CS, 0);
236
237 epson_sendbyte(0, 0x95);
238 gpio_set_value(_A_LCD_SSC_CS, 1);
239}
240
241static int sx1_panel_enable(struct lcd_panel *panel)
242{
243 printk(KERN_INFO "lcd_sx1: LCD panel enable\n");
244 init_system();
245 display_init();
246
247 sx1_setmmipower(1);
248 sx1_setbacklight(0x18);
249 sx1_setkeylight (0x06);
250 return 0;
251}
252
253
254static unsigned long sx1_panel_get_caps(struct lcd_panel *panel)
255{
256 return 0;
257}
258
259struct lcd_panel sx1_panel = {
260 .name = "sx1",
261 .config = OMAP_LCDC_PANEL_TFT | OMAP_LCDC_INV_VSYNC |
262 OMAP_LCDC_INV_HSYNC | OMAP_LCDC_INV_PIX_CLOCK |
263 OMAP_LCDC_INV_OUTPUT_EN,
264
265 .x_res = 176,
266 .y_res = 220,
267 .data_lines = 16,
268 .bpp = 16,
269 .hsw = 5,
270 .hfp = 5,
271 .hbp = 5,
272 .vsw = 2,
273 .vfp = 1,
274 .vbp = 1,
275 .pixel_clock = 1500,
276
277 .init = sx1_panel_init,
278 .cleanup = sx1_panel_cleanup,
279 .enable = sx1_panel_enable,
280 .disable = sx1_panel_disable,
281 .get_caps = sx1_panel_get_caps,
282};
283
284static int sx1_panel_probe(struct platform_device *pdev)
285{
286 omapfb_register_panel(&sx1_panel);
287 return 0;
288}
289
290static int sx1_panel_remove(struct platform_device *pdev)
291{
292 return 0;
293}
294
295static int sx1_panel_suspend(struct platform_device *pdev, pm_message_t mesg)
296{
297 return 0;
298}
299
300static int sx1_panel_resume(struct platform_device *pdev)
301{
302 return 0;
303}
304
305struct platform_driver sx1_panel_driver = {
306 .probe = sx1_panel_probe,
307 .remove = sx1_panel_remove,
308 .suspend = sx1_panel_suspend,
309 .resume = sx1_panel_resume,
310 .driver = {
311 .name = "lcd_sx1",
312 .owner = THIS_MODULE,
313 },
314};
315
316static int sx1_panel_drv_init(void)
317{
318 return platform_driver_register(&sx1_panel_driver);
319}
320
321static void sx1_panel_drv_cleanup(void)
322{
323 platform_driver_unregister(&sx1_panel_driver);
324}
325
326module_init(sx1_panel_drv_init);
327module_exit(sx1_panel_drv_cleanup);
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index 7dcbc82f3b7b..13875ce9112a 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -63,12 +63,14 @@ static inline void *kmap_atomic(struct page *page, enum km_type idx)
63#endif /* CONFIG_HIGHMEM */ 63#endif /* CONFIG_HIGHMEM */
64 64
65/* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */ 65/* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */
66#ifndef clear_user_highpage
66static inline void clear_user_highpage(struct page *page, unsigned long vaddr) 67static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
67{ 68{
68 void *addr = kmap_atomic(page, KM_USER0); 69 void *addr = kmap_atomic(page, KM_USER0);
69 clear_user_page(addr, vaddr, page); 70 clear_user_page(addr, vaddr, page);
70 kunmap_atomic(addr, KM_USER0); 71 kunmap_atomic(addr, KM_USER0);
71} 72}
73#endif
72 74
73#ifndef __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE 75#ifndef __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
74/** 76/**