Diffstat (limited to 'arch/arm')
-rw-r--r--  arch/arm/Kconfig                                17
-rw-r--r--  arch/arm/common/dmabounce.c                      7
-rw-r--r--  arch/arm/include/asm/dma-mapping.h              14
-rw-r--r--  arch/arm/include/asm/fixmap.h                   41
-rw-r--r--  arch/arm/include/asm/highmem.h                  31
-rw-r--r--  arch/arm/include/asm/kmap_types.h                1
-rw-r--r--  arch/arm/include/asm/memory.h                   14
-rw-r--r--  arch/arm/mach-iop13xx/include/mach/memory.h      5
-rw-r--r--  arch/arm/mach-ks8695/include/mach/memory.h       6
-rw-r--r--  arch/arm/mm/Makefile                             1
-rw-r--r--  arch/arm/mm/cache-feroceon-l2.c                 54
-rw-r--r--  arch/arm/mm/cache-xsc3l2.c                     107
-rw-r--r--  arch/arm/mm/dma-mapping.c                       92
-rw-r--r--  arch/arm/mm/flush.c                              2
-rw-r--r--  arch/arm/mm/highmem.c                          116
-rw-r--r--  arch/arm/mm/init.c                              23
-rw-r--r--  arch/arm/mm/mm.h                                 3
-rw-r--r--  arch/arm/mm/mmap.c                               2
-rw-r--r--  arch/arm/mm/mmu.c                               18
-rw-r--r--  arch/arm/plat-omap/include/mach/memory.h         8
20 files changed, 492 insertions(+), 70 deletions(-)
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index cb4486ad0f72..e62b37a15a1d 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -939,6 +939,23 @@ config NODES_SHIFT
 	default "2"
 	depends on NEED_MULTIPLE_NODES
 
+config HIGHMEM
+	bool "High Memory Support (EXPERIMENTAL)"
+	depends on MMU && EXPERIMENTAL
+	help
+	  The address space of ARM processors is only 4 Gigabytes large
+	  and it has to accommodate user address space, kernel address
+	  space as well as some memory mapped IO. That means that, if you
+	  have a large amount of physical memory and/or IO, not all of the
+	  memory can be "permanently mapped" by the kernel. The physical
+	  memory that is not permanently mapped is called "high memory".
+
+	  Depending on the selected kernel/user memory split, minimum
+	  vmalloc space and actual amount of RAM, you may not need this
+	  option which should result in a slightly faster kernel.
+
+	  If unsure, say n.
+
 source "mm/Kconfig"
 
 config LEDS
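
The help text above becomes concrete with numbers. A standalone sketch (not part of the patch; the 3G/1G split and a 264MB vmalloc reservation are assumed example values, not anything this option mandates) of the lowmem ceiling that makes HIGHMEM necessary:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t page_offset = 0xC0000000;       /* assumed 3G/1G kernel/user split */
		uint64_t vmalloc_reserve = 264ULL << 20; /* assumed vmalloc + I/O reservation */
		uint64_t lowmem = ((1ULL << 32) - page_offset) - vmalloc_reserve;

		/* RAM beyond this ceiling can only be "high memory" */
		printf("directly mappable RAM: %llu MB\n",
		       (unsigned long long)(lowmem >> 20));
		return 0;
	}

With 1GB of RAM under this layout, roughly the last 264MB would be reachable only through kmap()/kmap_atomic().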
diff --git a/arch/arm/common/dmabounce.c b/arch/arm/common/dmabounce.c
index f030f0775be7..734ac9135998 100644
--- a/arch/arm/common/dmabounce.c
+++ b/arch/arm/common/dmabounce.c
@@ -25,6 +25,7 @@
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/slab.h>
+#include <linux/page-flags.h>
 #include <linux/device.h>
 #include <linux/dma-mapping.h>
 #include <linux/dmapool.h>
@@ -349,6 +350,12 @@ dma_addr_t dma_map_page(struct device *dev, struct page *page,
 
 	BUG_ON(!valid_dma_direction(dir));
 
+	if (PageHighMem(page)) {
+		dev_err(dev, "DMA buffer bouncing of HIGHMEM pages "
+			     "is not supported\n");
+		return ~0;
+	}
+
 	return map_single(dev, page_address(page) + offset, size, dir);
 }
 EXPORT_SYMBOL(dma_map_page);
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index 22cb14ec3438..ff46dfa68a97 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -15,10 +15,20 @@
  * must not be used by drivers.
  */
 #ifndef __arch_page_to_dma
+
+#if !defined(CONFIG_HIGHMEM)
 static inline dma_addr_t page_to_dma(struct device *dev, struct page *page)
 {
 	return (dma_addr_t)__virt_to_bus((unsigned long)page_address(page));
 }
+#elif defined(__pfn_to_bus)
+static inline dma_addr_t page_to_dma(struct device *dev, struct page *page)
+{
+	return (dma_addr_t)__pfn_to_bus(page_to_pfn(page));
+}
+#else
+#error "this machine class needs to define __arch_page_to_dma to use HIGHMEM"
+#endif
 
 static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
 {
@@ -57,6 +67,8 @@ static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
  * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
  */
 extern void dma_cache_maint(const void *kaddr, size_t size, int rw);
+extern void dma_cache_maint_page(struct page *page, unsigned long offset,
+	size_t size, int rw);
 
 /*
  * Return whether the given device DMA address mask can be supported
@@ -316,7 +328,7 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
 	BUG_ON(!valid_dma_direction(dir));
 
 	if (!arch_is_coherent())
-		dma_cache_maint(page_address(page) + offset, size, dir);
+		dma_cache_maint_page(page, offset, size, dir);
 
 	return page_to_dma(dev, page) + offset;
 }
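
The new #elif branch exists because a highmem page has no permanent kernel mapping: page_address() may return NULL, so the __virt_to_bus() path cannot work and the bus address must be derived from the page frame number. A minimal standalone sketch of the default identity case (mirroring the __pfn_to_bus() fallback that memory.h defines below; the pfn value is hypothetical):

	#include <stdio.h>
	#include <stdint.h>

	#define PAGE_SHIFT	12
	#define __pfn_to_bus(x)	((uint64_t)(x) << PAGE_SHIFT)	/* default from memory.h */

	int main(void)
	{
		uint64_t pfn = 0x9fe40;	/* hypothetical highmem page frame number */

		/* no kernel virtual address needed: the bus address comes from the pfn */
		printf("dma_addr = 0x%llx\n",
		       (unsigned long long)__pfn_to_bus(pfn));
		return 0;
	}

Machine classes with a non-identity bus mapping must provide their own __arch_page_to_dma(), which is exactly what the iop13xx, ks8695 and OMAP updates below do.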
diff --git a/arch/arm/include/asm/fixmap.h b/arch/arm/include/asm/fixmap.h
new file mode 100644
index 000000000000..bbae919bceb4
--- /dev/null
+++ b/arch/arm/include/asm/fixmap.h
@@ -0,0 +1,41 @@
+#ifndef _ASM_FIXMAP_H
+#define _ASM_FIXMAP_H
+
+/*
+ * Nothing too fancy for now.
+ *
+ * On ARM we already have well known fixed virtual addresses imposed by
+ * the architecture such as the vector page which is located at 0xffff0000,
+ * therefore a second level page table is already allocated covering
+ * 0xfff00000 upwards.
+ *
+ * The cache flushing code in proc-xscale.S uses the virtual area between
+ * 0xfffe0000 and 0xfffeffff.
+ */
+
+#define FIXADDR_START		0xfff00000UL
+#define FIXADDR_TOP		0xfffe0000UL
+#define FIXADDR_SIZE		(FIXADDR_TOP - FIXADDR_START)
+
+#define FIX_KMAP_BEGIN		0
+#define FIX_KMAP_END		(FIXADDR_SIZE >> PAGE_SHIFT)
+
+#define __fix_to_virt(x)	(FIXADDR_START + ((x) << PAGE_SHIFT))
+#define __virt_to_fix(x)	(((x) - FIXADDR_START) >> PAGE_SHIFT)
+
+extern void __this_fixmap_does_not_exist(void);
+
+static inline unsigned long fix_to_virt(const unsigned int idx)
+{
+	if (idx >= FIX_KMAP_END)
+		__this_fixmap_does_not_exist();
+	return __fix_to_virt(idx);
+}
+
+static inline unsigned int virt_to_fix(const unsigned long vaddr)
+{
+	BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START);
+	return __virt_to_fix(vaddr);
+}
+
+#endif
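
The fixmap arithmetic can be sanity-checked in isolation. This sketch just replicates the macros above in plain C, for illustration only:

	#include <stdio.h>
	#include <assert.h>

	#define PAGE_SHIFT	12
	#define FIXADDR_START	0xfff00000UL
	#define FIXADDR_TOP	0xfffe0000UL
	#define FIXADDR_SIZE	(FIXADDR_TOP - FIXADDR_START)
	#define FIX_KMAP_END	(FIXADDR_SIZE >> PAGE_SHIFT)

	#define __fix_to_virt(x)	(FIXADDR_START + ((x) << PAGE_SHIFT))
	#define __virt_to_fix(x)	(((x) - FIXADDR_START) >> PAGE_SHIFT)

	int main(void)
	{
		unsigned long va = __fix_to_virt(5UL);

		/* the 0xfff00000..0xfffe0000 window holds 224 page-sized slots */
		printf("slots: %lu, slot 5 -> 0x%lx\n",
		       (unsigned long)FIX_KMAP_END, va);
		assert(__virt_to_fix(va) == 5);	/* index/address round-trip is exact */
		return 0;
	}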
diff --git a/arch/arm/include/asm/highmem.h b/arch/arm/include/asm/highmem.h
new file mode 100644
index 000000000000..7f36d00600b4
--- /dev/null
+++ b/arch/arm/include/asm/highmem.h
@@ -0,0 +1,31 @@
+#ifndef _ASM_HIGHMEM_H
+#define _ASM_HIGHMEM_H
+
+#include <asm/kmap_types.h>
+
+#define PKMAP_BASE		(PAGE_OFFSET - PMD_SIZE)
+#define LAST_PKMAP		PTRS_PER_PTE
+#define LAST_PKMAP_MASK		(LAST_PKMAP - 1)
+#define PKMAP_NR(virt)		(((virt) - PKMAP_BASE) >> PAGE_SHIFT)
+#define PKMAP_ADDR(nr)		(PKMAP_BASE + ((nr) << PAGE_SHIFT))
+
+#define kmap_prot		PAGE_KERNEL
+
+#define flush_cache_kmaps()	flush_cache_all()
+
+extern pte_t *pkmap_page_table;
+
+#define ARCH_NEEDS_KMAP_HIGH_GET
+
+extern void *kmap_high(struct page *page);
+extern void *kmap_high_get(struct page *page);
+extern void kunmap_high(struct page *page);
+
+extern void *kmap(struct page *page);
+extern void kunmap(struct page *page);
+extern void *kmap_atomic(struct page *page, enum km_type type);
+extern void kunmap_atomic(void *kvaddr, enum km_type type);
+extern void *kmap_atomic_pfn(unsigned long pfn, enum km_type type);
+extern struct page *kmap_atomic_to_page(const void *ptr);
+
+#endif
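
The pkmap geometry follows directly from these macros. A sketch with assumed values (PAGE_OFFSET of 0xC0000000, a 2MB PMD_SIZE and PTRS_PER_PTE of 512 are the common ARM configuration, not something the header fixes):

	#include <stdio.h>
	#include <assert.h>

	#define PAGE_SHIFT	12
	#define PMD_SIZE	(1UL << 21)		/* assumed: 2MB */
	#define PAGE_OFFSET	0xC0000000UL		/* assumed: 3G/1G split */
	#define PKMAP_BASE	(PAGE_OFFSET - PMD_SIZE)
	#define LAST_PKMAP	512UL			/* assumed PTRS_PER_PTE */
	#define PKMAP_NR(virt)	(((virt) - PKMAP_BASE) >> PAGE_SHIFT)
	#define PKMAP_ADDR(nr)	(PKMAP_BASE + ((nr) << PAGE_SHIFT))

	int main(void)
	{
		/* the pkmap window is exactly the PMD carved off the module area */
		printf("PKMAP_BASE = 0x%lx, %lu pages (%lu KB)\n",
		       PKMAP_BASE, LAST_PKMAP, (LAST_PKMAP << PAGE_SHIFT) >> 10);
		assert(PKMAP_NR(PKMAP_ADDR(17UL)) == 17);	/* round-trip is exact */
		return 0;
	}

This is why memory.h below moves MODULES_END down by PMD_SIZE when CONFIG_HIGHMEM is set: the permanent kmap area takes over that last 2MB slot below PAGE_OFFSET.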
diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
index 45def13ee17a..d16ec97ec9a9 100644
--- a/arch/arm/include/asm/kmap_types.h
+++ b/arch/arm/include/asm/kmap_types.h
@@ -18,6 +18,7 @@ enum km_type {
 	KM_IRQ1,
 	KM_SOFTIRQ0,
 	KM_SOFTIRQ1,
+	KM_L2_CACHE,
 	KM_TYPE_NR
 };
 
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index 0202a7c20e62..85763db87449 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -44,14 +44,21 @@
  * The module space lives between the addresses given by TASK_SIZE
  * and PAGE_OFFSET - it must be within 32MB of the kernel text.
  */
-#define MODULES_END	(PAGE_OFFSET)
-#define MODULES_VADDR	(MODULES_END - 16*1048576)
-
+#define MODULES_VADDR	(PAGE_OFFSET - 16*1024*1024)
 #if TASK_SIZE > MODULES_VADDR
 #error Top of user space clashes with start of module space
 #endif
 
 /*
+ * The highmem pkmap virtual space shares the end of the module area.
+ */
+#ifdef CONFIG_HIGHMEM
+#define MODULES_END	(PAGE_OFFSET - PMD_SIZE)
+#else
+#define MODULES_END	(PAGE_OFFSET)
+#endif
+
+/*
  * The XIP kernel gets mapped at the bottom of the module vm area.
  * Since we use sections to map it, this macro replaces the physical address
  * with its virtual address while keeping offset from the base section.
@@ -181,6 +188,7 @@ static inline void *phys_to_virt(unsigned long x)
 #ifndef __virt_to_bus
 #define __virt_to_bus	__virt_to_phys
 #define __bus_to_virt	__phys_to_virt
+#define __pfn_to_bus(x)	((x) << PAGE_SHIFT)
 #endif
 
 static inline __deprecated unsigned long virt_to_bus(void *x)
diff --git a/arch/arm/mach-iop13xx/include/mach/memory.h b/arch/arm/mach-iop13xx/include/mach/memory.h
index e012bf13c955..42ae29b288a1 100644
--- a/arch/arm/mach-iop13xx/include/mach/memory.h
+++ b/arch/arm/mach-iop13xx/include/mach/memory.h
@@ -59,7 +59,10 @@ static inline unsigned long __lbus_to_virt(dma_addr_t x)
 })
 
 #define __arch_page_to_dma(dev, page)	\
-	__arch_virt_to_dma(dev, page_address(page))
+	({ \
+	/* __is_lbus_virt() can never be true for RAM pages */ \
+	(dma_addr_t)page_to_phys(page); \
+	})
 
 #endif /* CONFIG_ARCH_IOP13XX */
 #endif /* !ASSEMBLY */
diff --git a/arch/arm/mach-ks8695/include/mach/memory.h b/arch/arm/mach-ks8695/include/mach/memory.h
index 6d5887cf5742..76e5308685a4 100644
--- a/arch/arm/mach-ks8695/include/mach/memory.h
+++ b/arch/arm/mach-ks8695/include/mach/memory.h
@@ -35,7 +35,11 @@ extern struct bus_type platform_bus_type;
 		__phys_to_virt(x) : __bus_to_virt(x)); })
 #define __arch_virt_to_dma(dev, x)	({ is_lbus_device(dev) ? \
 		(dma_addr_t)__virt_to_phys(x) : (dma_addr_t)__virt_to_bus(x); })
-#define __arch_page_to_dma(dev, x)	__arch_virt_to_dma(dev, page_address(x))
+#define __arch_page_to_dma(dev, x)	\
+	({ dma_addr_t __dma = page_to_phys(x); \
+	   if (!is_lbus_device(dev)) \
+		__dma = __dma - PHYS_OFFSET + KS8695_PCIMEM_PA; \
+	   __dma; })
 
 #endif
 
diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile
index 64149d9e55a5..c264683538bc 100644
--- a/arch/arm/mm/Makefile
+++ b/arch/arm/mm/Makefile
@@ -16,6 +16,7 @@ obj-$(CONFIG_MODULES) += proc-syms.o
 
 obj-$(CONFIG_ALIGNMENT_TRAP)	+= alignment.o
 obj-$(CONFIG_DISCONTIGMEM)	+= discontig.o
+obj-$(CONFIG_HIGHMEM)		+= highmem.o
 
 obj-$(CONFIG_CPU_ABRT_NOMMU)	+= abort-nommu.o
 obj-$(CONFIG_CPU_ABRT_EV4)	+= abort-ev4.o
diff --git a/arch/arm/mm/cache-feroceon-l2.c b/arch/arm/mm/cache-feroceon-l2.c
index 80cd207cbaea..d6dd83826f8a 100644
--- a/arch/arm/mm/cache-feroceon-l2.c
+++ b/arch/arm/mm/cache-feroceon-l2.c
@@ -14,8 +14,12 @@
 
 #include <linux/init.h>
 #include <asm/cacheflush.h>
+#include <asm/kmap_types.h>
+#include <asm/fixmap.h>
+#include <asm/pgtable.h>
+#include <asm/tlbflush.h>
 #include <plat/cache-feroceon-l2.h>
-
+#include "mm.h"
 
 /*
  * Low-level cache maintenance operations.
21 * Low-level cache maintenance operations. 25 * Low-level cache maintenance operations.
@@ -34,14 +38,36 @@
  * The range operations require two successive cp15 writes, in
  * between which we don't want to be preempted.
  */
+
+static inline unsigned long l2_start_va(unsigned long paddr)
+{
+#ifdef CONFIG_HIGHMEM
+	/*
+	 * Let's do our own fixmap stuff in a minimal way here.
+	 * Because range ops can't be done on physical addresses,
+	 * we simply install a virtual mapping for it only for the
+	 * TLB lookup to occur, hence no need to flush the untouched
+	 * memory mapping.  This is protected with the disabling of
+	 * interrupts by the caller.
+	 */
+	unsigned long idx = KM_L2_CACHE + KM_TYPE_NR * smp_processor_id();
+	unsigned long vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+	set_pte_ext(TOP_PTE(vaddr), pfn_pte(paddr >> PAGE_SHIFT, PAGE_KERNEL), 0);
+	local_flush_tlb_kernel_page(vaddr);
+	return vaddr + (paddr & ~PAGE_MASK);
+#else
+	return __phys_to_virt(paddr);
+#endif
+}
+
 static inline void l2_clean_pa(unsigned long addr)
 {
 	__asm__("mcr p15, 1, %0, c15, c9, 3" : : "r" (addr));
 }
 
-static inline void l2_clean_mva_range(unsigned long start, unsigned long end)
+static inline void l2_clean_pa_range(unsigned long start, unsigned long end)
 {
-	unsigned long flags;
+	unsigned long va_start, va_end, flags;
 
 	/*
 	 * Make sure 'start' and 'end' reference the same page, as
@@ -51,17 +77,14 @@ static inline void l2_clean_mva_range(unsigned long start, unsigned long end)
 	BUG_ON((start ^ end) >> PAGE_SHIFT);
 
 	raw_local_irq_save(flags);
+	va_start = l2_start_va(start);
+	va_end = va_start + (end - start);
 	__asm__("mcr p15, 1, %0, c15, c9, 4\n\t"
 		"mcr p15, 1, %1, c15, c9, 5"
-		: : "r" (start), "r" (end));
+		: : "r" (va_start), "r" (va_end));
 	raw_local_irq_restore(flags);
 }
 
-static inline void l2_clean_pa_range(unsigned long start, unsigned long end)
-{
-	l2_clean_mva_range(__phys_to_virt(start), __phys_to_virt(end));
-}
-
 static inline void l2_clean_inv_pa(unsigned long addr)
 {
 	__asm__("mcr p15, 1, %0, c15, c10, 3" : : "r" (addr));
@@ -72,9 +95,9 @@ static inline void l2_inv_pa(unsigned long addr)
 	__asm__("mcr p15, 1, %0, c15, c11, 3" : : "r" (addr));
 }
 
-static inline void l2_inv_mva_range(unsigned long start, unsigned long end)
+static inline void l2_inv_pa_range(unsigned long start, unsigned long end)
 {
-	unsigned long flags;
+	unsigned long va_start, va_end, flags;
 
 	/*
 	 * Make sure 'start' and 'end' reference the same page, as
@@ -84,17 +107,14 @@ static inline void l2_inv_mva_range(unsigned long start, unsigned long end)
 	BUG_ON((start ^ end) >> PAGE_SHIFT);
 
 	raw_local_irq_save(flags);
+	va_start = l2_start_va(start);
+	va_end = va_start + (end - start);
 	__asm__("mcr p15, 1, %0, c15, c11, 4\n\t"
 		"mcr p15, 1, %1, c15, c11, 5"
-		: : "r" (start), "r" (end));
+		: : "r" (va_start), "r" (va_end));
 	raw_local_irq_restore(flags);
 }
 
-static inline void l2_inv_pa_range(unsigned long start, unsigned long end)
-{
-	l2_inv_mva_range(__phys_to_virt(start), __phys_to_virt(end));
-}
-
 
 /*
  * Linux primitives.
diff --git a/arch/arm/mm/cache-xsc3l2.c b/arch/arm/mm/cache-xsc3l2.c
index 464de893a988..5d180cb0bd94 100644
--- a/arch/arm/mm/cache-xsc3l2.c
+++ b/arch/arm/mm/cache-xsc3l2.c
@@ -17,12 +17,14 @@
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  */
 #include <linux/init.h>
-#include <linux/spinlock.h>
-#include <linux/io.h>
-
 #include <asm/system.h>
 #include <asm/cputype.h>
 #include <asm/cacheflush.h>
+#include <asm/kmap_types.h>
+#include <asm/fixmap.h>
+#include <asm/pgtable.h>
+#include <asm/tlbflush.h>
+#include "mm.h"
 
 #define CR_L2 (1 << 26)
 
@@ -47,21 +49,11 @@ static inline void xsc3_l2_clean_mva(unsigned long addr)
 	__asm__("mcr p15, 1, %0, c7, c11, 1" : : "r" (addr));
 }
 
-static inline void xsc3_l2_clean_pa(unsigned long addr)
-{
-	xsc3_l2_clean_mva(__phys_to_virt(addr));
-}
-
 static inline void xsc3_l2_inv_mva(unsigned long addr)
 {
 	__asm__("mcr p15, 1, %0, c7, c7, 1" : : "r" (addr));
 }
 
-static inline void xsc3_l2_inv_pa(unsigned long addr)
-{
-	xsc3_l2_inv_mva(__phys_to_virt(addr));
-}
-
 static inline void xsc3_l2_inv_all(void)
 {
 	unsigned long l2ctype, set_way;
@@ -79,50 +71,103 @@ static inline void xsc3_l2_inv_all(void)
 	dsb();
 }
 
+#ifdef CONFIG_HIGHMEM
+#define l2_map_save_flags(x)		raw_local_save_flags(x)
+#define l2_map_restore_flags(x)		raw_local_irq_restore(x)
+#else
+#define l2_map_save_flags(x)		((x) = 0)
+#define l2_map_restore_flags(x)		((void)(x))
+#endif
+
+static inline unsigned long l2_map_va(unsigned long pa, unsigned long prev_va,
+				      unsigned long flags)
+{
+#ifdef CONFIG_HIGHMEM
+	unsigned long va = prev_va & PAGE_MASK;
+	unsigned long pa_offset = pa << (32 - PAGE_SHIFT);
+	if (unlikely(pa_offset < (prev_va << (32 - PAGE_SHIFT)))) {
+		/*
+		 * Switching to a new page.  Because cache ops are
+		 * using virtual addresses only, we must put a mapping
+		 * in place for it.  We also enable interrupts for a
+		 * short while and disable them again to protect this
+		 * mapping.
+		 */
+		unsigned long idx;
+		raw_local_irq_restore(flags);
+		idx = KM_L2_CACHE + KM_TYPE_NR * smp_processor_id();
+		va = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+		raw_local_irq_restore(flags | PSR_I_BIT);
+		set_pte_ext(TOP_PTE(va), pfn_pte(pa >> PAGE_SHIFT, PAGE_KERNEL), 0);
+		local_flush_tlb_kernel_page(va);
+	}
+	return va + (pa_offset >> (32 - PAGE_SHIFT));
+#else
+	return __phys_to_virt(pa);
+#endif
+}
+
 static void xsc3_l2_inv_range(unsigned long start, unsigned long end)
 {
+	unsigned long vaddr, flags;
+
 	if (start == 0 && end == -1ul) {
 		xsc3_l2_inv_all();
 		return;
 	}
 
+	vaddr = -1;		/* to force the first mapping */
+	l2_map_save_flags(flags);
+
 	/*
 	 * Clean and invalidate partial first cache line.
 	 */
 	if (start & (CACHE_LINE_SIZE - 1)) {
-		xsc3_l2_clean_pa(start & ~(CACHE_LINE_SIZE - 1));
-		xsc3_l2_inv_pa(start & ~(CACHE_LINE_SIZE - 1));
+		vaddr = l2_map_va(start & ~(CACHE_LINE_SIZE - 1), vaddr, flags);
+		xsc3_l2_clean_mva(vaddr);
+		xsc3_l2_inv_mva(vaddr);
 		start = (start | (CACHE_LINE_SIZE - 1)) + 1;
 	}
 
 	/*
-	 * Clean and invalidate partial last cache line.
+	 * Invalidate all full cache lines between 'start' and 'end'.
 	 */
-	if (start < end && (end & (CACHE_LINE_SIZE - 1))) {
-		xsc3_l2_clean_pa(end & ~(CACHE_LINE_SIZE - 1));
-		xsc3_l2_inv_pa(end & ~(CACHE_LINE_SIZE - 1));
-		end &= ~(CACHE_LINE_SIZE - 1);
+	while (start < (end & ~(CACHE_LINE_SIZE - 1))) {
+		vaddr = l2_map_va(start, vaddr, flags);
+		xsc3_l2_inv_mva(vaddr);
+		start += CACHE_LINE_SIZE;
 	}
 
 	/*
-	 * Invalidate all full cache lines between 'start' and 'end'.
+	 * Clean and invalidate partial last cache line.
 	 */
-	while (start < end) {
-		xsc3_l2_inv_pa(start);
-		start += CACHE_LINE_SIZE;
+	if (start < end) {
+		vaddr = l2_map_va(start, vaddr, flags);
+		xsc3_l2_clean_mva(vaddr);
+		xsc3_l2_inv_mva(vaddr);
 	}
 
+	l2_map_restore_flags(flags);
+
 	dsb();
 }
 
 static void xsc3_l2_clean_range(unsigned long start, unsigned long end)
 {
+	unsigned long vaddr, flags;
+
+	vaddr = -1;		/* to force the first mapping */
+	l2_map_save_flags(flags);
+
 	start &= ~(CACHE_LINE_SIZE - 1);
 	while (start < end) {
-		xsc3_l2_clean_pa(start);
+		vaddr = l2_map_va(start, vaddr, flags);
+		xsc3_l2_clean_mva(vaddr);
 		start += CACHE_LINE_SIZE;
 	}
 
+	l2_map_restore_flags(flags);
+
 	dsb();
 }
 
@@ -148,18 +193,26 @@ static inline void xsc3_l2_flush_all(void)
 
 static void xsc3_l2_flush_range(unsigned long start, unsigned long end)
 {
+	unsigned long vaddr, flags;
+
 	if (start == 0 && end == -1ul) {
 		xsc3_l2_flush_all();
 		return;
 	}
 
+	vaddr = -1;		/* to force the first mapping */
+	l2_map_save_flags(flags);
+
 	start &= ~(CACHE_LINE_SIZE - 1);
 	while (start < end) {
-		xsc3_l2_clean_pa(start);
-		xsc3_l2_inv_pa(start);
+		vaddr = l2_map_va(start, vaddr, flags);
+		xsc3_l2_clean_mva(vaddr);
+		xsc3_l2_inv_mva(vaddr);
 		start += CACHE_LINE_SIZE;
 	}
 
+	l2_map_restore_flags(flags);
+
 	dsb();
 }
 
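
The page-crossing test inside l2_map_va() above deserves unpacking: shifting an address left by (32 - PAGE_SHIFT) discards everything but its offset within the page, moved into the top bits, so the current mapping is reused until that offset wraps around. A standalone sketch of just that predicate (the addresses are made up):

	#include <stdio.h>
	#include <stdint.h>

	#define PAGE_SHIFT 12

	/* returns 1 when 'pa' has moved into a new page relative to 'prev_va' */
	static int needs_new_mapping(uint32_t pa, uint32_t prev_va)
	{
		uint32_t pa_offset = pa << (32 - PAGE_SHIFT);
		return pa_offset < (uint32_t)(prev_va << (32 - PAGE_SHIFT));
	}

	int main(void)
	{
		/* first call: prev_va = -1 carries the maximal offset, forcing a map */
		printf("%d\n", needs_new_mapping(0x30000040, 0xffffffff));	/* 1 */
		/* advancing within the same page keeps the existing mapping */
		printf("%d\n", needs_new_mapping(0x30000060, 0xfff00040));	/* 0 */
		/* the offset wraps to 0 at a page boundary: remap */
		printf("%d\n", needs_new_mapping(0x30001000, 0xfff00fe0));	/* 1 */
		return 0;
	}

This is also why the callers seed vaddr with -1: it guarantees the first iteration installs a mapping.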
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 310e479309ef..510c179b0ac8 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -19,6 +19,7 @@
 #include <linux/dma-mapping.h>
 
 #include <asm/memory.h>
+#include <asm/highmem.h>
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
 #include <asm/sizes.h>
@@ -490,29 +491,101 @@ core_initcall(consistent_init);
  */
 void dma_cache_maint(const void *start, size_t size, int direction)
 {
-	const void *end = start + size;
+	void (*inner_op)(const void *, const void *);
+	void (*outer_op)(unsigned long, unsigned long);
 
-	BUG_ON(!virt_addr_valid(start) || !virt_addr_valid(end - 1));
+	BUG_ON(!virt_addr_valid(start) || !virt_addr_valid(start + size - 1));
 
 	switch (direction) {
 	case DMA_FROM_DEVICE:		/* invalidate only */
-		dmac_inv_range(start, end);
-		outer_inv_range(__pa(start), __pa(end));
+		inner_op = dmac_inv_range;
+		outer_op = outer_inv_range;
 		break;
 	case DMA_TO_DEVICE:		/* writeback only */
-		dmac_clean_range(start, end);
-		outer_clean_range(__pa(start), __pa(end));
+		inner_op = dmac_clean_range;
+		outer_op = outer_clean_range;
 		break;
 	case DMA_BIDIRECTIONAL:		/* writeback and invalidate */
-		dmac_flush_range(start, end);
-		outer_flush_range(__pa(start), __pa(end));
+		inner_op = dmac_flush_range;
+		outer_op = outer_flush_range;
 		break;
 	default:
 		BUG();
 	}
+
+	inner_op(start, start + size);
+	outer_op(__pa(start), __pa(start) + size);
 }
 EXPORT_SYMBOL(dma_cache_maint);
 
+static void dma_cache_maint_contiguous(struct page *page, unsigned long offset,
+				       size_t size, int direction)
+{
+	void *vaddr;
+	unsigned long paddr;
+	void (*inner_op)(const void *, const void *);
+	void (*outer_op)(unsigned long, unsigned long);
+
+	switch (direction) {
+	case DMA_FROM_DEVICE:		/* invalidate only */
+		inner_op = dmac_inv_range;
+		outer_op = outer_inv_range;
+		break;
+	case DMA_TO_DEVICE:		/* writeback only */
+		inner_op = dmac_clean_range;
+		outer_op = outer_clean_range;
+		break;
+	case DMA_BIDIRECTIONAL:		/* writeback and invalidate */
+		inner_op = dmac_flush_range;
+		outer_op = outer_flush_range;
+		break;
+	default:
+		BUG();
+	}
+
+	if (!PageHighMem(page)) {
+		vaddr = page_address(page) + offset;
+		inner_op(vaddr, vaddr + size);
+	} else {
+		vaddr = kmap_high_get(page);
+		if (vaddr) {
+			vaddr += offset;
+			inner_op(vaddr, vaddr + size);
+			kunmap_high(page);
+		}
+	}
+
+	paddr = page_to_phys(page) + offset;
+	outer_op(paddr, paddr + size);
+}
+
+void dma_cache_maint_page(struct page *page, unsigned long offset,
+			  size_t size, int dir)
+{
+	/*
+	 * A single sg entry may refer to multiple physically contiguous
+	 * pages.  But we still need to process highmem pages individually.
+	 * If highmem is not configured then the bulk of this loop gets
+	 * optimized out.
+	 */
+	size_t left = size;
+	do {
+		size_t len = left;
+		if (PageHighMem(page) && len + offset > PAGE_SIZE) {
+			if (offset >= PAGE_SIZE) {
+				page += offset / PAGE_SIZE;
+				offset %= PAGE_SIZE;
+			}
+			len = PAGE_SIZE - offset;
+		}
+		dma_cache_maint_contiguous(page, offset, len, dir);
+		offset = 0;
+		page++;
+		left -= len;
+	} while (left);
+}
+EXPORT_SYMBOL(dma_cache_maint_page);
+
 /**
  * dma_map_sg - map a set of SG buffers for streaming mode DMA
  * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
@@ -610,7 +683,8 @@ void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
 			continue;
 
 		if (!arch_is_coherent())
-			dma_cache_maint(sg_virt(s), s->length, dir);
+			dma_cache_maint_page(sg_page(s), s->offset,
+					     s->length, dir);
 	}
 }
 EXPORT_SYMBOL(dma_sync_sg_for_device);
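
The splitting walk in dma_cache_maint_page() can be exercised outside the kernel. This sketch mirrors the loop with printf() standing in for dma_cache_maint_contiguous() and every page treated as highmem (the pfn and sizes are made up):

	#include <stdio.h>

	#define PAGE_SIZE 4096UL

	/* stand-in for PageHighMem(); pretend every page is highmem */
	static int page_is_highmem(unsigned long pfn) { (void)pfn; return 1; }

	/* mirrors the dma_cache_maint_page() walk: one op per highmem page */
	static void maint_page(unsigned long pfn, unsigned long offset,
			       unsigned long size)
	{
		unsigned long left = size;
		do {
			unsigned long len = left;
			if (page_is_highmem(pfn) && len + offset > PAGE_SIZE) {
				if (offset >= PAGE_SIZE) {
					pfn += offset / PAGE_SIZE;
					offset %= PAGE_SIZE;
				}
				len = PAGE_SIZE - offset;
			}
			printf("op on pfn %lu, offset %lu, len %lu\n",
			       pfn, offset, len);
			offset = 0;
			pfn++;
			left -= len;
		} while (left);
	}

	int main(void)
	{
		/* a 10000-byte sg entry starting 300 bytes into its first page */
		maint_page(42, 300, 10000);
		return 0;
	}

A 10000-byte entry at offset 300 comes out as 3796 + 4096 + 2108 bytes, one page per call; for a lowmem page the if-body is skipped and the whole extent is handled in a single call.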
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index 0fa9bf388f0b..4e283481cee1 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -192,7 +192,7 @@ void flush_dcache_page(struct page *page)
 	struct address_space *mapping = page_mapping(page);
 
 #ifndef CONFIG_SMP
-	if (mapping && !mapping_mapped(mapping))
+	if (!PageHighMem(page) && mapping && !mapping_mapped(mapping))
 		set_bit(PG_dcache_dirty, &page->flags);
 	else
 #endif
diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c
new file mode 100644
index 000000000000..a34954d9df7d
--- /dev/null
+++ b/arch/arm/mm/highmem.c
@@ -0,0 +1,116 @@
+/*
+ * arch/arm/mm/highmem.c -- ARM highmem support
+ *
+ * Author:	Nicolas Pitre
+ * Created:	September 8, 2008
+ * Copyright:	Marvell Semiconductors Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/highmem.h>
+#include <linux/interrupt.h>
+#include <asm/fixmap.h>
+#include <asm/cacheflush.h>
+#include <asm/tlbflush.h>
+#include "mm.h"
+
+void *kmap(struct page *page)
+{
+	might_sleep();
+	if (!PageHighMem(page))
+		return page_address(page);
+	return kmap_high(page);
+}
+EXPORT_SYMBOL(kmap);
+
+void kunmap(struct page *page)
+{
+	BUG_ON(in_interrupt());
+	if (!PageHighMem(page))
+		return;
+	kunmap_high(page);
+}
+EXPORT_SYMBOL(kunmap);
+
+void *kmap_atomic(struct page *page, enum km_type type)
+{
+	unsigned int idx;
+	unsigned long vaddr;
+
+	pagefault_disable();
+	if (!PageHighMem(page))
+		return page_address(page);
+
+	idx = type + KM_TYPE_NR * smp_processor_id();
+	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+#ifdef CONFIG_DEBUG_HIGHMEM
+	/*
+	 * With debugging enabled, kunmap_atomic forces that entry to 0.
+	 * Make sure it was indeed properly unmapped.
+	 */
+	BUG_ON(!pte_none(*(TOP_PTE(vaddr))));
+#endif
+	set_pte_ext(TOP_PTE(vaddr), mk_pte(page, kmap_prot), 0);
+	/*
+	 * When debugging is off, kunmap_atomic leaves the previous mapping
+	 * in place, so this TLB flush ensures the TLB is updated with the
+	 * new mapping.
+	 */
+	local_flush_tlb_kernel_page(vaddr);
+
+	return (void *)vaddr;
+}
+EXPORT_SYMBOL(kmap_atomic);
+
+void kunmap_atomic(void *kvaddr, enum km_type type)
+{
+	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
+	unsigned int idx = type + KM_TYPE_NR * smp_processor_id();
+
+	if (kvaddr >= (void *)FIXADDR_START) {
+		__cpuc_flush_dcache_page((void *)vaddr);
+#ifdef CONFIG_DEBUG_HIGHMEM
+		BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
+		set_pte_ext(TOP_PTE(vaddr), __pte(0), 0);
+		local_flush_tlb_kernel_page(vaddr);
+#else
+		(void) idx;  /* to kill a warning */
+#endif
+	}
+	pagefault_enable();
+}
+EXPORT_SYMBOL(kunmap_atomic);
+
+void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
+{
+	unsigned int idx;
+	unsigned long vaddr;
+
+	pagefault_disable();
+
+	idx = type + KM_TYPE_NR * smp_processor_id();
+	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+#ifdef CONFIG_DEBUG_HIGHMEM
+	BUG_ON(!pte_none(*(TOP_PTE(vaddr))));
+#endif
+	set_pte_ext(TOP_PTE(vaddr), pfn_pte(pfn, kmap_prot), 0);
+	local_flush_tlb_kernel_page(vaddr);
+
+	return (void *)vaddr;
+}
+
+struct page *kmap_atomic_to_page(const void *ptr)
+{
+	unsigned long vaddr = (unsigned long)ptr;
+	pte_t *pte;
+
+	if (vaddr < FIXADDR_START)
+		return virt_to_page(ptr);
+
+	pte = TOP_PTE(vaddr);
+	return pte_page(*pte);
+}
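
For context, a typical consumer of the kmap_atomic() interface implemented above looks like the following sketch (kernel-style illustration, not part of the patch; the caller's page pointers are assumed to be valid):

	#include <linux/highmem.h>	/* kmap_atomic, kunmap_atomic */
	#include <linux/mm.h>

	/* copy one (possibly highmem) page without a permanent mapping */
	static void copy_one_page(struct page *dst, struct page *src)
	{
		void *d = kmap_atomic(dst, KM_USER0);	/* per-CPU fixmap slot */
		void *s = kmap_atomic(src, KM_USER1);	/* a second, distinct slot */

		copy_page(d, s);

		kunmap_atomic(s, KM_USER1);		/* release in reverse order */
		kunmap_atomic(d, KM_USER0);
	}

Pagefaults stay disabled between map and unmap, so nothing in that window may sleep; that is the price for kmap_atomic() being usable where kmap() is not.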
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 34df4d9d03a6..8277802ec859 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -15,6 +15,7 @@
 #include <linux/mman.h>
 #include <linux/nodemask.h>
 #include <linux/initrd.h>
+#include <linux/highmem.h>
 
 #include <asm/mach-types.h>
 #include <asm/sections.h>
@@ -382,7 +383,7 @@ void __init bootmem_init(void)
 	for_each_node(node)
 		bootmem_free_node(node, mi);
 
-	high_memory = __va(memend_pfn << PAGE_SHIFT);
+	high_memory = __va((memend_pfn << PAGE_SHIFT) - 1) + 1;
 
 	/*
 	 * This doesn't seem to be used by the Linux memory manager any
@@ -485,7 +486,7 @@ void __init mem_init(void)
 	int i, node;
 
 #ifndef CONFIG_DISCONTIGMEM
-	max_mapnr = virt_to_page(high_memory) - mem_map;
+	max_mapnr = pfn_to_page(max_pfn + PHYS_PFN_OFFSET) - mem_map;
 #endif
 
 	/* this will put all unused low memory onto the freelists */
@@ -504,6 +505,19 @@ void __init mem_init(void)
 			    __phys_to_pfn(__pa(swapper_pg_dir)), NULL);
 #endif
 
+#ifdef CONFIG_HIGHMEM
+	/* set highmem page free */
+	for_each_online_node(node) {
+		for_each_nodebank (i, &meminfo, node) {
+			unsigned long start = bank_pfn_start(&meminfo.bank[i]);
+			unsigned long end = bank_pfn_end(&meminfo.bank[i]);
+			if (start >= max_low_pfn + PHYS_PFN_OFFSET)
+				totalhigh_pages += free_area(start, end, NULL);
+		}
+	}
+	totalram_pages += totalhigh_pages;
+#endif
+
 	/*
 	 * Since our memory may not be contiguous, calculate the
 	 * real number of pages we have in this system
@@ -521,9 +535,10 @@ void __init mem_init(void)
 	initsize = __init_end - __init_begin;
 
 	printk(KERN_NOTICE "Memory: %luKB available (%dK code, "
-		"%dK data, %dK init)\n",
+		"%dK data, %dK init, %luK highmem)\n",
 		(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
-		codesize >> 10, datasize >> 10, initsize >> 10);
+		codesize >> 10, datasize >> 10, initsize >> 10,
+		(unsigned long) (totalhigh_pages << (PAGE_SHIFT-10)));
 
 	if (PAGE_SIZE >= 16384 && num_physpages <= 128) {
 		extern int sysctl_overcommit_memory;
diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h
index 95bbe112965e..c4f6f05198e0 100644
--- a/arch/arm/mm/mm.h
+++ b/arch/arm/mm/mm.h
@@ -1,7 +1,6 @@
-/* the upper-most page table pointer */
-
 #ifdef CONFIG_MMU
 
+/* the upper-most page table pointer */
 extern pmd_t *top_pmd;
 
 #define TOP_PTE(x)	pte_offset_kernel(top_pmd, x)
diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
index 5358fcc7f61e..f7457fea6de8 100644
--- a/arch/arm/mm/mmap.c
+++ b/arch/arm/mm/mmap.c
@@ -124,7 +124,7 @@ int valid_phys_addr_range(unsigned long addr, size_t size)
 {
 	if (addr < PHYS_OFFSET)
 		return 0;
-	if (addr + size > __pa(high_memory))
+	if (addr + size >= __pa(high_memory - 1))
 		return 0;
 
 	return 1;
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 8c6fc5a6237e..1585814f8414 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -18,9 +18,11 @@
 #include <asm/cputype.h>
 #include <asm/mach-types.h>
 #include <asm/sections.h>
+#include <asm/cachetype.h>
 #include <asm/setup.h>
 #include <asm/sizes.h>
 #include <asm/tlb.h>
+#include <asm/highmem.h>
 
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
@@ -700,6 +702,10 @@ static void __init sanity_check_meminfo(void)
 		if (meminfo.nr_banks >= NR_BANKS) {
 			printk(KERN_CRIT "NR_BANKS too low, "
 					 "ignoring high memory\n");
+		} else if (cache_is_vipt_aliasing()) {
+			printk(KERN_CRIT "HIGHMEM is not yet supported "
+					 "with VIPT aliasing cache, "
+					 "ignoring high memory\n");
 		} else {
 			memmove(bank + 1, bank,
 				(meminfo.nr_banks - i) * sizeof(*bank));
@@ -918,6 +924,17 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
 	flush_cache_all();
 }
 
+static void __init kmap_init(void)
+{
+#ifdef CONFIG_HIGHMEM
+	pmd_t *pmd = pmd_off_k(PKMAP_BASE);
+	pte_t *pte = alloc_bootmem_low_pages(2 * PTRS_PER_PTE * sizeof(pte_t));
+	BUG_ON(!pmd_none(*pmd) || !pte);
+	__pmd_populate(pmd, __pa(pte) | _PAGE_KERNEL_TABLE);
+	pkmap_page_table = pte + PTRS_PER_PTE;
+#endif
+}
+
 /*
  * paging_init() sets up the page tables, initialises the zone memory
  * maps, and sets up the zero page, bad page and bad page tables.
@@ -931,6 +948,7 @@ void __init paging_init(struct machine_desc *mdesc)
 	prepare_page_table();
 	bootmem_init();
 	devicemaps_init(mdesc);
+	kmap_init();
 
 	top_pmd = pmd_off_k(0xffff0000);
 
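
The sizing in kmap_init() above reflects ARM's PTE table layout of this era: a table allocation carries the hardware PTEs in its first half and the Linux-visible shadow copies in the second, so twice PTRS_PER_PTE entries are needed and pkmap_page_table points PTRS_PER_PTE entries in, at the shadow half. The arithmetic as a standalone sketch (values assume the classic 32-bit ARM MMU):

	#include <stdio.h>

	int main(void)
	{
		unsigned long ptrs_per_pte = 512;	/* classic ARM MMU */
		unsigned long pte_size = 4;		/* sizeof(pte_t) on 32-bit ARM */
		unsigned long alloc = 2 * ptrs_per_pte * pte_size;

		/* one half for hardware PTEs, one half for the Linux shadow copy */
		printf("bootmem allocation: %lu bytes (%lu per half)\n",
		       alloc, alloc / 2);
		/* 512 entries x 4KB pages cover the 2MB PMD reserved at PKMAP_BASE */
		printf("window covered: %lu MB\n", (ptrs_per_pte << 12) >> 20);
		return 0;
	}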
diff --git a/arch/arm/plat-omap/include/mach/memory.h b/arch/arm/plat-omap/include/mach/memory.h
index d6b5ca6c7da2..99ed564d9277 100644
--- a/arch/arm/plat-omap/include/mach/memory.h
+++ b/arch/arm/plat-omap/include/mach/memory.h
@@ -61,9 +61,11 @@
 #define lbus_to_virt(x)		((x) - OMAP1510_LB_OFFSET + PAGE_OFFSET)
 #define is_lbus_device(dev)	(cpu_is_omap15xx() && dev && (strncmp(dev_name(dev), "ohci", 4) == 0))
 
-#define __arch_page_to_dma(dev, page)	({is_lbus_device(dev) ? \
-					(dma_addr_t)virt_to_lbus(page_address(page)) : \
-					(dma_addr_t)__virt_to_phys(page_address(page));})
+#define __arch_page_to_dma(dev, page)	\
+	({ dma_addr_t __dma = page_to_phys(page); \
+	   if (is_lbus_device(dev)) \
+		__dma = __dma - PHYS_OFFSET + OMAP1510_LB_OFFSET; \
+	   __dma; })
 
 #define __arch_dma_to_virt(dev, addr)	({ (void *) (is_lbus_device(dev) ? \
 						lbus_to_virt(addr) : \