Diffstat (limited to 'arch/arm/mm')
-rw-r--r--  arch/arm/mm/Makefile         |   1
-rw-r--r--  arch/arm/mm/cache-l2x0.c     |  48
-rw-r--r--  arch/arm/mm/cache-tauros3.h  |  41
-rw-r--r--  arch/arm/mm/cache-v7.S       |  14
-rw-r--r--  arch/arm/mm/context.c        |  41
-rw-r--r--  arch/arm/mm/dma-mapping.c    |   6
-rw-r--r--  arch/arm/mm/dump.c           | 345
-rw-r--r--  arch/arm/mm/flush.c          |   6
-rw-r--r--  arch/arm/mm/init.c           |   2
-rw-r--r--  arch/arm/mm/ioremap.c        |   4
-rw-r--r--  arch/arm/mm/mmu.c            | 126
-rw-r--r--  arch/arm/mm/pgd.c            |   2
12 files changed, 573 insertions, 63 deletions
diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile
index ecfe6e53f6e0..7f39ce2f841f 100644
--- a/arch/arm/mm/Makefile
+++ b/arch/arm/mm/Makefile
@@ -12,6 +12,7 @@ ifneq ($(CONFIG_MMU),y)
 obj-y += nommu.o
 endif
 
+obj-$(CONFIG_ARM_PTDUMP) += dump.o
 obj-$(CONFIG_MODULES) += proc-syms.o
 
 obj-$(CONFIG_ALIGNMENT_TRAP) += alignment.o
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index 447da6ffadd5..7abde2ce8973 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -25,6 +25,7 @@
 
 #include <asm/cacheflush.h>
 #include <asm/hardware/cache-l2x0.h>
+#include "cache-tauros3.h"
 #include "cache-aurora-l2.h"
 
 #define CACHE_LINE_SIZE 32
@@ -767,6 +768,14 @@ static void aurora_save(void)
 	l2x0_saved_regs.aux_ctrl = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
 }
 
+static void __init tauros3_save(void)
+{
+	l2x0_saved_regs.aux2_ctrl =
+		readl_relaxed(l2x0_base + TAUROS3_AUX2_CTRL);
+	l2x0_saved_regs.prefetch_ctrl =
+		readl_relaxed(l2x0_base + L2X0_PREFETCH_CTRL);
+}
+
 static void l2x0_resume(void)
 {
 	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
@@ -821,6 +830,18 @@ static void aurora_resume(void)
 	}
 }
 
+static void tauros3_resume(void)
+{
+	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
+		writel_relaxed(l2x0_saved_regs.aux2_ctrl,
+			       l2x0_base + TAUROS3_AUX2_CTRL);
+		writel_relaxed(l2x0_saved_regs.prefetch_ctrl,
+			       l2x0_base + L2X0_PREFETCH_CTRL);
+	}
+
+	l2x0_resume();
+}
+
 static void __init aurora_broadcast_l2_commands(void)
 {
 	__u32 u;
@@ -906,6 +927,15 @@ static const struct l2x0_of_data aurora_no_outer_data = {
 	},
 };
 
+static const struct l2x0_of_data tauros3_data = {
+	.setup = NULL,
+	.save = tauros3_save,
+	/* Tauros3 broadcasts L1 cache operations to L2 */
+	.outer_cache = {
+		.resume = tauros3_resume,
+	},
+};
+
 static const struct l2x0_of_data bcm_l2x0_data = {
 	.setup = pl310_of_setup,
 	.save = pl310_save,
@@ -922,17 +952,19 @@ static const struct l2x0_of_data bcm_l2x0_data = {
 };
 
 static const struct of_device_id l2x0_ids[] __initconst = {
-	{ .compatible = "arm,pl310-cache", .data = (void *)&pl310_data },
-	{ .compatible = "arm,l220-cache", .data = (void *)&l2x0_data },
 	{ .compatible = "arm,l210-cache", .data = (void *)&l2x0_data },
-	{ .compatible = "marvell,aurora-system-cache",
-	  .data = (void *)&aurora_no_outer_data},
-	{ .compatible = "marvell,aurora-outer-cache",
-	  .data = (void *)&aurora_with_outer_data},
-	{ .compatible = "brcm,bcm11351-a2-pl310-cache",
-	  .data = (void *)&bcm_l2x0_data},
+	{ .compatible = "arm,l220-cache", .data = (void *)&l2x0_data },
+	{ .compatible = "arm,pl310-cache", .data = (void *)&pl310_data },
 	{ .compatible = "bcm,bcm11351-a2-pl310-cache", /* deprecated name */
 	  .data = (void *)&bcm_l2x0_data},
+	{ .compatible = "brcm,bcm11351-a2-pl310-cache",
+	  .data = (void *)&bcm_l2x0_data},
+	{ .compatible = "marvell,aurora-outer-cache",
+	  .data = (void *)&aurora_with_outer_data},
+	{ .compatible = "marvell,aurora-system-cache",
+	  .data = (void *)&aurora_no_outer_data},
+	{ .compatible = "marvell,tauros3-cache",
+	  .data = (void *)&tauros3_data },
 	{}
 };
 
diff --git a/arch/arm/mm/cache-tauros3.h b/arch/arm/mm/cache-tauros3.h
new file mode 100644
index 000000000000..02c0a97cbc02
--- /dev/null
+++ b/arch/arm/mm/cache-tauros3.h
@@ -0,0 +1,41 @@
+/*
+ * Marvell Tauros3 cache controller includes
+ *
+ * Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
+ *
+ * based on GPL'ed 2.6 kernel sources
+ * (c) Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef __ASM_ARM_HARDWARE_TAUROS3_H
+#define __ASM_ARM_HARDWARE_TAUROS3_H
+
+/*
+ * Marvell Tauros3 L2CC is compatible with PL310 r0p0
+ * but with PREFETCH_CTRL (r2p0) and an additional event counter.
+ * Also, there is AUX2_CTRL for some Marvell specific control.
+ */
+
+#define TAUROS3_EVENT_CNT2_CFG		0x224
+#define TAUROS3_EVENT_CNT2_VAL		0x228
+#define TAUROS3_INV_ALL			0x780
+#define TAUROS3_CLEAN_ALL		0x784
+#define TAUROS3_AUX2_CTRL		0x820
+
+/* Registers shifts and masks */
+#define TAUROS3_AUX2_CTRL_LINEFILL_BURST8_EN	(1 << 2)
+
+#endif
diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S
index b5c467a65c27..778bcf88ee79 100644
--- a/arch/arm/mm/cache-v7.S
+++ b/arch/arm/mm/cache-v7.S
@@ -146,18 +146,18 @@ flush_levels:
 	ldr	r7, =0x7fff
 	ands	r7, r7, r1, lsr #13		@ extract max number of the index size
 loop1:
-	mov	r9, r4				@ create working copy of max way size
+	mov	r9, r7				@ create working copy of max index
 loop2:
- ARM(	orr	r11, r10, r9, lsl r5	)	@ factor way and cache number into r11
- THUMB(	lsl	r6, r9, r5		)
+ ARM(	orr	r11, r10, r4, lsl r5	)	@ factor way and cache number into r11
+ THUMB(	lsl	r6, r4, r5		)
  THUMB(	orr	r11, r10, r6		)	@ factor way and cache number into r11
- ARM(	orr	r11, r11, r7, lsl r2	)	@ factor index number into r11
- THUMB(	lsl	r6, r7, r2		)
+ ARM(	orr	r11, r11, r9, lsl r2	)	@ factor index number into r11
+ THUMB(	lsl	r6, r9, r2		)
  THUMB(	orr	r11, r11, r6		)	@ factor index number into r11
 	mcr	p15, 0, r11, c7, c14, 2		@ clean & invalidate by set/way
-	subs	r9, r9, #1			@ decrement the way
+	subs	r9, r9, #1			@ decrement the index
 	bge	loop2
-	subs	r7, r7, #1			@ decrement the index
+	subs	r4, r4, #1			@ decrement the way
 	bge	loop1
 skip:
 	add	r10, r10, #2			@ increment cache number
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index 84e6f772e204..6eb97b3a7481 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -36,8 +36,8 @@
  * The context ID is used by debuggers and trace logic, and
  * should be unique within all running processes.
  *
- * In big endian operation, the two 32 bit words are swapped if accesed by
- * non 64-bit operations.
+ * In big endian operation, the two 32 bit words are swapped if accessed
+ * by non-64-bit operations.
  */
 #define ASID_FIRST_VERSION	(1ULL << ASID_BITS)
 #define NUM_USER_ASIDS		ASID_FIRST_VERSION
@@ -78,20 +78,21 @@ void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
 #endif
 
 #ifdef CONFIG_ARM_LPAE
-static void cpu_set_reserved_ttbr0(void)
-{
-	/*
-	 * Set TTBR0 to swapper_pg_dir which contains only global entries. The
-	 * ASID is set to 0.
-	 */
-	cpu_set_ttbr(0, __pa(swapper_pg_dir));
-	isb();
-}
+/*
+ * With LPAE, the ASID and page tables are updated atomicly, so there is
+ * no need for a reserved set of tables (the active ASID tracking prevents
+ * any issues across a rollover).
+ */
+#define cpu_set_reserved_ttbr0()
 #else
 static void cpu_set_reserved_ttbr0(void)
 {
 	u32 ttb;
-	/* Copy TTBR1 into TTBR0 */
+	/*
+	 * Copy TTBR1 into TTBR0.
+	 * This points at swapper_pg_dir, which contains only global
+	 * entries so any speculative walks are perfectly safe.
+	 */
 	asm volatile(
 	" mrc p15, 0, %0, c2, c0, 1		@ read TTBR1\n"
 	" mcr p15, 0, %0, c2, c0, 0		@ set TTBR0\n"
@@ -179,6 +180,7 @@ static int is_reserved_asid(u64 asid)
 
 static u64 new_context(struct mm_struct *mm, unsigned int cpu)
 {
+	static u32 cur_idx = 1;
 	u64 asid = atomic64_read(&mm->context.id);
 	u64 generation = atomic64_read(&asid_generation);
 
@@ -193,10 +195,13 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
 		 * Allocate a free ASID. If we can't find one, take a
 		 * note of the currently active ASIDs and mark the TLBs
 		 * as requiring flushes. We always count from ASID #1,
-		 * as we reserve ASID #0 to switch via TTBR0 and indicate
-		 * rollover events.
+		 * as we reserve ASID #0 to switch via TTBR0 and to
+		 * avoid speculative page table walks from hitting in
+		 * any partial walk caches, which could be populated
+		 * from overlapping level-1 descriptors used to map both
+		 * the module area and the userspace stack.
 		 */
-		asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
+		asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx);
 		if (asid == NUM_USER_ASIDS) {
 			generation = atomic64_add_return(ASID_FIRST_VERSION,
 							 &asid_generation);
@@ -204,6 +209,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
 			asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
 		}
 		__set_bit(asid, asid_map);
+		cur_idx = asid;
 		asid |= generation;
 		cpumask_clear(mm_cpumask(mm));
 	}
@@ -221,8 +227,9 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
 		__check_vmalloc_seq(mm);
 
 	/*
-	 * Required during context switch to avoid speculative page table
-	 * walking with the wrong TTBR.
+	 * We cannot update the pgd and the ASID atomicly with classic
+	 * MMU, so switch exclusively to global mappings to avoid
+	 * speculative page table walking with the wrong TTBR.
 	 */
 	cpu_set_reserved_ttbr0();
 
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index f61a5707823a..1a77450e728a 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -376,7 +376,7 @@ void __init init_dma_coherent_pool_size(unsigned long size)
 static int __init atomic_pool_init(void)
 {
 	struct dma_pool *pool = &atomic_pool;
-	pgprot_t prot = pgprot_dmacoherent(pgprot_kernel);
+	pgprot_t prot = pgprot_dmacoherent(PAGE_KERNEL);
 	gfp_t gfp = GFP_KERNEL | GFP_DMA;
 	unsigned long nr_pages = pool->size >> PAGE_SHIFT;
 	unsigned long *bitmap;
@@ -624,7 +624,7 @@ static void __free_from_contiguous(struct device *dev, struct page *page,
 	if (PageHighMem(page))
 		__dma_free_remap(cpu_addr, size);
 	else
-		__dma_remap(page, size, pgprot_kernel);
+		__dma_remap(page, size, PAGE_KERNEL);
 	dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
 }
 
@@ -1351,7 +1351,7 @@ static void __iommu_free_atomic(struct device *dev, void *cpu_addr,
 static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
 	    dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
 {
-	pgprot_t prot = __get_dma_pgprot(attrs, pgprot_kernel);
+	pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
 	struct page **pages;
 	void *addr = NULL;
 
diff --git a/arch/arm/mm/dump.c b/arch/arm/mm/dump.c
new file mode 100644
index 000000000000..2b3a56414271
--- /dev/null
+++ b/arch/arm/mm/dump.c
@@ -0,0 +1,345 @@
+/*
+ * Debug helper to dump the current kernel pagetables of the system
+ * so that we can see what the various memory ranges are set to.
+ *
+ * Derived from x86 implementation:
+ * (C) Copyright 2008 Intel Corporation
+ *
+ * Author: Arjan van de Ven <arjan@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#include <linux/debugfs.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/seq_file.h>
+
+#include <asm/fixmap.h>
+#include <asm/pgtable.h>
+
+struct addr_marker {
+	unsigned long start_address;
+	const char *name;
+};
+
+static struct addr_marker address_markers[] = {
+	{ MODULES_VADDR, "Modules" },
+	{ PAGE_OFFSET, "Kernel Mapping" },
+	{ 0, "vmalloc() Area" },
+	{ VMALLOC_END, "vmalloc() End" },
+	{ FIXADDR_START, "Fixmap Area" },
+	{ CONFIG_VECTORS_BASE, "Vectors" },
+	{ CONFIG_VECTORS_BASE + PAGE_SIZE * 2, "Vectors End" },
+	{ -1, NULL },
+};
+
+struct pg_state {
+	struct seq_file *seq;
+	const struct addr_marker *marker;
+	unsigned long start_address;
+	unsigned level;
+	u64 current_prot;
+};
+
+struct prot_bits {
+	u64 mask;
+	u64 val;
+	const char *set;
+	const char *clear;
+};
+
+static const struct prot_bits pte_bits[] = {
+	{
+		.mask = L_PTE_USER,
+		.val = L_PTE_USER,
+		.set = "USR",
+		.clear = " ",
+	}, {
+		.mask = L_PTE_RDONLY,
+		.val = L_PTE_RDONLY,
+		.set = "ro",
+		.clear = "RW",
+	}, {
+		.mask = L_PTE_XN,
+		.val = L_PTE_XN,
+		.set = "NX",
+		.clear = "x ",
+	}, {
+		.mask = L_PTE_SHARED,
+		.val = L_PTE_SHARED,
+		.set = "SHD",
+		.clear = " ",
+	}, {
+		.mask = L_PTE_MT_MASK,
+		.val = L_PTE_MT_UNCACHED,
+		.set = "SO/UNCACHED",
+	}, {
+		.mask = L_PTE_MT_MASK,
+		.val = L_PTE_MT_BUFFERABLE,
+		.set = "MEM/BUFFERABLE/WC",
+	}, {
+		.mask = L_PTE_MT_MASK,
+		.val = L_PTE_MT_WRITETHROUGH,
+		.set = "MEM/CACHED/WT",
+	}, {
+		.mask = L_PTE_MT_MASK,
+		.val = L_PTE_MT_WRITEBACK,
+		.set = "MEM/CACHED/WBRA",
+#ifndef CONFIG_ARM_LPAE
+	}, {
+		.mask = L_PTE_MT_MASK,
+		.val = L_PTE_MT_MINICACHE,
+		.set = "MEM/MINICACHE",
+#endif
+	}, {
+		.mask = L_PTE_MT_MASK,
+		.val = L_PTE_MT_WRITEALLOC,
+		.set = "MEM/CACHED/WBWA",
+	}, {
+		.mask = L_PTE_MT_MASK,
+		.val = L_PTE_MT_DEV_SHARED,
+		.set = "DEV/SHARED",
+#ifndef CONFIG_ARM_LPAE
+	}, {
+		.mask = L_PTE_MT_MASK,
+		.val = L_PTE_MT_DEV_NONSHARED,
+		.set = "DEV/NONSHARED",
+#endif
+	}, {
+		.mask = L_PTE_MT_MASK,
+		.val = L_PTE_MT_DEV_WC,
+		.set = "DEV/WC",
+	}, {
+		.mask = L_PTE_MT_MASK,
+		.val = L_PTE_MT_DEV_CACHED,
+		.set = "DEV/CACHED",
+	},
+};
+
+static const struct prot_bits section_bits[] = {
+#ifndef CONFIG_ARM_LPAE
+	/* These are approximate */
+	{
+		.mask = PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
+		.val = 0,
+		.set = " ro",
+	}, {
+		.mask = PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
+		.val = PMD_SECT_AP_WRITE,
+		.set = " RW",
+	}, {
+		.mask = PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
+		.val = PMD_SECT_AP_READ,
+		.set = "USR ro",
+	}, {
+		.mask = PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
+		.val = PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
+		.set = "USR RW",
+#else
+	{
+		.mask = PMD_SECT_USER,
+		.val = PMD_SECT_USER,
+		.set = "USR",
+	}, {
+		.mask = PMD_SECT_RDONLY,
+		.val = PMD_SECT_RDONLY,
+		.set = "ro",
+		.clear = "RW",
+#endif
+	}, {
+		.mask = PMD_SECT_XN,
+		.val = PMD_SECT_XN,
+		.set = "NX",
+		.clear = "x ",
+	}, {
+		.mask = PMD_SECT_S,
+		.val = PMD_SECT_S,
+		.set = "SHD",
+		.clear = " ",
+	},
+};
+
+struct pg_level {
+	const struct prot_bits *bits;
+	size_t num;
+	u64 mask;
+};
+
+static struct pg_level pg_level[] = {
+	{
+	}, { /* pgd */
+	}, { /* pud */
+	}, { /* pmd */
+		.bits = section_bits,
+		.num = ARRAY_SIZE(section_bits),
+	}, { /* pte */
+		.bits = pte_bits,
+		.num = ARRAY_SIZE(pte_bits),
+	},
+};
+
+static void dump_prot(struct pg_state *st, const struct prot_bits *bits, size_t num)
+{
+	unsigned i;
+
+	for (i = 0; i < num; i++, bits++) {
+		const char *s;
+
+		if ((st->current_prot & bits->mask) == bits->val)
+			s = bits->set;
+		else
+			s = bits->clear;
+
+		if (s)
+			seq_printf(st->seq, " %s", s);
+	}
+}
+
+static void note_page(struct pg_state *st, unsigned long addr, unsigned level, u64 val)
+{
+	static const char units[] = "KMGTPE";
+	u64 prot = val & pg_level[level].mask;
+
+	if (addr < USER_PGTABLES_CEILING)
+		return;
+
+	if (!st->level) {
+		st->level = level;
+		st->current_prot = prot;
+		seq_printf(st->seq, "---[ %s ]---\n", st->marker->name);
+	} else if (prot != st->current_prot || level != st->level ||
+		   addr >= st->marker[1].start_address) {
+		const char *unit = units;
+		unsigned long delta;
+
+		if (st->current_prot) {
+			seq_printf(st->seq, "0x%08lx-0x%08lx ",
+				   st->start_address, addr);
+
+			delta = (addr - st->start_address) >> 10;
+			while (!(delta & 1023) && unit[1]) {
+				delta >>= 10;
+				unit++;
+			}
+			seq_printf(st->seq, "%9lu%c", delta, *unit);
+			if (pg_level[st->level].bits)
+				dump_prot(st, pg_level[st->level].bits, pg_level[st->level].num);
+			seq_printf(st->seq, "\n");
+		}
+
+		if (addr >= st->marker[1].start_address) {
+			st->marker++;
+			seq_printf(st->seq, "---[ %s ]---\n", st->marker->name);
+		}
+		st->start_address = addr;
+		st->current_prot = prot;
+		st->level = level;
+	}
+}
+
+static void walk_pte(struct pg_state *st, pmd_t *pmd, unsigned long start)
+{
+	pte_t *pte = pte_offset_kernel(pmd, 0);
+	unsigned long addr;
+	unsigned i;
+
+	for (i = 0; i < PTRS_PER_PTE; i++, pte++) {
+		addr = start + i * PAGE_SIZE;
+		note_page(st, addr, 4, pte_val(*pte));
+	}
+}
+
+static void walk_pmd(struct pg_state *st, pud_t *pud, unsigned long start)
+{
+	pmd_t *pmd = pmd_offset(pud, 0);
+	unsigned long addr;
+	unsigned i;
+
+	for (i = 0; i < PTRS_PER_PMD; i++, pmd++) {
+		addr = start + i * PMD_SIZE;
+		if (pmd_none(*pmd) || pmd_large(*pmd) || !pmd_present(*pmd))
+			note_page(st, addr, 3, pmd_val(*pmd));
+		else
+			walk_pte(st, pmd, addr);
+	}
+}
+
+static void walk_pud(struct pg_state *st, pgd_t *pgd, unsigned long start)
+{
+	pud_t *pud = pud_offset(pgd, 0);
+	unsigned long addr;
+	unsigned i;
+
+	for (i = 0; i < PTRS_PER_PUD; i++, pud++) {
+		addr = start + i * PUD_SIZE;
+		if (!pud_none(*pud)) {
+			walk_pmd(st, pud, addr);
+		} else {
+			note_page(st, addr, 2, pud_val(*pud));
+		}
+	}
+}
+
+static void walk_pgd(struct seq_file *m)
+{
+	pgd_t *pgd = swapper_pg_dir;
+	struct pg_state st;
+	unsigned long addr;
+	unsigned i, pgdoff = USER_PGTABLES_CEILING / PGDIR_SIZE;
+
+	memset(&st, 0, sizeof(st));
+	st.seq = m;
+	st.marker = address_markers;
+
+	pgd += pgdoff;
+
+	for (i = pgdoff; i < PTRS_PER_PGD; i++, pgd++) {
+		addr = i * PGDIR_SIZE;
+		if (!pgd_none(*pgd)) {
+			walk_pud(&st, pgd, addr);
+		} else {
+			note_page(&st, addr, 1, pgd_val(*pgd));
+		}
+	}
+
+	note_page(&st, 0, 0, 0);
+}
+
+static int ptdump_show(struct seq_file *m, void *v)
+{
+	walk_pgd(m);
+	return 0;
+}
+
+static int ptdump_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, ptdump_show, NULL);
+}
+
+static const struct file_operations ptdump_fops = {
+	.open = ptdump_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+static int ptdump_init(void)
+{
+	struct dentry *pe;
+	unsigned i, j;
+
+	for (i = 0; i < ARRAY_SIZE(pg_level); i++)
+		if (pg_level[i].bits)
+			for (j = 0; j < pg_level[i].num; j++)
+				pg_level[i].mask |= pg_level[i].bits[j].mask;
+
+	address_markers[2].start_address = VMALLOC_START;
+
+	pe = debugfs_create_file("kernel_page_tables", 0400, NULL, NULL,
+				 &ptdump_fops);
+	return pe ? 0 : -ENOMEM;
+}
+__initcall(ptdump_init);
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index 6d5ba9afb16a..3387e60e4ea3 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -175,16 +175,16 @@ void __flush_dcache_page(struct address_space *mapping, struct page *page)
 		unsigned long i;
 		if (cache_is_vipt_nonaliasing()) {
 			for (i = 0; i < (1 << compound_order(page)); i++) {
-				void *addr = kmap_atomic(page);
+				void *addr = kmap_atomic(page + i);
 				__cpuc_flush_dcache_area(addr, PAGE_SIZE);
 				kunmap_atomic(addr);
 			}
 		} else {
 			for (i = 0; i < (1 << compound_order(page)); i++) {
-				void *addr = kmap_high_get(page);
+				void *addr = kmap_high_get(page + i);
 				if (addr) {
 					__cpuc_flush_dcache_area(addr, PAGE_SIZE);
-					kunmap_high(page);
+					kunmap_high(page + i);
 				}
 			}
 		}
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 1f7b19a47060..3e8f106ee5fe 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -229,7 +229,7 @@ void __init setup_dma_zone(const struct machine_desc *mdesc)
 #ifdef CONFIG_ZONE_DMA
 	if (mdesc->dma_zone_size) {
 		arm_dma_zone_size = mdesc->dma_zone_size;
-		arm_dma_limit = __pv_phys_offset + arm_dma_zone_size - 1;
+		arm_dma_limit = PHYS_OFFSET + arm_dma_zone_size - 1;
 	} else
 		arm_dma_limit = 0xffffffff;
 	arm_dma_pfn_limit = arm_dma_limit >> PAGE_SHIFT;
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index f123d6eb074b..f9c32ba73544 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -392,9 +392,9 @@ __arm_ioremap_exec(phys_addr_t phys_addr, size_t size, bool cached)
 	unsigned int mtype;
 
 	if (cached)
-		mtype = MT_MEMORY;
+		mtype = MT_MEMORY_RWX;
 	else
-		mtype = MT_MEMORY_NONCACHED;
+		mtype = MT_MEMORY_RWX_NONCACHED;
 
 	return __arm_ioremap_caller(phys_addr, size, mtype,
 			__builtin_return_address(0));
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 580ef2de82d7..4f08c133cc25 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -22,6 +22,7 @@
 #include <asm/cputype.h>
 #include <asm/sections.h>
 #include <asm/cachetype.h>
+#include <asm/sections.h>
 #include <asm/setup.h>
 #include <asm/smp_plat.h>
 #include <asm/tlb.h>
@@ -287,36 +288,43 @@ static struct mem_type mem_types[] = {
 		.prot_l1 = PMD_TYPE_TABLE,
 		.domain = DOMAIN_USER,
 	},
-	[MT_MEMORY] = {
+	[MT_MEMORY_RWX] = {
 		.prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
 		.prot_l1 = PMD_TYPE_TABLE,
 		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
 		.domain = DOMAIN_KERNEL,
 	},
+	[MT_MEMORY_RW] = {
+		.prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
+			    L_PTE_XN,
+		.prot_l1 = PMD_TYPE_TABLE,
+		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
+		.domain = DOMAIN_KERNEL,
+	},
 	[MT_ROM] = {
 		.prot_sect = PMD_TYPE_SECT,
 		.domain = DOMAIN_KERNEL,
 	},
-	[MT_MEMORY_NONCACHED] = {
+	[MT_MEMORY_RWX_NONCACHED] = {
 		.prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
 			    L_PTE_MT_BUFFERABLE,
 		.prot_l1 = PMD_TYPE_TABLE,
 		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
 		.domain = DOMAIN_KERNEL,
 	},
-	[MT_MEMORY_DTCM] = {
+	[MT_MEMORY_RW_DTCM] = {
 		.prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
 			    L_PTE_XN,
 		.prot_l1 = PMD_TYPE_TABLE,
 		.prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
 		.domain = DOMAIN_KERNEL,
 	},
-	[MT_MEMORY_ITCM] = {
+	[MT_MEMORY_RWX_ITCM] = {
 		.prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
 		.prot_l1 = PMD_TYPE_TABLE,
 		.domain = DOMAIN_KERNEL,
 	},
-	[MT_MEMORY_SO] = {
+	[MT_MEMORY_RW_SO] = {
 		.prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
 			    L_PTE_MT_UNCACHED | L_PTE_XN,
 		.prot_l1 = PMD_TYPE_TABLE,
@@ -325,7 +333,8 @@ static struct mem_type mem_types[] = {
 		.domain = DOMAIN_KERNEL,
 	},
 	[MT_MEMORY_DMA_READY] = {
-		.prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
+		.prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
+			L_PTE_XN,
 		.prot_l1 = PMD_TYPE_TABLE,
 		.domain = DOMAIN_KERNEL,
 	},
@@ -337,6 +346,44 @@ const struct mem_type *get_mem_type(unsigned int type)
 }
 EXPORT_SYMBOL(get_mem_type);
 
+#define PTE_SET_FN(_name, pteop) \
+static int pte_set_##_name(pte_t *ptep, pgtable_t token, unsigned long addr, \
+			void *data) \
+{ \
+	pte_t pte = pteop(*ptep); \
+\
+	set_pte_ext(ptep, pte, 0); \
+	return 0; \
+} \
+
+#define SET_MEMORY_FN(_name, callback) \
+int set_memory_##_name(unsigned long addr, int numpages) \
+{ \
+	unsigned long start = addr; \
+	unsigned long size = PAGE_SIZE*numpages; \
+	unsigned end = start + size; \
+\
+	if (start < MODULES_VADDR || start >= MODULES_END) \
+		return -EINVAL;\
+\
+	if (end < MODULES_VADDR || end >= MODULES_END) \
+		return -EINVAL; \
+\
+	apply_to_page_range(&init_mm, start, size, callback, NULL); \
+	flush_tlb_kernel_range(start, end); \
+	return 0;\
+}
+
+PTE_SET_FN(ro, pte_wrprotect)
+PTE_SET_FN(rw, pte_mkwrite)
+PTE_SET_FN(x, pte_mkexec)
+PTE_SET_FN(nx, pte_mknexec)
+
+SET_MEMORY_FN(ro, pte_set_ro)
+SET_MEMORY_FN(rw, pte_set_rw)
+SET_MEMORY_FN(x, pte_set_x)
+SET_MEMORY_FN(nx, pte_set_nx)
+
 /*
  * Adjust the PMD section entries according to the CPU in use.
  */
@@ -410,6 +457,9 @@ static void __init build_mem_type_table(void)
 		mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_XN;
 		mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_XN;
 		mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_XN;
+
+		/* Also setup NX memory mapping */
+		mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_XN;
 	}
 	if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
 		/*
@@ -487,11 +537,13 @@ static void __init build_mem_type_table(void)
 			mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
 			mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
 			mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
-			mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
-			mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
+			mem_types[MT_MEMORY_RWX].prot_sect |= PMD_SECT_S;
+			mem_types[MT_MEMORY_RWX].prot_pte |= L_PTE_SHARED;
+			mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_S;
+			mem_types[MT_MEMORY_RW].prot_pte |= L_PTE_SHARED;
 			mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
-			mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
-			mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
+			mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= PMD_SECT_S;
+			mem_types[MT_MEMORY_RWX_NONCACHED].prot_pte |= L_PTE_SHARED;
 		}
 	}
 
@@ -502,15 +554,15 @@ static void __init build_mem_type_table(void)
 	if (cpu_arch >= CPU_ARCH_ARMv6) {
 		if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
 			/* Non-cacheable Normal is XCB = 001 */
-			mem_types[MT_MEMORY_NONCACHED].prot_sect |=
+			mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |=
 				PMD_SECT_BUFFERED;
 		} else {
 			/* For both ARMv6 and non-TEX-remapping ARMv7 */
-			mem_types[MT_MEMORY_NONCACHED].prot_sect |=
+			mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |=
 				PMD_SECT_TEX(1);
 		}
 	} else {
-		mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
+		mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
 	}
 
 #ifdef CONFIG_ARM_LPAE
@@ -543,10 +595,12 @@ static void __init build_mem_type_table(void)
 
 	mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
 	mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
-	mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
-	mem_types[MT_MEMORY].prot_pte |= kern_pgprot;
+	mem_types[MT_MEMORY_RWX].prot_sect |= ecc_mask | cp->pmd;
+	mem_types[MT_MEMORY_RWX].prot_pte |= kern_pgprot;
+	mem_types[MT_MEMORY_RW].prot_sect |= ecc_mask | cp->pmd;
+	mem_types[MT_MEMORY_RW].prot_pte |= kern_pgprot;
 	mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot;
-	mem_types[MT_MEMORY_NONCACHED].prot_sect |= ecc_mask;
+	mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= ecc_mask;
 	mem_types[MT_ROM].prot_sect |= cp->pmd;
 
 	switch (cp->pmd) {
@@ -1296,6 +1350,8 @@ static void __init kmap_init(void)
 static void __init map_lowmem(void)
 {
 	struct memblock_region *reg;
+	unsigned long kernel_x_start = round_down(__pa(_stext), SECTION_SIZE);
+	unsigned long kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
 
 	/* Map all the lowmem memory banks. */
 	for_each_memblock(memory, reg) {
@@ -1308,12 +1364,40 @@ static void __init map_lowmem(void)
 		if (start >= end)
 			break;
 
-		map.pfn = __phys_to_pfn(start);
-		map.virtual = __phys_to_virt(start);
-		map.length = end - start;
-		map.type = MT_MEMORY;
+		if (end < kernel_x_start || start >= kernel_x_end) {
+			map.pfn = __phys_to_pfn(start);
+			map.virtual = __phys_to_virt(start);
+			map.length = end - start;
+			map.type = MT_MEMORY_RWX;
 
-		create_mapping(&map);
+			create_mapping(&map);
+		} else {
+			/* This better cover the entire kernel */
+			if (start < kernel_x_start) {
+				map.pfn = __phys_to_pfn(start);
+				map.virtual = __phys_to_virt(start);
+				map.length = kernel_x_start - start;
+				map.type = MT_MEMORY_RW;
+
+				create_mapping(&map);
+			}
+
+			map.pfn = __phys_to_pfn(kernel_x_start);
+			map.virtual = __phys_to_virt(kernel_x_start);
+			map.length = kernel_x_end - kernel_x_start;
+			map.type = MT_MEMORY_RWX;
+
+			create_mapping(&map);
+
+			if (kernel_x_end < end) {
+				map.pfn = __phys_to_pfn(kernel_x_end);
+				map.virtual = __phys_to_virt(kernel_x_end);
+				map.length = end - kernel_x_end;
+				map.type = MT_MEMORY_RW;
+
+				create_mapping(&map);
+			}
+		}
 	}
 }
 
diff --git a/arch/arm/mm/pgd.c b/arch/arm/mm/pgd.c
index 1046b373d1ae..249379535be2 100644
--- a/arch/arm/mm/pgd.c
+++ b/arch/arm/mm/pgd.c
@@ -23,7 +23,7 @@
 #define __pgd_alloc()	kmalloc(PTRS_PER_PGD * sizeof(pgd_t), GFP_KERNEL)
 #define __pgd_free(pgd)	kfree(pgd)
 #else
-#define __pgd_alloc()	(pgd_t *)__get_free_pages(GFP_KERNEL, 2)
+#define __pgd_alloc()	(pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_REPEAT, 2)
 #define __pgd_free(pgd)	free_pages((unsigned long)pgd, 2)
 #endif
 