author	Russell King <rmk+kernel@arm.linux.org.uk>	2014-12-05 11:30:54 -0500
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2014-12-05 11:30:54 -0500
commit	e9f2d6d66037cdf97487491e04053f411abc5d16 (patch)
tree	123cec080d17fb74a2531d8cc7ad1cf44bbad9ec	/arch/arm/mm/mmu.c
parent	fbe4dd088f449cbae586aa8af51d271297c75f9f (diff)
parent	06e944b8e5fc4bec83f102f98c1ee4f972f5f072 (diff)
Merge branch 'devel-stable' into for-next
Diffstat (limited to 'arch/arm/mm/mmu.c')
-rw-r--r--	arch/arm/mm/mmu.c	39
1 file changed, 35 insertions, 4 deletions
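This merge brings the ARM fixmap rework into arch/arm/mm/mmu.c: a __set_fixmap() implementation that rewrites a single fixmap PTE and flushes only the local TLB, fixmap page-table setup in kmap_init() that no longer depends on CONFIG_HIGHMEM, and a map_lowmem() change that maps lowmem above the kernel text as MT_MEMORY_RW instead of MT_MEMORY_RWX.

As rough orientation for how the new helper is meant to be used, below is a minimal sketch of a caller going through the generic set_fixmap()/clear_fixmap() wrappers. The function name write_insn_via_fixmap and the choice of the FIX_TEXT_POKE0 slot are illustrative assumptions, not part of this diff, and the serialisation the patch's comment demands (e.g. running under stop_machine()) is assumed to be handled by the caller:

/*
 * Illustrative only -- not part of this commit. Assumes <asm/fixmap.h>
 * provides FIX_TEXT_POKE0 and pulls in the generic set_fixmap()/
 * clear_fixmap() wrappers, and that the caller already holds whatever
 * serialisation __set_fixmap() requires (preemption disabled, e.g.
 * under stop_machine()).
 */
#include <linux/mm.h>
#include <linux/types.h>
#include <asm/cacheflush.h>
#include <asm/fixmap.h>

static void write_insn_via_fixmap(void *addr, u32 insn)
{
	unsigned long offset = (unsigned long)addr & ~PAGE_MASK;
	void *alias;

	/*
	 * Map the target page at a fixed virtual slot; this ends up in
	 * __set_fixmap(), which installs the PTE and flushes the local TLB.
	 */
	set_fixmap(FIX_TEXT_POKE0, page_to_phys(virt_to_page(addr)));
	alias = (void *)(__fix_to_virt(FIX_TEXT_POKE0) + offset);

	/* Write through the temporary alias, then sync the I-cache. */
	*(u32 *)alias = insn;
	flush_icache_range((unsigned long)alias,
			   (unsigned long)alias + sizeof(insn));

	/* Tear the alias down again: pte_clear() plus a local TLB flush. */
	clear_fixmap(FIX_TEXT_POKE0);
}

The point of going through the fixmap rather than a transient mapping is that the PTE write plus local_flush_tlb_kernel_range() in __set_fixmap() stays CPU-local, which is why the comment in the patch restricts callers to preemption-disabled contexts.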
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index f86ce1a9f525..cda7c40999b6 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -22,6 +22,7 @@
 #include <asm/cputype.h>
 #include <asm/sections.h>
 #include <asm/cachetype.h>
+#include <asm/fixmap.h>
 #include <asm/sections.h>
 #include <asm/setup.h>
 #include <asm/smp_plat.h>
@@ -357,6 +358,29 @@ const struct mem_type *get_mem_type(unsigned int type)
 EXPORT_SYMBOL(get_mem_type);
 
 /*
+ * To avoid TLB flush broadcasts, this uses local_flush_tlb_kernel_range().
+ * As a result, this can only be called with preemption disabled, as under
+ * stop_machine().
+ */
+void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot)
+{
+	unsigned long vaddr = __fix_to_virt(idx);
+	pte_t *pte = pte_offset_kernel(pmd_off_k(vaddr), vaddr);
+
+	/* Make sure fixmap region does not exceed available allocation. */
+	BUILD_BUG_ON(FIXADDR_START + (__end_of_fixed_addresses * PAGE_SIZE) >
+		     FIXADDR_END);
+	BUG_ON(idx >= __end_of_fixed_addresses);
+
+	if (pgprot_val(prot))
+		set_pte_at(NULL, vaddr, pte,
+			   pfn_pte(phys >> PAGE_SHIFT, prot));
+	else
+		pte_clear(NULL, vaddr, pte);
+	local_flush_tlb_kernel_range(vaddr, vaddr + PAGE_SIZE);
+}
+
+/*
  * Adjust the PMD section entries according to the CPU in use.
  */
 static void __init build_mem_type_table(void)
@@ -1296,10 +1320,10 @@ static void __init kmap_init(void)
 #ifdef CONFIG_HIGHMEM
 	pkmap_page_table = early_pte_alloc(pmd_off_k(PKMAP_BASE),
 		PKMAP_BASE, _PAGE_KERNEL_TABLE);
-
-	fixmap_page_table = early_pte_alloc(pmd_off_k(FIXADDR_START),
-		FIXADDR_START, _PAGE_KERNEL_TABLE);
 #endif
+
+	early_pte_alloc(pmd_off_k(FIXADDR_START), FIXADDR_START,
+		_PAGE_KERNEL_TABLE);
 }
 
 static void __init map_lowmem(void)
@@ -1319,13 +1343,20 @@ static void __init map_lowmem(void)
 		if (start >= end)
 			break;
 
-		if (end < kernel_x_start || start >= kernel_x_end) {
+		if (end < kernel_x_start) {
 			map.pfn = __phys_to_pfn(start);
 			map.virtual = __phys_to_virt(start);
 			map.length = end - start;
 			map.type = MT_MEMORY_RWX;
 
 			create_mapping(&map);
+		} else if (start >= kernel_x_end) {
+			map.pfn = __phys_to_pfn(start);
+			map.virtual = __phys_to_virt(start);
+			map.length = end - start;
+			map.type = MT_MEMORY_RW;
+
+			create_mapping(&map);
 		} else {
 			/* This better cover the entire kernel */
 			if (start < kernel_x_start) {