path: root/arch/arm/mm/mmu.c
author     Russell King <rmk+kernel@arm.linux.org.uk>   2014-11-03 05:12:13 -0500
committer  Russell King <rmk+kernel@arm.linux.org.uk>   2014-11-03 05:12:13 -0500
commit     06e944b8e5fc4bec83f102f98c1ee4f972f5f072 (patch)
tree       d53b1c3ca270f49f1cae63bbe117cc8587e51510 /arch/arm/mm/mmu.c
parent     f114040e3ea6e07372334ade75d1ee0775c355e1 (diff)
parent     80d6b0c2eed2a504f6740cd1f5ea76dc50abfc4d (diff)
Merge tag 'ronx-next' of git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux into devel-stable
generic fixmaps
ARM support for CONFIG_DEBUG_RODATA
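
For readers unfamiliar with the fixmap interface this merge builds on: the __set_fixmap() function added in the diff below installs or clears a PTE for one of the fixed virtual address slots, letting kernel code temporarily map an arbitrary physical page at a known virtual address. The following is an illustrative sketch only, not code from this merge; the FIX_EXAMPLE slot name is hypothetical, and PAGE_KERNEL is assumed as the mapping protection.

	/*
	 * Hypothetical example: write one word into a physical page via a
	 * temporary fixmap slot.  FIX_EXAMPLE is a made-up enum value; a real
	 * caller must use a slot from enum fixed_addresses in
	 * arch/arm/include/asm/fixmap.h.  Per the comment in the diff below,
	 * the caller must have preemption disabled (e.g. under
	 * stop_machine()), since only the local TLB is flushed.
	 */
	static void fixmap_write_word(phys_addr_t phys, u32 val)
	{
		void *vaddr;

		/* Map the target page at the fixed virtual slot. */
		__set_fixmap(FIX_EXAMPLE, phys & PAGE_MASK, PAGE_KERNEL);
		vaddr = (void *)__fix_to_virt(FIX_EXAMPLE);

		*(u32 *)(vaddr + (phys & ~PAGE_MASK)) = val;

		/* A zero pgprot makes __set_fixmap() clear the PTE again. */
		__set_fixmap(FIX_EXAMPLE, 0, __pgprot(0));
	}
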
Diffstat (limited to 'arch/arm/mm/mmu.c')
-rw-r--r--   arch/arm/mm/mmu.c   39
1 file changed, 35 insertions(+), 4 deletions(-)
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 9f98cec7fe1e..a7b12cb21e81 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -22,6 +22,7 @@
 #include <asm/cputype.h>
 #include <asm/sections.h>
 #include <asm/cachetype.h>
+#include <asm/fixmap.h>
 #include <asm/sections.h>
 #include <asm/setup.h>
 #include <asm/smp_plat.h>
@@ -393,6 +394,29 @@ SET_MEMORY_FN(x, pte_set_x)
 SET_MEMORY_FN(nx, pte_set_nx)
 
 /*
+ * To avoid TLB flush broadcasts, this uses local_flush_tlb_kernel_range().
+ * As a result, this can only be called with preemption disabled, as under
+ * stop_machine().
+ */
+void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot)
+{
+	unsigned long vaddr = __fix_to_virt(idx);
+	pte_t *pte = pte_offset_kernel(pmd_off_k(vaddr), vaddr);
+
+	/* Make sure fixmap region does not exceed available allocation. */
+	BUILD_BUG_ON(FIXADDR_START + (__end_of_fixed_addresses * PAGE_SIZE) >
+		     FIXADDR_END);
+	BUG_ON(idx >= __end_of_fixed_addresses);
+
+	if (pgprot_val(prot))
+		set_pte_at(NULL, vaddr, pte,
+			   pfn_pte(phys >> PAGE_SHIFT, prot));
+	else
+		pte_clear(NULL, vaddr, pte);
+	local_flush_tlb_kernel_range(vaddr, vaddr + PAGE_SIZE);
+}
+
+/*
  * Adjust the PMD section entries according to the CPU in use.
  */
 static void __init build_mem_type_table(void)
@@ -1326,10 +1350,10 @@ static void __init kmap_init(void)
 #ifdef CONFIG_HIGHMEM
 	pkmap_page_table = early_pte_alloc(pmd_off_k(PKMAP_BASE),
 		PKMAP_BASE, _PAGE_KERNEL_TABLE);
-
-	fixmap_page_table = early_pte_alloc(pmd_off_k(FIXADDR_START),
-		FIXADDR_START, _PAGE_KERNEL_TABLE);
 #endif
+
+	early_pte_alloc(pmd_off_k(FIXADDR_START), FIXADDR_START,
+			_PAGE_KERNEL_TABLE);
 }
 
 static void __init map_lowmem(void)
@@ -1349,13 +1373,20 @@ static void __init map_lowmem(void)
 		if (start >= end)
 			break;
 
-		if (end < kernel_x_start || start >= kernel_x_end) {
+		if (end < kernel_x_start) {
 			map.pfn = __phys_to_pfn(start);
 			map.virtual = __phys_to_virt(start);
 			map.length = end - start;
 			map.type = MT_MEMORY_RWX;
 
 			create_mapping(&map);
+		} else if (start >= kernel_x_end) {
+			map.pfn = __phys_to_pfn(start);
+			map.virtual = __phys_to_virt(start);
+			map.length = end - start;
+			map.type = MT_MEMORY_RW;
+
+			create_mapping(&map);
 		} else {
 			/* This better cover the entire kernel */
 			if (start < kernel_x_start) {