author	Deepak Saxena <dsaxena@plexity.net>	2005-10-28 10:19:11 -0400
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2005-10-28 10:19:11 -0400
commit	9769c2468d423a1562dd59a5db250bd0a5533ec9 (patch)
tree	d584ad444ed8bd5d1abfc197c918dfc6a9af7ddb /arch/arm/mm
parent	c8d2729858d76de4ef7522c8171004fc1959cc44 (diff)
[ARM] 3016/1: Replace map_desc.physical with map_desc.pfn
Patch from Deepak Saxena

Convert map_desc.physical to map_desc.pfn. This allows us to add support
for 36-bit addressed physical devices in the static maps without having
to resort to u64 variables.

Signed-off-by: Deepak Saxena <dsaxena@plexity.net>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
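For context, the conversion helpers used throughout the patch amount to a page-size shift. The following is an illustrative user-space sketch only, not kernel code; it assumes 4 KiB pages and borrows the kernel's __phys_to_pfn()/__pfn_to_phys() macro names to show why a page frame number held in a 32-bit unsigned long can still describe a 36-bit physical address.

/*
 * Illustrative only: with 4 KiB pages, pfn = phys >> PAGE_SHIFT, so a
 * 36-bit physical address collapses into a 24-bit page frame number
 * that fits comfortably in a 32-bit unsigned long.
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT		12
#define __phys_to_pfn(paddr)	((unsigned long)((paddr) >> PAGE_SHIFT))
#define __pfn_to_phys(pfn)	((uint64_t)(pfn) << PAGE_SHIFT)

int main(void)
{
	uint64_t phys = 0xf00000000ULL;	/* example 36-bit physical address */
	unsigned long pfn = __phys_to_pfn(phys);

	printf("phys 0x%09llx -> pfn 0x%06lx -> phys 0x%09llx\n",
	       (unsigned long long)phys, pfn,
	       (unsigned long long)__pfn_to_phys(pfn));
	return 0;
}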
Diffstat (limited to 'arch/arm/mm')
-rw-r--r--	arch/arm/mm/init.c	12
-rw-r--r--	arch/arm/mm/mm-armv.c	8
2 files changed, 10 insertions, 10 deletions
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index d1f1ec73500f..f4496813615a 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -262,8 +262,8 @@ bootmem_init_node(int node, int initrd_node, struct meminfo *mi)
 		if (end_pfn < end)
 			end_pfn = end;
 
-		map.physical = mi->bank[i].start;
-		map.virtual = __phys_to_virt(map.physical);
+		map.pfn = __phys_to_pfn(mi->bank[i].start);
+		map.virtual = __phys_to_virt(mi->bank[i].start);
 		map.length = mi->bank[i].size;
 		map.type = MT_MEMORY;
 
@@ -365,7 +365,7 @@ static void __init bootmem_init(struct meminfo *mi)
 
 #ifdef CONFIG_XIP_KERNEL
 #error needs fixing
-	p->physical = CONFIG_XIP_PHYS_ADDR & PMD_MASK;
+	p->pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & PMD_MASK);
 	p->virtual = (unsigned long)&_stext & PMD_MASK;
 	p->length = ((unsigned long)&_etext - p->virtual + ~PMD_MASK) & PMD_MASK;
 	p->type = MT_ROM;
@@ -439,14 +439,14 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
 	 * Map the cache flushing regions.
 	 */
 #ifdef FLUSH_BASE
-	map.physical = FLUSH_BASE_PHYS;
+	map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS);
 	map.virtual = FLUSH_BASE;
 	map.length = PGDIR_SIZE;
 	map.type = MT_CACHECLEAN;
 	create_mapping(&map);
 #endif
 #ifdef FLUSH_BASE_MINICACHE
-	map.physical = FLUSH_BASE_PHYS + PGDIR_SIZE;
+	map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS + PGDIR_SIZE);
 	map.virtual = FLUSH_BASE_MINICACHE;
 	map.length = PGDIR_SIZE;
 	map.type = MT_MINICLEAN;
@@ -464,7 +464,7 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
 	 * location (0xffff0000). If we aren't using high-vectors, also
 	 * create a mapping at the low-vectors virtual address.
 	 */
-	map.physical = virt_to_phys(vectors);
+	map.pfn = __phys_to_pfn(virt_to_phys(vectors));
 	map.virtual = 0xffff0000;
 	map.length = PAGE_SIZE;
 	map.type = MT_HIGH_VECTORS;
diff --git a/arch/arm/mm/mm-armv.c b/arch/arm/mm/mm-armv.c
index c626361c0f5e..64db10e806b3 100644
--- a/arch/arm/mm/mm-armv.c
+++ b/arch/arm/mm/mm-armv.c
@@ -483,7 +483,7 @@ void __init create_mapping(struct map_desc *md)
 	if (md->virtual != vectors_base() && md->virtual < TASK_SIZE) {
 		printk(KERN_WARNING "BUG: not creating mapping for "
 		       "0x%08lx at 0x%08lx in user region\n",
-		       md->physical, md->virtual);
+		       __pfn_to_phys(md->pfn), md->virtual);
 		return;
 	}
 
@@ -491,7 +491,7 @@ void __init create_mapping(struct map_desc *md)
 	    md->virtual >= PAGE_OFFSET && md->virtual < VMALLOC_END) {
 		printk(KERN_WARNING "BUG: mapping for 0x%08lx at 0x%08lx "
 		       "overlaps vmalloc space\n",
-		       md->physical, md->virtual);
+		       __pfn_to_phys(md->pfn), md->virtual);
 	}
 
 	domain = mem_types[md->type].domain;
@@ -500,14 +500,14 @@ void __init create_mapping(struct map_desc *md)
 	prot_sect = mem_types[md->type].prot_sect | PMD_DOMAIN(domain);
 
 	virt = md->virtual;
-	off = md->physical - virt;
+	off = __pfn_to_phys(md->pfn) - virt;
 	length = md->length;
 
 	if (mem_types[md->type].prot_l1 == 0 &&
 	    (virt & 0xfffff || (virt + off) & 0xfffff || (virt + length) & 0xfffff)) {
 		printk(KERN_WARNING "BUG: map for 0x%08lx at 0x%08lx can not "
 		       "be mapped using pages, ignoring.\n",
-		       md->physical, md->virtual);
 		return;
 	}
 
+		       __pfn_to_phys(md->pfn), md->virtual);
 		return;
 	}
 
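For callers, the visible effect of the conversion is in the static map tables that board support files pass to iotable_init()/create_mapping(). The sketch below is a hypothetical board entry written the post-conversion way; the virtual and physical addresses and the example_* names are invented for illustration, while struct map_desc, __phys_to_pfn(), MT_DEVICE and iotable_init() are the existing kernel interfaces.

#include <linux/kernel.h>
#include <linux/init.h>
#include <asm/memory.h>
#include <asm/sizes.h>
#include <asm/mach/map.h>

/* Hypothetical board file: the addresses below are made up for illustration. */
static struct map_desc example_io_desc[] __initdata = {
	{
		.virtual	= 0xfe000000,			/* assumed virtual base */
		.pfn		= __phys_to_pfn(0x48000000),	/* assumed peripheral base */
		.length		= SZ_1M,
		.type		= MT_DEVICE,
	},
};

static void __init example_map_io(void)
{
	iotable_init(example_io_desc, ARRAY_SIZE(example_io_desc));
}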