about summary refs log tree commit diff stats
path: root/arch
diff options
context:
space:
mode:
authorChris Metcalf <cmetcalf@tilera.com>2012-03-29 15:42:27 -0400
committerChris Metcalf <cmetcalf@tilera.com>2012-04-02 12:13:12 -0400
commit7a7039ee71811222310b431aee246eb78dd0d401 (patch)
tree14d3c560a0053f88a3be2d7a7c31749b148c5d83 /arch
parentb230ff2d5c53bb79e1121554cd3c827e5c1b70fd (diff)
arch/tile: fix bug in loading kernels larger than 16 MB
Previously we only handled kernels up to a single huge page in size. Now we create additional PTEs appropriately.

Signed-off-by: Chris Metcalf <cmetcalf@tilera.com>
Diffstat (limited to 'arch')
-rw-r--r-- arch/tile/mm/init.c | 21
1 file changed, 15 insertions(+), 6 deletions(-)
diff --git a/arch/tile/mm/init.c b/arch/tile/mm/init.c
index 830c4908ea76..8400d3fb9e0a 100644
--- a/arch/tile/mm/init.c
+++ b/arch/tile/mm/init.c
@@ -557,6 +557,7 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
557 557
558 address = MEM_SV_INTRPT; 558 address = MEM_SV_INTRPT;
559 pmd = get_pmd(pgtables, address); 559 pmd = get_pmd(pgtables, address);
560 pfn = 0; /* code starts at PA 0 */
560 if (ktext_small) { 561 if (ktext_small) {
561 /* Allocate an L2 PTE for the kernel text */ 562 /* Allocate an L2 PTE for the kernel text */
562 int cpu = 0; 563 int cpu = 0;
@@ -579,10 +580,15 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
579 } 580 }
580 581
581 BUG_ON(address != (unsigned long)_stext); 582 BUG_ON(address != (unsigned long)_stext);
582 pfn = 0; /* code starts at PA 0 */ 583 pte = NULL;
583 pte = alloc_pte(); 584 for (; address < (unsigned long)_einittext;
584 for (pte_ofs = 0; address < (unsigned long)_einittext; 585 pfn++, address += PAGE_SIZE) {
585 pfn++, pte_ofs++, address += PAGE_SIZE) { 586 pte_ofs = pte_index(address);
587 if (pte_ofs == 0) {
588 if (pte)
589 assign_pte(pmd++, pte);
590 pte = alloc_pte();
591 }
586 if (!ktext_local) { 592 if (!ktext_local) {
587 prot = set_remote_cache_cpu(prot, cpu); 593 prot = set_remote_cache_cpu(prot, cpu);
588 cpu = cpumask_next(cpu, &ktext_mask); 594 cpu = cpumask_next(cpu, &ktext_mask);
@@ -591,7 +597,8 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
591 } 597 }
592 pte[pte_ofs] = pfn_pte(pfn, prot); 598 pte[pte_ofs] = pfn_pte(pfn, prot);
593 } 599 }
594 assign_pte(pmd, pte); 600 if (pte)
601 assign_pte(pmd, pte);
595 } else { 602 } else {
596 pte_t pteval = pfn_pte(0, PAGE_KERNEL_EXEC); 603 pte_t pteval = pfn_pte(0, PAGE_KERNEL_EXEC);
597 pteval = pte_mkhuge(pteval); 604 pteval = pte_mkhuge(pteval);
@@ -614,7 +621,9 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
614 else 621 else
615 pteval = hv_pte_set_mode(pteval, 622 pteval = hv_pte_set_mode(pteval,
616 HV_PTE_MODE_CACHE_NO_L3); 623 HV_PTE_MODE_CACHE_NO_L3);
617 *(pte_t *)pmd = pteval; 624 for (; address < (unsigned long)_einittext;
625 pfn += PFN_DOWN(HPAGE_SIZE), address += HPAGE_SIZE)
626 *(pte_t *)(pmd++) = pfn_pte(pfn, pteval);
618 } 627 }
619 628
620 /* Set swapper_pgprot here so it is flushed to memory right away. */ 629 /* Set swapper_pgprot here so it is flushed to memory right away. */