author     Chris Metcalf <cmetcalf@tilera.com>   2013-08-12 15:08:09 -0400
committer  Chris Metcalf <cmetcalf@tilera.com>   2013-09-03 14:52:13 -0400
commit     640710a33b54de8d90ae140ef633ed0feba76a75 (patch)
tree       c58e0f7b36fef258674ddf9d5425608d3b5a430e /arch
parent     49cf78ef7bb34833496d59b6dfe84ae51b1ab097 (diff)
tile: add virt_to_kpte() API and clean up and document behavior
We use virt_to_pte(NULL, va) a lot, which isn't very obvious.
I added virt_to_kpte(va) as a more obvious wrapper function
that also validates that va is a kernel address.
I also fixed the semantics of virt_to_pte() so that we handle
the pud and pmd the same way, and we now document the fact that
we handle the final pte level differently.
Signed-off-by: Chris Metcalf <cmetcalf@tilera.com>
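For readers unfamiliar with the old idiom, here is a hedged sketch of what the conversion amounts to; the two helper names below are hypothetical and only illustrate the before/after calling patterns, while the real API added by this commit is just virt_to_kpte().

	/*
	 * Illustrative only -- not code from the tree.
	 */
	#include <linux/bug.h>		/* BUG_ON() */
	#include <asm/page.h>		/* virt_to_pte(), virt_to_kpte(), pte_t */

	static pte_t *kernel_pte_old_idiom(unsigned long va)
	{
		/* Old idiom: a NULL mm implicitly means "walk the kernel page table". */
		return virt_to_pte(NULL, va);
	}

	static pte_t *kernel_pte_new_idiom(unsigned long va)
	{
		/* New idiom: the name says "kernel PTE" and the address is sanity-checked. */
		return virt_to_kpte(va);
	}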
Diffstat (limited to 'arch')
-rw-r--r--  arch/tile/include/asm/mmu_context.h |  2
-rw-r--r--  arch/tile/include/asm/page.h        |  1
-rw-r--r--  arch/tile/kernel/setup.c            |  6
-rw-r--r--  arch/tile/mm/homecache.c            |  6
-rw-r--r--  arch/tile/mm/init.c                 |  4
-rw-r--r--  arch/tile/mm/pgtable.c              | 22
6 files changed, 30 insertions, 11 deletions
diff --git a/arch/tile/include/asm/mmu_context.h b/arch/tile/include/asm/mmu_context.h
index 37f0b741dee7..4734215e2ad4 100644
--- a/arch/tile/include/asm/mmu_context.h
+++ b/arch/tile/include/asm/mmu_context.h
@@ -45,7 +45,7 @@ static inline void __install_page_table(pgd_t *pgdir, int asid, pgprot_t prot)
 
 static inline void install_page_table(pgd_t *pgdir, int asid)
 {
-	pte_t *ptep = virt_to_pte(NULL, (unsigned long)pgdir);
+	pte_t *ptep = virt_to_kpte((unsigned long)pgdir);
 	__install_page_table(pgdir, asid, *ptep);
 }
 
diff --git a/arch/tile/include/asm/page.h b/arch/tile/include/asm/page.h
index 980843dd983e..6346888f7bdc 100644
--- a/arch/tile/include/asm/page.h
+++ b/arch/tile/include/asm/page.h
@@ -328,6 +328,7 @@ static inline int pfn_valid(unsigned long pfn)
 
 struct mm_struct;
 extern pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr);
+extern pte_t *virt_to_kpte(unsigned long kaddr);
 
 #endif /* !__ASSEMBLY__ */
 
diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c
index 10217844052a..b79c312ca3cb 100644
--- a/arch/tile/kernel/setup.c
+++ b/arch/tile/kernel/setup.c
@@ -1600,7 +1600,7 @@ void __init setup_per_cpu_areas(void)
 
 			/* Update the vmalloc mapping and page home. */
 			unsigned long addr = (unsigned long)ptr + i;
-			pte_t *ptep = virt_to_pte(NULL, addr);
+			pte_t *ptep = virt_to_kpte(addr);
 			pte_t pte = *ptep;
 			BUG_ON(pfn != pte_pfn(pte));
 			pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_TILE_L3);
@@ -1609,12 +1609,12 @@ void __init setup_per_cpu_areas(void)
 
 			/* Update the lowmem mapping for consistency. */
 			lowmem_va = (unsigned long)pfn_to_kaddr(pfn);
-			ptep = virt_to_pte(NULL, lowmem_va);
+			ptep = virt_to_kpte(lowmem_va);
 			if (pte_huge(*ptep)) {
 				printk(KERN_DEBUG "early shatter of huge page"
 				       " at %#lx\n", lowmem_va);
 				shatter_pmd((pmd_t *)ptep);
-				ptep = virt_to_pte(NULL, lowmem_va);
+				ptep = virt_to_kpte(lowmem_va);
 				BUG_ON(pte_huge(*ptep));
 			}
 			BUG_ON(pfn != pte_pfn(*ptep));
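The second setup.c hunk repeats the lookup after shatter_pmd(): once the huge mapping is split, the previously returned pointer refers to the old pmd-level entry, so the walk must be redone to reach the new bottom-level PTE. A minimal sketch of that pattern, using a hypothetical helper name not present in the tree:

	/* Hypothetical helper: always return a small-page kernel PTE. */
	static pte_t *kpte_small_page(unsigned long va)
	{
		pte_t *ptep = virt_to_kpte(va);

		if (pte_huge(*ptep)) {
			shatter_pmd((pmd_t *)ptep);	/* split the huge mapping */
			ptep = virt_to_kpte(va);	/* old pointer is stale; walk again */
		}
		BUG_ON(pte_huge(*ptep));
		return ptep;
	}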
diff --git a/arch/tile/mm/homecache.c b/arch/tile/mm/homecache.c
index df46a2d5bdf0..e3ee55b0327a 100644
--- a/arch/tile/mm/homecache.c
+++ b/arch/tile/mm/homecache.c
@@ -200,7 +200,7 @@ void homecache_finv_map_page(struct page *page, int home)
 #else
 	va = __fix_to_virt(FIX_HOMECACHE_BEGIN + smp_processor_id());
 #endif
-	ptep = virt_to_pte(NULL, (unsigned long)va);
+	ptep = virt_to_kpte(va);
 	pte = pfn_pte(page_to_pfn(page), PAGE_KERNEL);
 	__set_pte(ptep, pte_set_home(pte, home));
 	homecache_finv_page_va((void *)va, home);
@@ -385,7 +385,7 @@ int page_home(struct page *page)
 		return initial_page_home();
 	} else {
 		unsigned long kva = (unsigned long)page_address(page);
-		return pte_to_home(*virt_to_pte(NULL, kva));
+		return pte_to_home(*virt_to_kpte(kva));
 	}
 }
 EXPORT_SYMBOL(page_home);
@@ -404,7 +404,7 @@ void homecache_change_page_home(struct page *page, int order, int home)
 		     NULL, 0);
 
 	for (i = 0; i < pages; ++i, kva += PAGE_SIZE) {
-		pte_t *ptep = virt_to_pte(NULL, kva);
+		pte_t *ptep = virt_to_kpte(kva);
 		pte_t pteval = *ptep;
 		BUG_ON(!pte_present(pteval) || pte_huge(pteval));
 		__set_pte(ptep, pte_set_home(pteval, home));
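The page_home() hunk shows the read side of the API: for a page with a permanent kernel mapping, its cache home can be read directly from the kernel PTE that maps it. A hedged illustration, with a hypothetical helper name:

	/* Illustrative only: query the home of a lowmem page via its kernel PTE. */
	static int lowmem_page_home(struct page *page)
	{
		unsigned long kva = (unsigned long)page_address(page);

		/* Assumes page_address() is non-NULL, i.e. the page is in lowmem. */
		return pte_to_home(*virt_to_kpte(kva));
	}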
diff --git a/arch/tile/mm/init.c b/arch/tile/mm/init.c
index c6d21601ec4d..c8f58c12866d 100644
--- a/arch/tile/mm/init.c
+++ b/arch/tile/mm/init.c
@@ -951,7 +951,7 @@ static void mark_w1data_ro(void)
 	BUG_ON((addr & (PAGE_SIZE-1)) != 0);
 	for (; addr <= (unsigned long)__w1data_end - 1; addr += PAGE_SIZE) {
 		unsigned long pfn = kaddr_to_pfn((void *)addr);
-		pte_t *ptep = virt_to_pte(NULL, addr);
+		pte_t *ptep = virt_to_kpte(addr);
 		BUG_ON(pte_huge(*ptep));   /* not relevant for kdata_huge */
 		set_pte_at(&init_mm, addr, ptep, pfn_pte(pfn, PAGE_KERNEL_RO));
 	}
@@ -997,7 +997,7 @@ static void free_init_pages(char *what, unsigned long begin, unsigned long end)
 		 */
 		int pfn = kaddr_to_pfn((void *)addr);
 		struct page *page = pfn_to_page(pfn);
-		pte_t *ptep = virt_to_pte(NULL, addr);
+		pte_t *ptep = virt_to_kpte(addr);
 		if (!initfree) {
 			/*
 			 * If debugging page accesses then do not free
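The init.c hunks show the write side of the API: look up the kernel PTE for a small page and install a new protection for it in init_mm. A hedged sketch of that pattern, assuming the address is mapped by a small page; the helper name is made up:

	/* Hypothetical helper capturing the mark_w1data_ro() pattern above. */
	static void kernel_page_set_ro(unsigned long va)
	{
		unsigned long pfn = kaddr_to_pfn((void *)va);
		pte_t *ptep = virt_to_kpte(va);

		BUG_ON(pte_huge(*ptep));	/* small-page mapping assumed */
		set_pte_at(&init_mm, va, ptep, pfn_pte(pfn, PAGE_KERNEL_RO));
	}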
diff --git a/arch/tile/mm/pgtable.c b/arch/tile/mm/pgtable.c
index 300443389671..2deaddf3e01f 100644
--- a/arch/tile/mm/pgtable.c
+++ b/arch/tile/mm/pgtable.c
@@ -325,6 +325,17 @@ void ptep_set_wrprotect(struct mm_struct *mm,
 
 #endif
 
+/*
+ * Return a pointer to the PTE that corresponds to the given
+ * address in the given page table. A NULL page table just uses
+ * the standard kernel page table; the preferred API in this case
+ * is virt_to_kpte().
+ *
+ * The returned pointer can point to a huge page in other levels
+ * of the page table than the bottom, if the huge page is present
+ * in the page table. For bottom-level PTEs, the returned pointer
+ * can point to a PTE that is either present or not.
+ */
 pte_t *virt_to_pte(struct mm_struct* mm, unsigned long addr)
 {
 	pgd_t *pgd;
@@ -341,14 +352,21 @@ pte_t *virt_to_pte(struct mm_struct* mm, unsigned long addr)
 	if (pud_huge_page(*pud))
 		return (pte_t *)pud;
 	pmd = pmd_offset(pud, addr);
-	if (pmd_huge_page(*pmd))
-		return (pte_t *)pmd;
 	if (!pmd_present(*pmd))
 		return NULL;
+	if (pmd_huge_page(*pmd))
+		return (pte_t *)pmd;
 	return pte_offset_kernel(pmd, addr);
 }
 EXPORT_SYMBOL(virt_to_pte);
 
+pte_t *virt_to_kpte(unsigned long kaddr)
+{
+	BUG_ON(kaddr < PAGE_OFFSET);
+	return virt_to_pte(NULL, kaddr);
+}
+EXPORT_SYMBOL(virt_to_kpte);
+
 pgprot_t set_remote_cache_cpu(pgprot_t prot, int cpu)
 {
 	unsigned int width = smp_width;
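Given the contract documented in the new comment, a caller of virt_to_pte() or virt_to_kpte() has three cases to consider: a NULL return (no mapping at the pmd level or above), a pointer into a higher level of the table for a huge mapping, or a bottom-level PTE that may or may not be present. A hedged example of handling all three; the function name and messages are illustrative only:

	#include <linux/printk.h>

	static void describe_kernel_mapping(unsigned long va)
	{
		pte_t *ptep = virt_to_kpte(va);

		if (ptep == NULL)
			pr_info("%#lx: not mapped at the pmd level or above\n", va);
		else if (pte_huge(*ptep))
			pr_info("%#lx: huge mapping; ptep points at a pud/pmd entry\n", va);
		else if (!pte_present(*ptep))
			pr_info("%#lx: bottom-level PTE exists but is not present\n", va);
		else
			pr_info("%#lx: mapped by a small page\n", va);
	}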