about summary refs log tree commit diff stats
path: root/arch/powerpc/mm/hugetlbpage.c
diff options
context:
space:
mode:
author	Linus Torvalds <torvalds@linux-foundation.org>	2014-12-11 20:48:14 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-12-11 20:48:14 -0500
commit	140cd7fb04a4a2bc09a30980bc8104cc89e09330 (patch)
tree	776d57c7508f946d592de4334d4d3cb50fd36220 /arch/powerpc/mm/hugetlbpage.c
parent	27afc5dbda52ee3dbcd0bda7375c917c6936b470 (diff)
parent	56548fc0e86cb9156af7a7e1f15ba78f251dafaf (diff)
Merge tag 'powerpc-3.19-1' of git://git.kernel.org/pub/scm/linux/kernel/git/mpe/linux
Pull powerpc updates from Michael Ellerman: "Some nice cleanups like removing bootmem, and removal of __get_cpu_var(). There is one patch to mm/gup.c. This is the generic GUP implementation, but is only used by us and arm(64). We have an ack from Steve Capper, and although we didn't get an ack from Andrew he told us to take the patch through the powerpc tree. There's one cxl patch. This is in drivers/misc, but Greg said he was happy for us to manage fixes for it. There is an infrastructure patch to support an IPMI driver for OPAL. There is also an RTC driver for OPAL. We weren't able to get any response from the RTC maintainer, Alessandro Zummo, so in the end we just merged the driver. The usual batch of Freescale updates from Scott" * tag 'powerpc-3.19-1' of git://git.kernel.org/pub/scm/linux/kernel/git/mpe/linux: (101 commits) powerpc/powernv: Return to cpu offline loop when finished in KVM guest powerpc/book3s: Fix partial invalidation of TLBs in MCE code. powerpc/mm: don't do tlbie for updatepp request with NO HPTE fault powerpc/xmon: Cleanup the breakpoint flags powerpc/xmon: Enable HW instruction breakpoint on POWER8 powerpc/mm/thp: Use tlbiel if possible powerpc/mm/thp: Remove code duplication powerpc/mm/hugetlb: Sanity check gigantic hugepage count powerpc/oprofile: Disable pagefaults during user stack read powerpc/mm: Check for matching hpte without taking hpte lock powerpc: Drop useless warning in eeh_init() powerpc/powernv: Cleanup unused MCE definitions/declarations. powerpc/eeh: Dump PHB diag-data early powerpc/eeh: Recover EEH error on ownership change for BCM5719 powerpc/eeh: Set EEH_PE_RESET on PE reset powerpc/eeh: Refactor eeh_reset_pe() powerpc: Remove more traces of bootmem powerpc/pseries: Initialise nvram_pstore_info's buf_lock cxl: Name interrupts in /proc/interrupt cxl: Return error to PSL if IRQ demultiplexing fails & print clearer warning ...
Diffstat (limited to 'arch/powerpc/mm/hugetlbpage.c')
-rw-r--r--	arch/powerpc/mm/hugetlbpage.c	|	51
1 file changed, 26 insertions(+), 25 deletions(-)
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 6a4a5fcb9730..5ff4e07d920a 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -62,6 +62,9 @@ static unsigned nr_gpages;
62/* 62/*
63 * We have PGD_INDEX_SIZ = 12 and PTE_INDEX_SIZE = 8, so that we can have 63 * We have PGD_INDEX_SIZ = 12 and PTE_INDEX_SIZE = 8, so that we can have
64 * 16GB hugepage pte in PGD and 16MB hugepage pte at PMD; 64 * 16GB hugepage pte in PGD and 16MB hugepage pte at PMD;
65 *
66 * Defined in such a way that we can optimize away code block at build time
67 * if CONFIG_HUGETLB_PAGE=n.
65 */ 68 */
66int pmd_huge(pmd_t pmd) 69int pmd_huge(pmd_t pmd)
67{ 70{
@@ -230,7 +233,7 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz
230 if (hugepd_none(*hpdp) && __hugepte_alloc(mm, hpdp, addr, pdshift, pshift)) 233 if (hugepd_none(*hpdp) && __hugepte_alloc(mm, hpdp, addr, pdshift, pshift))
231 return NULL; 234 return NULL;
232 235
233 return hugepte_offset(hpdp, addr, pdshift); 236 return hugepte_offset(*hpdp, addr, pdshift);
234} 237}
235 238
236#else 239#else
@@ -270,13 +273,13 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz
270 if (hugepd_none(*hpdp) && __hugepte_alloc(mm, hpdp, addr, pdshift, pshift)) 273 if (hugepd_none(*hpdp) && __hugepte_alloc(mm, hpdp, addr, pdshift, pshift))
271 return NULL; 274 return NULL;
272 275
273 return hugepte_offset(hpdp, addr, pdshift); 276 return hugepte_offset(*hpdp, addr, pdshift);
274} 277}
275#endif 278#endif
276 279
277#ifdef CONFIG_PPC_FSL_BOOK3E 280#ifdef CONFIG_PPC_FSL_BOOK3E
278/* Build list of addresses of gigantic pages. This function is used in early 281/* Build list of addresses of gigantic pages. This function is used in early
279 * boot before the buddy or bootmem allocator is setup. 282 * boot before the buddy allocator is setup.
280 */ 283 */
281void add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages) 284void add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages)
282{ 285{
@@ -312,7 +315,7 @@ int alloc_bootmem_huge_page(struct hstate *hstate)
312 * If gpages can be in highmem we can't use the trick of storing the 315 * If gpages can be in highmem we can't use the trick of storing the
313 * data structure in the page; allocate space for this 316 * data structure in the page; allocate space for this
314 */ 317 */
315 m = alloc_bootmem(sizeof(struct huge_bootmem_page)); 318 m = memblock_virt_alloc(sizeof(struct huge_bootmem_page), 0);
316 m->phys = gpage_freearray[idx].gpage_list[--nr_gpages]; 319 m->phys = gpage_freearray[idx].gpage_list[--nr_gpages];
317#else 320#else
318 m = phys_to_virt(gpage_freearray[idx].gpage_list[--nr_gpages]); 321 m = phys_to_virt(gpage_freearray[idx].gpage_list[--nr_gpages]);
@@ -352,6 +355,13 @@ static int __init do_gpage_early_setup(char *param, char *val,
352 if (size != 0) { 355 if (size != 0) {
353 if (sscanf(val, "%lu", &npages) <= 0) 356 if (sscanf(val, "%lu", &npages) <= 0)
354 npages = 0; 357 npages = 0;
358 if (npages > MAX_NUMBER_GPAGES) {
359 pr_warn("MMU: %lu pages requested for page "
360 "size %llu KB, limiting to "
361 __stringify(MAX_NUMBER_GPAGES) "\n",
362 npages, size / 1024);
363 npages = MAX_NUMBER_GPAGES;
364 }
355 gpage_npages[shift_to_mmu_psize(__ffs(size))] = npages; 365 gpage_npages[shift_to_mmu_psize(__ffs(size))] = npages;
356 size = 0; 366 size = 0;
357 } 367 }
@@ -399,7 +409,7 @@ void __init reserve_hugetlb_gpages(void)
399#else /* !PPC_FSL_BOOK3E */ 409#else /* !PPC_FSL_BOOK3E */
400 410
401/* Build list of addresses of gigantic pages. This function is used in early 411/* Build list of addresses of gigantic pages. This function is used in early
402 * boot before the buddy or bootmem allocator is setup. 412 * boot before the buddy allocator is setup.
403 */ 413 */
404void add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages) 414void add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages)
405{ 415{
@@ -462,7 +472,7 @@ static void hugepd_free(struct mmu_gather *tlb, void *hugepte)
462{ 472{
463 struct hugepd_freelist **batchp; 473 struct hugepd_freelist **batchp;
464 474
465 batchp = &get_cpu_var(hugepd_freelist_cur); 475 batchp = this_cpu_ptr(&hugepd_freelist_cur);
466 476
467 if (atomic_read(&tlb->mm->mm_users) < 2 || 477 if (atomic_read(&tlb->mm->mm_users) < 2 ||
468 cpumask_equal(mm_cpumask(tlb->mm), 478 cpumask_equal(mm_cpumask(tlb->mm),
@@ -536,7 +546,7 @@ static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
536 do { 546 do {
537 pmd = pmd_offset(pud, addr); 547 pmd = pmd_offset(pud, addr);
538 next = pmd_addr_end(addr, end); 548 next = pmd_addr_end(addr, end);
539 if (!is_hugepd(pmd)) { 549 if (!is_hugepd(__hugepd(pmd_val(*pmd)))) {
540 /* 550 /*
541 * if it is not hugepd pointer, we should already find 551 * if it is not hugepd pointer, we should already find
542 * it cleared. 552 * it cleared.
@@ -585,7 +595,7 @@ static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
585 do { 595 do {
586 pud = pud_offset(pgd, addr); 596 pud = pud_offset(pgd, addr);
587 next = pud_addr_end(addr, end); 597 next = pud_addr_end(addr, end);
588 if (!is_hugepd(pud)) { 598 if (!is_hugepd(__hugepd(pud_val(*pud)))) {
589 if (pud_none_or_clear_bad(pud)) 599 if (pud_none_or_clear_bad(pud))
590 continue; 600 continue;
591 hugetlb_free_pmd_range(tlb, pud, addr, next, floor, 601 hugetlb_free_pmd_range(tlb, pud, addr, next, floor,
@@ -651,7 +661,7 @@ void hugetlb_free_pgd_range(struct mmu_gather *tlb,
651 do { 661 do {
652 next = pgd_addr_end(addr, end); 662 next = pgd_addr_end(addr, end);
653 pgd = pgd_offset(tlb->mm, addr); 663 pgd = pgd_offset(tlb->mm, addr);
654 if (!is_hugepd(pgd)) { 664 if (!is_hugepd(__hugepd(pgd_val(*pgd)))) {
655 if (pgd_none_or_clear_bad(pgd)) 665 if (pgd_none_or_clear_bad(pgd))
656 continue; 666 continue;
657 hugetlb_free_pud_range(tlb, pgd, addr, next, floor, ceiling); 667 hugetlb_free_pud_range(tlb, pgd, addr, next, floor, ceiling);
@@ -711,12 +721,11 @@ static unsigned long hugepte_addr_end(unsigned long addr, unsigned long end,
711 return (__boundary - 1 < end - 1) ? __boundary : end; 721 return (__boundary - 1 < end - 1) ? __boundary : end;
712} 722}
713 723
714int gup_hugepd(hugepd_t *hugepd, unsigned pdshift, 724int gup_huge_pd(hugepd_t hugepd, unsigned long addr, unsigned pdshift,
715 unsigned long addr, unsigned long end, 725 unsigned long end, int write, struct page **pages, int *nr)
716 int write, struct page **pages, int *nr)
717{ 726{
718 pte_t *ptep; 727 pte_t *ptep;
719 unsigned long sz = 1UL << hugepd_shift(*hugepd); 728 unsigned long sz = 1UL << hugepd_shift(hugepd);
720 unsigned long next; 729 unsigned long next;
721 730
722 ptep = hugepte_offset(hugepd, addr, pdshift); 731 ptep = hugepte_offset(hugepd, addr, pdshift);
@@ -959,7 +968,7 @@ pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea, unsigned *shift
959 else if (pgd_huge(pgd)) { 968 else if (pgd_huge(pgd)) {
960 ret_pte = (pte_t *) pgdp; 969 ret_pte = (pte_t *) pgdp;
961 goto out; 970 goto out;
962 } else if (is_hugepd(&pgd)) 971 } else if (is_hugepd(__hugepd(pgd_val(pgd))))
963 hpdp = (hugepd_t *)&pgd; 972 hpdp = (hugepd_t *)&pgd;
964 else { 973 else {
965 /* 974 /*
@@ -976,7 +985,7 @@ pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea, unsigned *shift
976 else if (pud_huge(pud)) { 985 else if (pud_huge(pud)) {
977 ret_pte = (pte_t *) pudp; 986 ret_pte = (pte_t *) pudp;
978 goto out; 987 goto out;
979 } else if (is_hugepd(&pud)) 988 } else if (is_hugepd(__hugepd(pud_val(pud))))
980 hpdp = (hugepd_t *)&pud; 989 hpdp = (hugepd_t *)&pud;
981 else { 990 else {
982 pdshift = PMD_SHIFT; 991 pdshift = PMD_SHIFT;
@@ -997,7 +1006,7 @@ pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea, unsigned *shift
997 if (pmd_huge(pmd) || pmd_large(pmd)) { 1006 if (pmd_huge(pmd) || pmd_large(pmd)) {
998 ret_pte = (pte_t *) pmdp; 1007 ret_pte = (pte_t *) pmdp;
999 goto out; 1008 goto out;
1000 } else if (is_hugepd(&pmd)) 1009 } else if (is_hugepd(__hugepd(pmd_val(pmd))))
1001 hpdp = (hugepd_t *)&pmd; 1010 hpdp = (hugepd_t *)&pmd;
1002 else 1011 else
1003 return pte_offset_kernel(&pmd, ea); 1012 return pte_offset_kernel(&pmd, ea);
@@ -1006,7 +1015,7 @@ pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea, unsigned *shift
1006 if (!hpdp) 1015 if (!hpdp)
1007 return NULL; 1016 return NULL;
1008 1017
1009 ret_pte = hugepte_offset(hpdp, ea, pdshift); 1018 ret_pte = hugepte_offset(*hpdp, ea, pdshift);
1010 pdshift = hugepd_shift(*hpdp); 1019 pdshift = hugepd_shift(*hpdp);
1011out: 1020out:
1012 if (shift) 1021 if (shift)
@@ -1036,14 +1045,6 @@ int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
1036 if ((pte_val(pte) & mask) != mask) 1045 if ((pte_val(pte) & mask) != mask)
1037 return 0; 1046 return 0;
1038 1047
1039#ifdef CONFIG_TRANSPARENT_HUGEPAGE
1040 /*
1041 * check for splitting here
1042 */
1043 if (pmd_trans_splitting(pte_pmd(pte)))
1044 return 0;
1045#endif
1046
1047 /* hugepages are never "special" */ 1048 /* hugepages are never "special" */
1048 VM_BUG_ON(!pfn_valid(pte_pfn(pte))); 1049 VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
1049 1050