aboutsummaryrefslogtreecommitdiffstats
path: root/mm/huge_memory.c
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2013-07-04 13:29:23 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2013-07-04 13:29:23 -0400
commit65b97fb7303050fc826e518cf67fc283da23314f (patch)
tree595e7f04d65d95a39d65bd2dcf2385b3b6ea7969 /mm/huge_memory.c
parentddcf6600b133697adbafd96e080818bdc0dfd028 (diff)
parent1d8b368ab4aacfc3f864655baad4d31a3028ec1a (diff)
Merge branch 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc
Pull powerpc updates from Ben Herrenschmidt: "This is the powerpc changes for the 3.11 merge window. In addition to the usual bug fixes and small updates, the main highlights are: - Support for transparent huge pages by Aneesh Kumar for 64-bit server processors. This allows the use of 16M pages as transparent huge pages on kernels compiled with a 64K base page size. - Base VFIO support for KVM on power by Alexey Kardashevskiy - Wiring up of our nvram to the pstore infrastructure, including putting compressed oopses in there by Aruna Balakrishnaiah - Move, rework and improve our "EEH" (basically PCI error handling and recovery) infrastructure. It is no longer specific to pseries but is now usable by the new "powernv" platform as well (no hypervisor) by Gavin Shan. - I fixed some bugs in our math-emu instruction decoding and made it usable to emulate some optional FP instructions on processors with hard FP that lack them (such as fsqrt on Freescale embedded processors). - Support for Power8 "Event Based Branch" facility by Michael Ellerman. This facility allows what is basically "userspace interrupts" for performance monitor events. - A bunch of Transactional Memory vs. Signals bug fixes and HW breakpoint/watchpoint fixes by Michael Neuling. And more ... I apologize in advance if I've failed to highlight something that somebody deemed worth it." 
* 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc: (156 commits) pstore: Add hsize argument in write_buf call of pstore_ftrace_call powerpc/fsl: add MPIC timer wakeup support powerpc/mpic: create mpic subsystem object powerpc/mpic: add global timer support powerpc/mpic: add irq_set_wake support powerpc/85xx: enable coreint for all the 64bit boards powerpc/8xx: Erroneous double irq_eoi() on CPM IRQ in MPC8xx powerpc/fsl: Enable CONFIG_E1000E in mpc85xx_smp_defconfig powerpc/mpic: Add get_version API both for internal and external use powerpc: Handle both new style and old style reserve maps powerpc/hw_brk: Fix off by one error when validating DAWR region end powerpc/pseries: Support compression of oops text via pstore powerpc/pseries: Re-organise the oops compression code pstore: Pass header size in the pstore write callback powerpc/powernv: Fix iommu initialization again powerpc/pseries: Inform the hypervisor we are using EBB regs powerpc/perf: Add power8 EBB support powerpc/perf: Core EBB support for 64-bit book3s powerpc/perf: Drop MMCRA from thread_struct powerpc/perf: Don't enable if we have zero events ...
Diffstat (limited to 'mm/huge_memory.c')
-rw-r--r--mm/huge_memory.c28
1 file changed, 18 insertions, 10 deletions
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index d8b3b850150c..243e710c6039 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -729,8 +729,8 @@ static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
729 pmd_t entry; 729 pmd_t entry;
730 entry = mk_huge_pmd(page, vma); 730 entry = mk_huge_pmd(page, vma);
731 page_add_new_anon_rmap(page, vma, haddr); 731 page_add_new_anon_rmap(page, vma, haddr);
732 pgtable_trans_huge_deposit(mm, pmd, pgtable);
732 set_pmd_at(mm, haddr, pmd, entry); 733 set_pmd_at(mm, haddr, pmd, entry);
733 pgtable_trans_huge_deposit(mm, pgtable);
734 add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR); 734 add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR);
735 mm->nr_ptes++; 735 mm->nr_ptes++;
736 spin_unlock(&mm->page_table_lock); 736 spin_unlock(&mm->page_table_lock);
@@ -771,8 +771,8 @@ static bool set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm,
771 entry = mk_pmd(zero_page, vma->vm_page_prot); 771 entry = mk_pmd(zero_page, vma->vm_page_prot);
772 entry = pmd_wrprotect(entry); 772 entry = pmd_wrprotect(entry);
773 entry = pmd_mkhuge(entry); 773 entry = pmd_mkhuge(entry);
774 pgtable_trans_huge_deposit(mm, pmd, pgtable);
774 set_pmd_at(mm, haddr, pmd, entry); 775 set_pmd_at(mm, haddr, pmd, entry);
775 pgtable_trans_huge_deposit(mm, pgtable);
776 mm->nr_ptes++; 776 mm->nr_ptes++;
777 return true; 777 return true;
778} 778}
@@ -916,8 +916,8 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
916 916
917 pmdp_set_wrprotect(src_mm, addr, src_pmd); 917 pmdp_set_wrprotect(src_mm, addr, src_pmd);
918 pmd = pmd_mkold(pmd_wrprotect(pmd)); 918 pmd = pmd_mkold(pmd_wrprotect(pmd));
919 pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable);
919 set_pmd_at(dst_mm, addr, dst_pmd, pmd); 920 set_pmd_at(dst_mm, addr, dst_pmd, pmd);
920 pgtable_trans_huge_deposit(dst_mm, pgtable);
921 dst_mm->nr_ptes++; 921 dst_mm->nr_ptes++;
922 922
923 ret = 0; 923 ret = 0;
@@ -987,7 +987,7 @@ static int do_huge_pmd_wp_zero_page_fallback(struct mm_struct *mm,
987 pmdp_clear_flush(vma, haddr, pmd); 987 pmdp_clear_flush(vma, haddr, pmd);
988 /* leave pmd empty until pte is filled */ 988 /* leave pmd empty until pte is filled */
989 989
990 pgtable = pgtable_trans_huge_withdraw(mm); 990 pgtable = pgtable_trans_huge_withdraw(mm, pmd);
991 pmd_populate(mm, &_pmd, pgtable); 991 pmd_populate(mm, &_pmd, pgtable);
992 992
993 for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) { 993 for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
@@ -1085,7 +1085,7 @@ static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
1085 pmdp_clear_flush(vma, haddr, pmd); 1085 pmdp_clear_flush(vma, haddr, pmd);
1086 /* leave pmd empty until pte is filled */ 1086 /* leave pmd empty until pte is filled */
1087 1087
1088 pgtable = pgtable_trans_huge_withdraw(mm); 1088 pgtable = pgtable_trans_huge_withdraw(mm, pmd);
1089 pmd_populate(mm, &_pmd, pgtable); 1089 pmd_populate(mm, &_pmd, pgtable);
1090 1090
1091 for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) { 1091 for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
@@ -1265,7 +1265,9 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
1265 * young bit, instead of the current set_pmd_at. 1265 * young bit, instead of the current set_pmd_at.
1266 */ 1266 */
1267 _pmd = pmd_mkyoung(pmd_mkdirty(*pmd)); 1267 _pmd = pmd_mkyoung(pmd_mkdirty(*pmd));
1268 set_pmd_at(mm, addr & HPAGE_PMD_MASK, pmd, _pmd); 1268 if (pmdp_set_access_flags(vma, addr & HPAGE_PMD_MASK,
1269 pmd, _pmd, 1))
1270 update_mmu_cache_pmd(vma, addr, pmd);
1269 } 1271 }
1270 if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) { 1272 if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
1271 if (page->mapping && trylock_page(page)) { 1273 if (page->mapping && trylock_page(page)) {
@@ -1358,9 +1360,15 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
1358 struct page *page; 1360 struct page *page;
1359 pgtable_t pgtable; 1361 pgtable_t pgtable;
1360 pmd_t orig_pmd; 1362 pmd_t orig_pmd;
1361 pgtable = pgtable_trans_huge_withdraw(tlb->mm); 1363 /*
1364 * For architectures like ppc64 we look at deposited pgtable
1365 * when calling pmdp_get_and_clear. So do the
1366 * pgtable_trans_huge_withdraw after finishing pmdp related
1367 * operations.
1368 */
1362 orig_pmd = pmdp_get_and_clear(tlb->mm, addr, pmd); 1369 orig_pmd = pmdp_get_and_clear(tlb->mm, addr, pmd);
1363 tlb_remove_pmd_tlb_entry(tlb, pmd, addr); 1370 tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
1371 pgtable = pgtable_trans_huge_withdraw(tlb->mm, pmd);
1364 if (is_huge_zero_pmd(orig_pmd)) { 1372 if (is_huge_zero_pmd(orig_pmd)) {
1365 tlb->mm->nr_ptes--; 1373 tlb->mm->nr_ptes--;
1366 spin_unlock(&tlb->mm->page_table_lock); 1374 spin_unlock(&tlb->mm->page_table_lock);
@@ -1691,7 +1699,7 @@ static int __split_huge_page_map(struct page *page,
1691 pmd = page_check_address_pmd(page, mm, address, 1699 pmd = page_check_address_pmd(page, mm, address,
1692 PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG); 1700 PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG);
1693 if (pmd) { 1701 if (pmd) {
1694 pgtable = pgtable_trans_huge_withdraw(mm); 1702 pgtable = pgtable_trans_huge_withdraw(mm, pmd);
1695 pmd_populate(mm, &_pmd, pgtable); 1703 pmd_populate(mm, &_pmd, pgtable);
1696 1704
1697 haddr = address; 1705 haddr = address;
@@ -2359,9 +2367,9 @@ static void collapse_huge_page(struct mm_struct *mm,
2359 spin_lock(&mm->page_table_lock); 2367 spin_lock(&mm->page_table_lock);
2360 BUG_ON(!pmd_none(*pmd)); 2368 BUG_ON(!pmd_none(*pmd));
2361 page_add_new_anon_rmap(new_page, vma, address); 2369 page_add_new_anon_rmap(new_page, vma, address);
2370 pgtable_trans_huge_deposit(mm, pmd, pgtable);
2362 set_pmd_at(mm, address, pmd, _pmd); 2371 set_pmd_at(mm, address, pmd, _pmd);
2363 update_mmu_cache_pmd(vma, address, pmd); 2372 update_mmu_cache_pmd(vma, address, pmd);
2364 pgtable_trans_huge_deposit(mm, pgtable);
2365 spin_unlock(&mm->page_table_lock); 2373 spin_unlock(&mm->page_table_lock);
2366 2374
2367 *hpage = NULL; 2375 *hpage = NULL;
@@ -2667,7 +2675,7 @@ static void __split_huge_zero_page_pmd(struct vm_area_struct *vma,
2667 pmdp_clear_flush(vma, haddr, pmd); 2675 pmdp_clear_flush(vma, haddr, pmd);
2668 /* leave pmd empty until pte is filled */ 2676 /* leave pmd empty until pte is filled */
2669 2677
2670 pgtable = pgtable_trans_huge_withdraw(mm); 2678 pgtable = pgtable_trans_huge_withdraw(mm, pmd);
2671 pmd_populate(mm, &_pmd, pgtable); 2679 pmd_populate(mm, &_pmd, pgtable);
2672 2680
2673 for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) { 2681 for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {