author    Linus Torvalds <torvalds@linux-foundation.org>  2015-05-19 14:19:49 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2015-05-19 14:19:49 -0400
commit    416716ed390782e373875f39b39ee23c91902688 (patch)
tree      aebef7c785d76634dc2b15803f590101ddbf1ceb
parent    68465bb08ca68ab00997ff54a3a27dcb8c541357 (diff)
parent    5e95235ccd5442d4a4fe11ec4eb99ba1b7959368 (diff)
Merge tag 'powerpc-4.1-4' of git://git.kernel.org/pub/scm/linux/kernel/git/mpe/linux
Pull powerpc fixes from Michael Ellerman:

 - THP/hugetlb fixes from Aneesh.
 - MCE fix from Daniel.
 - TOC fix from Anton.

* tag 'powerpc-4.1-4' of git://git.kernel.org/pub/scm/linux/kernel/git/mpe/linux:
  powerpc: Align TOC to 256 bytes
  powerpc/mce: fix off by one errors in mce event handling
  powerpc/mm: Return NULL for not present hugetlb page
  powerpc/thp: Serialize pmd clear against a linux page table walk.
-rw-r--r--  arch/powerpc/kernel/mce.c          |  4
-rw-r--r--  arch/powerpc/kernel/vmlinux.lds.S  |  1
-rw-r--r--  arch/powerpc/mm/hugetlbpage.c      | 25
-rw-r--r--  arch/powerpc/mm/pgtable_64.c       | 11
4 files changed, 30 insertions(+), 11 deletions(-)
diff --git a/arch/powerpc/kernel/mce.c b/arch/powerpc/kernel/mce.c
index 15c99b649b04..b2eb4686bd8f 100644
--- a/arch/powerpc/kernel/mce.c
+++ b/arch/powerpc/kernel/mce.c
@@ -73,7 +73,7 @@ void save_mce_event(struct pt_regs *regs, long handled,
 		    uint64_t nip, uint64_t addr)
 {
 	uint64_t srr1;
-	int index = __this_cpu_inc_return(mce_nest_count);
+	int index = __this_cpu_inc_return(mce_nest_count) - 1;
 	struct machine_check_event *mce = this_cpu_ptr(&mce_event[index]);
 
 	/*
@@ -184,7 +184,7 @@ void machine_check_queue_event(void)
 	if (!get_mce_event(&evt, MCE_EVENT_RELEASE))
 		return;
 
-	index = __this_cpu_inc_return(mce_queue_count);
+	index = __this_cpu_inc_return(mce_queue_count) - 1;
 	/* If queue is full, just return for now. */
 	if (index >= MAX_MC_EVT) {
 		__this_cpu_dec(mce_queue_count);
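For context on the off-by-one fix above: __this_cpu_inc_return() hands back the value after the increment, so the first event sees 1 rather than 0, and using that value directly as an array index skips the first slot and can run past the end of a fixed-size buffer. Below is a minimal user-space sketch of that indexing pattern and the "- 1" correction; the counter, array, and sizes are stand-ins, not the kernel's.

#include <assert.h>
#include <stdatomic.h>

#define MAX_EVT 4

static atomic_int nest_count;       /* stands in for mce_nest_count  */
static int events[MAX_EVT];         /* stands in for the event array */

static int claim_slot(void)
{
	/* atomic_fetch_add returns the old value, so +1 mimics inc_return */
	int index = atomic_fetch_add(&nest_count, 1) + 1;

	return index - 1;           /* the "- 1" is the actual fix */
}

int main(void)
{
	for (int i = 0; i < MAX_EVT; i++) {
		int idx = claim_slot();
		assert(idx >= 0 && idx < MAX_EVT);  /* stays in bounds */
		events[idx] = i;
	}
	return 0;
}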
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index f096e72262f4..1db685104ffc 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -213,6 +213,7 @@ SECTIONS
 		*(.opd)
 	}
 
+	. = ALIGN(256);
 	.got : AT(ADDR(.got) - LOAD_OFFSET) {
 		__toc_start = .;
 #ifndef CONFIG_RELOCATABLE
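The one-line linker script change forces the .got/TOC section onto a 256-byte boundary. As a rough illustration of what that guarantee means, here is a hedged user-space stand-in, not part of the patch: an object forced to the same alignment, whose low eight address bits are therefore always zero (the array name and size are illustrative only).

#include <assert.h>
#include <stdint.h>

/* user-space stand-in for the TOC: force the same 256-byte alignment */
static char fake_toc[4096] __attribute__((aligned(256)));

int main(void)
{
	/* low 8 address bits are clear, which is what ALIGN(256) provides */
	assert(((uintptr_t)fake_toc & 0xff) == 0);
	return 0;
}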
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 0ce968b00b7c..3385e3d0506e 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -689,27 +689,34 @@ void hugetlb_free_pgd_range(struct mmu_gather *tlb,
 struct page *
 follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
 {
-	pte_t *ptep;
-	struct page *page;
+	pte_t *ptep, pte;
 	unsigned shift;
 	unsigned long mask, flags;
+	struct page *page = ERR_PTR(-EINVAL);
+
+	local_irq_save(flags);
+	ptep = find_linux_pte_or_hugepte(mm->pgd, address, &shift);
+	if (!ptep)
+		goto no_page;
+	pte = READ_ONCE(*ptep);
 	/*
+	 * Verify it is a huge page else bail.
 	 * Transparent hugepages are handled by generic code. We can skip them
 	 * here.
 	 */
-	local_irq_save(flags);
-	ptep = find_linux_pte_or_hugepte(mm->pgd, address, &shift);
+	if (!shift || pmd_trans_huge(__pmd(pte_val(pte))))
+		goto no_page;
 
-	/* Verify it is a huge page else bail. */
-	if (!ptep || !shift || pmd_trans_huge(*(pmd_t *)ptep)) {
-		local_irq_restore(flags);
-		return ERR_PTR(-EINVAL);
+	if (!pte_present(pte)) {
+		page = NULL;
+		goto no_page;
 	}
 	mask = (1UL << shift) - 1;
-	page = pte_page(*ptep);
+	page = pte_page(pte);
 	if (page)
 		page += (address & mask) / PAGE_SIZE;
 
+no_page:
 	local_irq_restore(flags);
 	return page;
 }
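The rework above follows a common lock-less walk pattern: take one READ_ONCE() snapshot of the entry, run every check (huge? present?) against that local copy so a concurrent update cannot change the entry between tests, and funnel all failures through a single no_page exit that restores interrupts. A rough user-space sketch of that snapshot-then-check shape follows; the bit layout, types, and names are invented for illustration and are not the kernel's.

#include <stdint.h>

#define PRESENT_BIT 0x1ULL
#define HUGE_BIT    0x2ULL

struct fake_page { int id; };
static struct fake_page pages[16];

/* read the entry exactly once, in the spirit of READ_ONCE() */
static inline uint64_t read_once_u64(const volatile uint64_t *p)
{
	return *p;
}

static struct fake_page *follow_entry(volatile uint64_t *entryp)
{
	uint64_t entry = read_once_u64(entryp); /* single snapshot */

	if (!(entry & HUGE_BIT))
		return NULL;                    /* not a huge mapping */
	if (!(entry & PRESENT_BIT))
		return NULL;                    /* not present, bail out */

	/* in this toy format the page index lives above the flag bits */
	return &pages[(entry >> 4) & 0xf];
}

int main(void)
{
	volatile uint64_t entry = (3ULL << 4) | HUGE_BIT | PRESENT_BIT;
	return follow_entry(&entry) == &pages[3] ? 0 : 1;
}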
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index 59daa5eeec25..6bfadf1aa5cb 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -839,6 +839,17 @@ pmd_t pmdp_get_and_clear(struct mm_struct *mm,
 	 * hash fault look at them.
 	 */
 	memset(pgtable, 0, PTE_FRAG_SIZE);
+	/*
+	 * Serialize against find_linux_pte_or_hugepte which does lock-less
+	 * lookup in page tables with local interrupts disabled. For huge pages
+	 * it casts pmd_t to pte_t. Since format of pte_t is different from
+	 * pmd_t we want to prevent transit from pmd pointing to page table
+	 * to pmd pointing to huge page (and back) while interrupts are disabled.
+	 * We clear pmd to possibly replace it with page table pointer in
+	 * different code paths. So make sure we wait for the parallel
+	 * find_linux_pte_or_hugepage to finish.
+	 */
+	kick_all_cpus_sync();
 	return old_pmd;
 }
 
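The new kick_all_cpus_sync() call pairs with the fact that find_linux_pte_or_hugepte() walks the page tables lock-lessly with local interrupts disabled: the IPI that kick_all_cpus_sync() waits on cannot be taken by a CPU until it re-enables interrupts, so when the call returns, every walk that could still see the old pmd has finished. As a loose user-space analogue (not kernel code; all names below are illustrative), an atomic "walkers in progress" count plays the role of the IRQ-disabled window, and the writer drains it after clearing the entry.

#include <stdatomic.h>
#include <stdint.h>

static atomic_uint walkers;          /* readers currently mid-walk */
static _Atomic uint64_t pmd_entry;   /* the entry being torn down  */

static uint64_t walk_lockless(void)
{
	uint64_t val;

	atomic_fetch_add(&walkers, 1);       /* ~ local_irq_save()     */
	val = atomic_load(&pmd_entry);       /* snapshot, may be stale */
	atomic_fetch_sub(&walkers, 1);       /* ~ local_irq_restore()  */
	return val;
}

static uint64_t clear_and_sync(void)
{
	uint64_t old = atomic_exchange(&pmd_entry, 0); /* ~ pmdp_get_and_clear() */

	/* ~ kick_all_cpus_sync(): wait until no walk is still in flight */
	while (atomic_load(&walkers) != 0)
		;
	return old;
}

int main(void)
{
	atomic_store(&pmd_entry, 0xabcdULL);
	(void)walk_lockless();
	return clear_and_sync() == 0xabcdULL ? 0 : 1;
}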