author	Dominik Dingel <dingel@linux.vnet.ibm.com>	2013-10-31 05:01:16 -0400
committer	Martin Schwidefsky <schwidefsky@de.ibm.com>	2013-10-31 11:52:58 -0400
commit	be39f1968e33ca641af120a2d659421ad2225dea (patch)
tree	a650e4ff8561174693a8ed19ccc97932b1883d60
parent	1db9e0513d7478f6c80ca0bc4f58f53fe49e27f8 (diff)
s390/mm: page_table_realloc returns failure
There is a possible race between setting has_pgste and the reallocation
of the page table; change the order to fix this.

Also, page_table_alloc_pgste can fail; in that case we need to propagate
this back as -ENOMEM to the caller of page_table_realloc.

Based on a patch by Christian Borntraeger <borntraeger@de.ibm.com>.

Reviewed-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Signed-off-by: Dominik Dingel <dingel@linux.vnet.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
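[Editorial note: the error propagation below relies on the kernel convention that a single unsigned long return value can encode either a valid address or a negated errno, tested with IS_ERR_VALUE() from include/linux/err.h. The following standalone sketch re-creates that encoding in userspace for illustration only; MAX_ERRNO, IS_ERR_VALUE and walk_step here are local stand-ins, not the kernel's own definitions.]

	#include <errno.h>
	#include <stdio.h>

	/*
	 * Local stand-ins mirroring the kernel's IS_ERR_VALUE idiom:
	 * the top MAX_ERRNO values of the unsigned long range are
	 * reserved for negated errno codes, so one return value can
	 * carry either an address or an error.
	 */
	#define MAX_ERRNO	4095
	#define IS_ERR_VALUE(x)	((unsigned long)(x) >= (unsigned long)-MAX_ERRNO)

	/*
	 * Hypothetical stand-in for a walker step such as
	 * page_table_realloc_pmd(): returns the next address on
	 * success, or -ENOMEM encoded in the same word on failure.
	 */
	static unsigned long walk_step(unsigned long addr, int fail)
	{
		if (fail)
			return (unsigned long)-ENOMEM;
		return addr + 0x1000;
	}

	int main(void)
	{
		unsigned long next;

		next = walk_step(0x10000, 0);
		if (IS_ERR_VALUE(next))
			printf("error: %ld\n", (long)next);
		else
			printf("next addr: 0x%lx\n", next);	/* 0x11000 */

		next = walk_step(0x10000, 1);
		if (IS_ERR_VALUE(next))
			printf("error: %ld\n", (long)next);	/* error: -12 */

		return 0;
	}

This is what lets page_table_realloc_pmd() hand back -ENOMEM through the same unsigned long that normally carries the next address to process.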
-rw-r--r--	arch/s390/mm/pgtable.c | 21 +++++++++++++--------
1 file changed, 13 insertions(+), 8 deletions(-)
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 94f37a9fb1e5..a9be08899b0c 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -1087,10 +1087,9 @@ again:
 			continue;
 		/* Allocate new page table with pgstes */
 		new = page_table_alloc_pgste(mm, addr);
-		if (!new) {
-			mm->context.has_pgste = 0;
-			continue;
-		}
+		if (!new)
+			return -ENOMEM;
+
 		spin_lock(&mm->page_table_lock);
 		if (likely((unsigned long *) pmd_deref(*pmd) == table)) {
 			/* Nuke pmd entry pointing to the "short" page table */
@@ -1128,13 +1127,15 @@ static unsigned long page_table_realloc_pud(struct mmu_gather *tlb,
 		if (pud_none_or_clear_bad(pud))
 			continue;
 		next = page_table_realloc_pmd(tlb, mm, pud, addr, next);
+		if (unlikely(IS_ERR_VALUE(next)))
+			return next;
 	} while (pud++, addr = next, addr != end);
 
 	return addr;
 }
 
-static void page_table_realloc(struct mmu_gather *tlb, struct mm_struct *mm,
+static unsigned long page_table_realloc(struct mmu_gather *tlb, struct mm_struct *mm,
 			       unsigned long addr, unsigned long end)
 {
 	unsigned long next;
 	pgd_t *pgd;
@@ -1145,7 +1146,11 @@ static void page_table_realloc(struct mmu_gather *tlb, struct mm_struct *mm,
 		if (pgd_none_or_clear_bad(pgd))
 			continue;
 		next = page_table_realloc_pud(tlb, mm, pgd, addr, next);
+		if (unlikely(IS_ERR_VALUE(next)))
+			return next;
 	} while (pgd++, addr = next, addr != end);
+
+	return 0;
 }
 
 /*
@@ -1165,9 +1170,9 @@ int s390_enable_sie(void)
 	/* split thp mappings and disable thp for future mappings */
 	thp_split_mm(mm);
 	/* Reallocate the page tables with pgstes */
-	mm->context.has_pgste = 1;
 	tlb_gather_mmu(&tlb, mm, 0, TASK_SIZE);
-	page_table_realloc(&tlb, mm, 0, TASK_SIZE);
+	if (!page_table_realloc(&tlb, mm, 0, TASK_SIZE))
+		mm->context.has_pgste = 1;
 	tlb_finish_mmu(&tlb, 0, TASK_SIZE);
 	up_write(&mm->mmap_sem);
 	return mm->context.has_pgste ? 0 : -ENOMEM;