author    Carsten Otte <cotte@de.ibm.com>	2011-10-30 10:17:01 -0400
committer Martin Schwidefsky <schwidefsky@de.ibm.com>	2011-10-30 10:16:44 -0400
commit    cc772456ac9b460693492b3a3d89e8c81eda5874 (patch)
tree      7cd7a0cc3dd7fffeae5ed8e98ff57b709247c9e5 /arch/s390/mm
parent    a9162f238a84ee05b09ea4b0ebd97fb20448c28c (diff)
[S390] fix list corruption in gmap reverse mapping
This introduces locking via mm->page_table_lock to protect the rmap
list for guest mappings from being corrupted by concurrent operations.

Signed-off-by: Carsten Otte <cotte@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
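For readers unfamiliar with the idiom, the fix applies the standard pattern of serializing every mutation of a shared linked list under a single spinlock. Below is a minimal sketch of that pattern using the kernel's list and spinlock primitives; struct rmap_entry and the rmap_attach()/rmap_detach() helpers are hypothetical names for illustration, not the patched code.

#include <linux/list.h>
#include <linux/mm_types.h>
#include <linux/spinlock.h>

/* Hypothetical reverse-map entry; a stand-in for the entries kept
 * on the per-page mapper list in arch/s390/mm/pgtable.c. */
struct rmap_entry {
	struct list_head list;
};

/*
 * Every writer takes mm->page_table_lock around the list update, so
 * concurrent adds and removes can no longer interleave and leave the
 * list's next/prev pointers aimed at freed or half-linked nodes.
 */
static void rmap_attach(struct mm_struct *mm, struct list_head *mapper,
			struct rmap_entry *rmap)
{
	spin_lock(&mm->page_table_lock);
	list_add(&rmap->list, mapper);
	spin_unlock(&mm->page_table_lock);
}

static void rmap_detach(struct mm_struct *mm, struct rmap_entry *rmap)
{
	spin_lock(&mm->page_table_lock);
	list_del(&rmap->list);
	spin_unlock(&mm->page_table_lock);
}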
Diffstat (limited to 'arch/s390/mm')
-rw-r--r--	arch/s390/mm/pgtable.c	| 9 +++++++++
1 file changed, 9 insertions(+), 0 deletions(-)
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index e4a4cefb92b3..96e85ac89269 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -222,6 +222,7 @@ void gmap_free(struct gmap *gmap)
 
 	/* Free all segment & region tables. */
 	down_read(&gmap->mm->mmap_sem);
+	spin_lock(&gmap->mm->page_table_lock);
 	list_for_each_entry_safe(page, next, &gmap->crst_list, lru) {
 		table = (unsigned long *) page_to_phys(page);
 		if ((*table & _REGION_ENTRY_TYPE_MASK) == 0)
@@ -230,6 +231,7 @@ void gmap_free(struct gmap *gmap)
 		gmap_unlink_segment(gmap, table);
 		__free_pages(page, ALLOC_ORDER);
 	}
+	spin_unlock(&gmap->mm->page_table_lock);
 	up_read(&gmap->mm->mmap_sem);
 	list_del(&gmap->list);
 	kfree(gmap);
@@ -300,6 +302,7 @@ int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len)
 
 	flush = 0;
 	down_read(&gmap->mm->mmap_sem);
+	spin_lock(&gmap->mm->page_table_lock);
 	for (off = 0; off < len; off += PMD_SIZE) {
 		/* Walk the guest addr space page table */
 		table = gmap->table + (((to + off) >> 53) & 0x7ff);
@@ -321,6 +324,7 @@ int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len)
 		*table = _SEGMENT_ENTRY_INV;
 	}
 out:
+	spin_unlock(&gmap->mm->page_table_lock);
 	up_read(&gmap->mm->mmap_sem);
 	if (flush)
 		gmap_flush_tlb(gmap);
@@ -351,6 +355,7 @@ int gmap_map_segment(struct gmap *gmap, unsigned long from,
 
 	flush = 0;
 	down_read(&gmap->mm->mmap_sem);
+	spin_lock(&gmap->mm->page_table_lock);
 	for (off = 0; off < len; off += PMD_SIZE) {
 		/* Walk the gmap address space page table */
 		table = gmap->table + (((to + off) >> 53) & 0x7ff);
@@ -374,12 +379,14 @@ int gmap_map_segment(struct gmap *gmap, unsigned long from,
 		flush |= gmap_unlink_segment(gmap, table);
 		*table = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | (from + off);
 	}
+	spin_unlock(&gmap->mm->page_table_lock);
 	up_read(&gmap->mm->mmap_sem);
 	if (flush)
 		gmap_flush_tlb(gmap);
 	return 0;
 
 out_unmap:
+	spin_unlock(&gmap->mm->page_table_lock);
 	up_read(&gmap->mm->mmap_sem);
 	gmap_unmap_segment(gmap, to, len);
 	return -ENOMEM;
@@ -446,7 +453,9 @@ unsigned long gmap_fault(unsigned long address, struct gmap *gmap)
 	page = pmd_page(*pmd);
 	mp = (struct gmap_pgtable *) page->index;
 	rmap->entry = table;
+	spin_lock(&mm->page_table_lock);
 	list_add(&rmap->list, &mp->mapper);
+	spin_unlock(&mm->page_table_lock);
 	/* Set gmap segment table entry to page table. */
 	*table = pmd_val(*pmd) & PAGE_MASK;
 	return vmaddr | (address & ~PMD_MASK);
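A closing observation, read off the hunks rather than stated in the commit message: the new lock always nests inside mmap_sem held for read, and gmap_fault() holds it only around the list_add() itself rather than across the whole table walk. A condensed sketch of that nesting follows; gmap_update_under_locks() is a hypothetical name, not a function in the patch.

#include <linux/mm_types.h>
#include <linux/rwsem.h>
#include <linux/spinlock.h>

/*
 * Lock nesting established by the patch: mmap_sem (read) on the
 * outside, mm->page_table_lock on the inside. Acquiring them in this
 * order everywhere avoids ABBA deadlocks between the gmap paths.
 */
static void gmap_update_under_locks(struct mm_struct *mm)
{
	down_read(&mm->mmap_sem);
	spin_lock(&mm->page_table_lock);
	/* ... walk the gmap segment tables and update rmap entries ... */
	spin_unlock(&mm->page_table_lock);
	up_read(&mm->mmap_sem);
}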