aboutsummaryrefslogtreecommitdiffstats
path: root/arch/s390/mm
diff options
context:
space:
mode:
authorDavid Hildenbrand <dahi@linux.vnet.ibm.com>2016-03-08 06:30:46 -0500
committerChristian Borntraeger <borntraeger@de.ibm.com>2016-06-20 03:54:28 -0400
commit0f7f84891516dc1ff7500fae12143710d2d9d11f (patch)
treeae5185ba98c655404b8f5ee9e46060824fb08cba /arch/s390/mm
parent998f637cc4b9ef3fa32b196294a3136ee05271a2 (diff)
s390/mm: fix races on gmap_shadow creation
Before any thread is allowed to use a gmap_shadow, it has to be fully initialized. However, for invalidation to work properly, we have to register the new gmap_shadow before we protect the parent gmap table. Because locking is tricky, and we have to avoid duplicate gmaps, let's introduce an initialized field, that signalizes other threads if that gmap_shadow can already be used or if they have to retry. Let's properly return errors using ERR_PTR() instead of simply returning NULL, so a caller can properly react on the error. Acked-by: Martin Schwidefsky <schwidefsky@de.ibm.com> Signed-off-by: David Hildenbrand <dahi@linux.vnet.ibm.com> Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
Diffstat (limited to 'arch/s390/mm')
-rw-r--r--arch/s390/mm/gmap.c45
1 file changed, 28 insertions(+), 17 deletions(-)
diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
index a396e58b5a43..a7dfb337e133 100644
--- a/arch/s390/mm/gmap.c
+++ b/arch/s390/mm/gmap.c
@@ -1384,7 +1384,8 @@ static void gmap_unshadow(struct gmap *sg)
1384 * @asce: ASCE for which the shadow table is created 1384 * @asce: ASCE for which the shadow table is created
1385 * 1385 *
1386 * Returns the pointer to a gmap if a shadow table with the given asce is 1386 * Returns the pointer to a gmap if a shadow table with the given asce is
1387 * already available, otherwise NULL 1387 * already available, ERR_PTR(-EAGAIN) if another one is just being created,
1388 * otherwise NULL
1388 */ 1389 */
1389static struct gmap *gmap_find_shadow(struct gmap *parent, unsigned long asce) 1390static struct gmap *gmap_find_shadow(struct gmap *parent, unsigned long asce)
1390{ 1391{
@@ -1393,6 +1394,8 @@ static struct gmap *gmap_find_shadow(struct gmap *parent, unsigned long asce)
1393 list_for_each_entry(sg, &parent->children, list) { 1394 list_for_each_entry(sg, &parent->children, list) {
1394 if (sg->orig_asce != asce || sg->removed) 1395 if (sg->orig_asce != asce || sg->removed)
1395 continue; 1396 continue;
1397 if (!sg->initialized)
1398 return ERR_PTR(-EAGAIN);
1396 atomic_inc(&sg->ref_count); 1399 atomic_inc(&sg->ref_count);
1397 return sg; 1400 return sg;
1398 } 1401 }
@@ -1409,8 +1412,9 @@ static struct gmap *gmap_find_shadow(struct gmap *parent, unsigned long asce)
1409 * The shadow table will be removed automatically on any change to the 1412 * The shadow table will be removed automatically on any change to the
1410 * PTE mapping for the source table. 1413 * PTE mapping for the source table.
1411 * 1414 *
1412 * Returns a guest address space structure, NULL if out of memory or if 1415 * Returns a guest address space structure, ERR_PTR(-ENOMEM) if out of memory,
1413 * anything goes wrong while protecting the top level pages. 1416 * ERR_PTR(-EAGAIN) if the caller has to retry and ERR_PTR(-EFAULT) if the
1417 * parent gmap table could not be protected.
1414 */ 1418 */
1415struct gmap *gmap_shadow(struct gmap *parent, unsigned long asce) 1419struct gmap *gmap_shadow(struct gmap *parent, unsigned long asce)
1416{ 1420{
@@ -1428,30 +1432,37 @@ struct gmap *gmap_shadow(struct gmap *parent, unsigned long asce)
1428 limit = -1UL >> (33 - (((asce & _ASCE_TYPE_MASK) >> 2) * 11)); 1432 limit = -1UL >> (33 - (((asce & _ASCE_TYPE_MASK) >> 2) * 11));
1429 new = gmap_alloc(limit); 1433 new = gmap_alloc(limit);
1430 if (!new) 1434 if (!new)
1431 return NULL; 1435 return ERR_PTR(-ENOMEM);
1432 new->mm = parent->mm; 1436 new->mm = parent->mm;
1433 new->parent = gmap_get(parent); 1437 new->parent = gmap_get(parent);
1434 new->orig_asce = asce; 1438 new->orig_asce = asce;
1439 new->initialized = false;
1440 spin_lock(&parent->shadow_lock);
1441 /* Recheck if another CPU created the same shadow */
1442 sg = gmap_find_shadow(parent, asce);
1443 if (sg) {
1444 spin_unlock(&parent->shadow_lock);
1445 gmap_free(new);
1446 return sg;
1447 }
1448 atomic_set(&new->ref_count, 2);
1449 list_add(&new->list, &parent->children);
1450 spin_unlock(&parent->shadow_lock);
1451 /* protect after insertion, so it will get properly invalidated */
1435 down_read(&parent->mm->mmap_sem); 1452 down_read(&parent->mm->mmap_sem);
1436 rc = gmap_protect_range(parent, asce & _ASCE_ORIGIN, 1453 rc = gmap_protect_range(parent, asce & _ASCE_ORIGIN,
1437 ((asce & _ASCE_TABLE_LENGTH) + 1) * 4096, 1454 ((asce & _ASCE_TABLE_LENGTH) + 1) * 4096,
1438 PROT_READ, PGSTE_VSIE_BIT); 1455 PROT_READ, PGSTE_VSIE_BIT);
1439 up_read(&parent->mm->mmap_sem); 1456 up_read(&parent->mm->mmap_sem);
1457 spin_lock(&parent->shadow_lock);
1458 new->initialized = true;
1440 if (rc) { 1459 if (rc) {
1441 atomic_set(&new->ref_count, 2); 1460 list_del(&new->list);
1442 spin_lock(&parent->shadow_lock);
1443 /* Recheck if another CPU created the same shadow */
1444 sg = gmap_find_shadow(parent, asce);
1445 if (!sg) {
1446 list_add(&new->list, &parent->children);
1447 sg = new;
1448 new = NULL;
1449 }
1450 spin_unlock(&parent->shadow_lock);
1451 }
1452 if (new)
1453 gmap_free(new); 1461 gmap_free(new);
1454 return sg; 1462 new = ERR_PTR(rc);
1463 }
1464 spin_unlock(&parent->shadow_lock);
1465 return new;
1455} 1466}
1456EXPORT_SYMBOL_GPL(gmap_shadow); 1467EXPORT_SYMBOL_GPL(gmap_shadow);
1457 1468