author      Jens Axboe <jaxboe@fusionio.com>    2011-07-01 10:17:13 -0400
committer   Jens Axboe <jaxboe@fusionio.com>    2011-07-01 10:17:13 -0400
commit      04bf7869ca0fd12009aee301cac2264a36df4d98 (patch)
tree        66cb81ebf8b76560a31433c2c493dc430c914af9 /block/genhd.c
parent      d2f31a5fd60d168b00fc4f7617b68a1287b21e90 (diff)
parent      7b28afe01ab6ffb5f152f47831b44933facd2328 (diff)
Merge branch 'for-linus' into for-3.1/core

Conflicts:
        block/blk-throttle.c
        block/cfq-iosched.c

Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
Diffstat (limited to 'block/genhd.c')
-rw-r--r--  block/genhd.c  79
1 file changed, 45 insertions(+), 34 deletions(-)
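The genhd.c change brought in by this merge (shown in the diff below) folds the sync/async __disk_block_events(disk, bool sync) pair into a single disk_block_events() that always cancels synchronously, with a new block_mutex ensuring the first blocker finishes cancelling the event work before any later blocker returns. The following is a minimal userspace sketch of that counted block/unblock pattern, assuming pthread mutexes in place of the kernel's spinlock and mutex and printf in place of the delayed-work calls; all names here are illustrative, not kernel API.

/*
 * Userspace analogue of the counted block/unblock scheme.  pthread
 * mutexes stand in for ev->lock (a spinlock in the kernel) and
 * ev->block_mutex; printf stands in for the delayed-work calls.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct events {
        pthread_mutex_t block_mutex;    /* serializes blockers */
        pthread_mutex_t lock;           /* protects the depth counter */
        int block;                      /* event blocking depth */
};

static void block_events(struct events *ev)
{
        bool cancel;

        /*
         * Outer mutex: the first blocker must finish cancelling the
         * event work before any later blocker is allowed to return.
         */
        pthread_mutex_lock(&ev->block_mutex);

        pthread_mutex_lock(&ev->lock);
        cancel = (ev->block++ == 0);    /* only the 0 -> 1 transition cancels */
        pthread_mutex_unlock(&ev->lock);

        if (cancel)
                printf("cancelling event work synchronously\n");

        pthread_mutex_unlock(&ev->block_mutex);
}

static void unblock_events(struct events *ev)
{
        pthread_mutex_lock(&ev->lock);
        if (--ev->block == 0)           /* last matching unblock re-arms polling */
                printf("re-arming event polling\n");
        pthread_mutex_unlock(&ev->lock);
}

int main(void)
{
        struct events ev = {
                .block_mutex = PTHREAD_MUTEX_INITIALIZER,
                .lock = PTHREAD_MUTEX_INITIALIZER,
                .block = 0,
        };

        block_events(&ev);      /* depth 0 -> 1: cancels the work */
        block_events(&ev);      /* depth 1 -> 2: no cancel */
        unblock_events(&ev);    /* depth 2 -> 1: still blocked */
        unblock_events(&ev);    /* depth 1 -> 0: re-arms polling */
        return 0;
}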
diff --git a/block/genhd.c b/block/genhd.c
index ed3fe8224f9f..82d97c3594a8 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -1371,6 +1371,7 @@ struct disk_events {
 	struct gendisk		*disk;		/* the associated disk */
 	spinlock_t		lock;
 
+	struct mutex		block_mutex;	/* protects blocking */
 	int			block;		/* event blocking depth */
 	unsigned int		pending;	/* events already sent out */
 	unsigned int		clearing;	/* events being cleared */
@@ -1414,22 +1415,44 @@ static unsigned long disk_events_poll_jiffies(struct gendisk *disk)
 	return msecs_to_jiffies(intv_msecs);
 }
 
-static void __disk_block_events(struct gendisk *disk, bool sync)
+/**
+ * disk_block_events - block and flush disk event checking
+ * @disk: disk to block events for
+ *
+ * On return from this function, it is guaranteed that event checking
+ * isn't in progress and won't happen until unblocked by
+ * disk_unblock_events().  Events blocking is counted and the actual
+ * unblocking happens after the matching number of unblocks are done.
+ *
+ * Note that this intentionally does not block event checking from
+ * disk_clear_events().
+ *
+ * CONTEXT:
+ * Might sleep.
+ */
+void disk_block_events(struct gendisk *disk)
 {
 	struct disk_events *ev = disk->ev;
 	unsigned long flags;
 	bool cancel;
 
+	if (!ev)
+		return;
+
+	/*
+	 * Outer mutex ensures that the first blocker completes canceling
+	 * the event work before further blockers are allowed to finish.
+	 */
+	mutex_lock(&ev->block_mutex);
+
 	spin_lock_irqsave(&ev->lock, flags);
 	cancel = !ev->block++;
 	spin_unlock_irqrestore(&ev->lock, flags);
 
-	if (cancel) {
-		if (sync)
-			cancel_delayed_work_sync(&disk->ev->dwork);
-		else
-			cancel_delayed_work(&disk->ev->dwork);
-	}
+	if (cancel)
+		cancel_delayed_work_sync(&disk->ev->dwork);
+
+	mutex_unlock(&ev->block_mutex);
 }
 
 static void __disk_unblock_events(struct gendisk *disk, bool check_now)
@@ -1461,27 +1484,6 @@ out_unlock:
 }
 
 /**
- * disk_block_events - block and flush disk event checking
- * @disk: disk to block events for
- *
- * On return from this function, it is guaranteed that event checking
- * isn't in progress and won't happen until unblocked by
- * disk_unblock_events().  Events blocking is counted and the actual
- * unblocking happens after the matching number of unblocks are done.
- *
- * Note that this intentionally does not block event checking from
- * disk_clear_events().
- *
- * CONTEXT:
- * Might sleep.
- */
-void disk_block_events(struct gendisk *disk)
-{
-	if (disk->ev)
-		__disk_block_events(disk, true);
-}
-
-/**
  * disk_unblock_events - unblock disk event checking
  * @disk: disk to unblock events for
  *
@@ -1508,10 +1510,18 @@ void disk_unblock_events(struct gendisk *disk)
  */
 void disk_check_events(struct gendisk *disk)
 {
-	if (disk->ev) {
-		__disk_block_events(disk, false);
-		__disk_unblock_events(disk, true);
+	struct disk_events *ev = disk->ev;
+	unsigned long flags;
+
+	if (!ev)
+		return;
+
+	spin_lock_irqsave(&ev->lock, flags);
+	if (!ev->block) {
+		cancel_delayed_work(&ev->dwork);
+		queue_delayed_work(system_nrt_wq, &ev->dwork, 0);
 	}
+	spin_unlock_irqrestore(&ev->lock, flags);
 }
 EXPORT_SYMBOL_GPL(disk_check_events);
 
@@ -1546,7 +1556,7 @@ unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask)
 	spin_unlock_irq(&ev->lock);
 
 	/* uncondtionally schedule event check and wait for it to finish */
-	__disk_block_events(disk, true);
+	disk_block_events(disk);
 	queue_delayed_work(system_nrt_wq, &ev->dwork, 0);
 	flush_delayed_work(&ev->dwork);
 	__disk_unblock_events(disk, false);
@@ -1664,7 +1674,7 @@ static ssize_t disk_events_poll_msecs_store(struct device *dev,
 	if (intv < 0 && intv != -1)
 		return -EINVAL;
 
-	__disk_block_events(disk, true);
+	disk_block_events(disk);
 	disk->ev->poll_msecs = intv;
 	__disk_unblock_events(disk, true);
 
@@ -1750,6 +1760,7 @@ static void disk_add_events(struct gendisk *disk)
 	INIT_LIST_HEAD(&ev->node);
 	ev->disk = disk;
 	spin_lock_init(&ev->lock);
+	mutex_init(&ev->block_mutex);
 	ev->block = 1;
 	ev->poll_msecs = -1;
 	INIT_DELAYED_WORK(&ev->dwork, disk_events_workfn);
@@ -1770,7 +1781,7 @@ static void disk_del_events(struct gendisk *disk)
 	if (!disk->ev)
 		return;
 
-	__disk_block_events(disk, true);
+	disk_block_events(disk);
 
 	mutex_lock(&disk_events_mutex);
 	list_del_init(&disk->ev->node);
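The reworked disk_check_events() above no longer needs a block/unblock round-trip: it peeks at the blocking depth under the event lock and (re)queues an immediate check only when nothing is blocked. A small standalone analogue of that check-when-unblocked logic follows, again assuming pthreads and printf standing in for the kernel's spinlock and the cancel_delayed_work()/queue_delayed_work() pair; the names are illustrative, not kernel API.

/*
 * Userspace analogue of the reworked disk_check_events(): queue an
 * immediate event check only when the blocking depth is zero.
 */
#include <pthread.h>
#include <stdio.h>

struct events {
        pthread_mutex_t lock;   /* protects the depth counter */
        int block;              /* event blocking depth */
};

static void check_events(struct events *ev)
{
        pthread_mutex_lock(&ev->lock);
        if (ev->block == 0)
                printf("queueing immediate event check\n");
        /* if blocked, do nothing: the final unblock re-arms polling */
        pthread_mutex_unlock(&ev->lock);
}

int main(void)
{
        struct events ev = {
                .lock = PTHREAD_MUTEX_INITIALIZER,
                .block = 0,
        };

        check_events(&ev);      /* unblocked: queues a check */
        ev.block = 1;           /* pretend a blocker is active */
        check_events(&ev);      /* blocked: silently skipped */
        return 0;
}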