aboutsummaryrefslogtreecommitdiffstats
path: root/block
diff options
context:
space:
mode:
authorTejun Heo <tj@kernel.org>2011-06-09 14:43:55 -0400
committerJens Axboe <jaxboe@fusionio.com>2011-06-09 14:43:55 -0400
commitc3af54afbac3675337cedf326b7b127ffa7f7327 (patch)
treecc49ada3a2ef88f1415af73635cff7d062615856 /block
parenta9dce2a3b4f0686dd66cb44d4826a59508bce969 (diff)
block: remove non-syncing __disk_block_events() and fold it into disk_block_events()
After the previous update to disk_check_events(), nobody is using non-syncing __disk_block_events(). Remove @sync and, as this makes __disk_block_events() virtually identical to disk_block_events(), remove the underscore prefixed version. Signed-off-by: Tejun Heo <tj@kernel.org> Cc: Jens Axboe <axboe@kernel.dk> Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
Diffstat (limited to 'block')
-rw-r--r--block/genhd.c55
1 file changed, 24 insertions, 31 deletions
diff --git a/block/genhd.c b/block/genhd.c
index 3f0933077642..ab0731d8976d 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -1414,22 +1414,36 @@ static unsigned long disk_events_poll_jiffies(struct gendisk *disk)
1414 return msecs_to_jiffies(intv_msecs); 1414 return msecs_to_jiffies(intv_msecs);
1415} 1415}
1416 1416
1417static void __disk_block_events(struct gendisk *disk, bool sync) 1417/**
1418 * disk_block_events - block and flush disk event checking
1419 * @disk: disk to block events for
1420 *
1421 * On return from this function, it is guaranteed that event checking
1422 * isn't in progress and won't happen until unblocked by
1423 * disk_unblock_events(). Events blocking is counted and the actual
1424 * unblocking happens after the matching number of unblocks are done.
1425 *
1426 * Note that this intentionally does not block event checking from
1427 * disk_clear_events().
1428 *
1429 * CONTEXT:
1430 * Might sleep.
1431 */
1432void disk_block_events(struct gendisk *disk)
1418{ 1433{
1419 struct disk_events *ev = disk->ev; 1434 struct disk_events *ev = disk->ev;
1420 unsigned long flags; 1435 unsigned long flags;
1421 bool cancel; 1436 bool cancel;
1422 1437
1438 if (!ev)
1439 return;
1440
1423 spin_lock_irqsave(&ev->lock, flags); 1441 spin_lock_irqsave(&ev->lock, flags);
1424 cancel = !ev->block++; 1442 cancel = !ev->block++;
1425 spin_unlock_irqrestore(&ev->lock, flags); 1443 spin_unlock_irqrestore(&ev->lock, flags);
1426 1444
1427 if (cancel) { 1445 if (cancel)
1428 if (sync) 1446 cancel_delayed_work_sync(&disk->ev->dwork);
1429 cancel_delayed_work_sync(&disk->ev->dwork);
1430 else
1431 cancel_delayed_work(&disk->ev->dwork);
1432 }
1433} 1447}
1434 1448
1435static void __disk_unblock_events(struct gendisk *disk, bool check_now) 1449static void __disk_unblock_events(struct gendisk *disk, bool check_now)
@@ -1461,27 +1475,6 @@ out_unlock:
1461} 1475}
1462 1476
1463/** 1477/**
1464 * disk_block_events - block and flush disk event checking
1465 * @disk: disk to block events for
1466 *
1467 * On return from this function, it is guaranteed that event checking
1468 * isn't in progress and won't happen until unblocked by
1469 * disk_unblock_events(). Events blocking is counted and the actual
1470 * unblocking happens after the matching number of unblocks are done.
1471 *
1472 * Note that this intentionally does not block event checking from
1473 * disk_clear_events().
1474 *
1475 * CONTEXT:
1476 * Might sleep.
1477 */
1478void disk_block_events(struct gendisk *disk)
1479{
1480 if (disk->ev)
1481 __disk_block_events(disk, true);
1482}
1483
1484/**
1485 * disk_unblock_events - unblock disk event checking 1478 * disk_unblock_events - unblock disk event checking
1486 * @disk: disk to unblock events for 1479 * @disk: disk to unblock events for
1487 * 1480 *
@@ -1554,7 +1547,7 @@ unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask)
1554 spin_unlock_irq(&ev->lock); 1547 spin_unlock_irq(&ev->lock);
1555 1548
1556 /* unconditionally schedule event check and wait for it to finish */ 1549 /* unconditionally schedule event check and wait for it to finish */
1557 __disk_block_events(disk, true); 1550 disk_block_events(disk);
1558 queue_delayed_work(system_nrt_wq, &ev->dwork, 0); 1551 queue_delayed_work(system_nrt_wq, &ev->dwork, 0);
1559 flush_delayed_work(&ev->dwork); 1552 flush_delayed_work(&ev->dwork);
1560 __disk_unblock_events(disk, false); 1553 __disk_unblock_events(disk, false);
@@ -1672,7 +1665,7 @@ static ssize_t disk_events_poll_msecs_store(struct device *dev,
1672 if (intv < 0 && intv != -1) 1665 if (intv < 0 && intv != -1)
1673 return -EINVAL; 1666 return -EINVAL;
1674 1667
1675 __disk_block_events(disk, true); 1668 disk_block_events(disk);
1676 disk->ev->poll_msecs = intv; 1669 disk->ev->poll_msecs = intv;
1677 __disk_unblock_events(disk, true); 1670 __disk_unblock_events(disk, true);
1678 1671
@@ -1778,7 +1771,7 @@ static void disk_del_events(struct gendisk *disk)
1778 if (!disk->ev) 1771 if (!disk->ev)
1779 return; 1772 return;
1780 1773
1781 __disk_block_events(disk, true); 1774 disk_block_events(disk);
1782 1775
1783 mutex_lock(&disk_events_mutex); 1776 mutex_lock(&disk_events_mutex);
1784 list_del_init(&disk->ev->node); 1777 list_del_init(&disk->ev->node);