about | summary | refs | log | tree | commit | diff | stats
path: root/fs/block_dev.c
diff options
context:
space:
mode:
Diffstat (limited to 'fs/block_dev.c')
-rw-r--r--  fs/block_dev.c | 75 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 51 insertions(+), 24 deletions(-)
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 1a2421f908f0..ff77262e887c 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -44,24 +44,28 @@ inline struct block_device *I_BDEV(struct inode *inode)
 {
 	return &BDEV_I(inode)->bdev;
 }
-
 EXPORT_SYMBOL(I_BDEV);
 
 /*
- * move the inode from it's current bdi to the a new bdi. if the inode is dirty
- * we need to move it onto the dirty list of @dst so that the inode is always
- * on the right list.
+ * Move the inode from its current bdi to a new bdi. If the inode is dirty we
+ * need to move it onto the dirty list of @dst so that the inode is always on
+ * the right list.
  */
 static void bdev_inode_switch_bdi(struct inode *inode,
 			struct backing_dev_info *dst)
 {
-	spin_lock(&inode_wb_list_lock);
+	struct backing_dev_info *old = inode->i_data.backing_dev_info;
+
+	if (unlikely(dst == old))		/* deadlock avoidance */
+		return;
+	bdi_lock_two(&old->wb, &dst->wb);
 	spin_lock(&inode->i_lock);
 	inode->i_data.backing_dev_info = dst;
 	if (inode->i_state & I_DIRTY)
 		list_move(&inode->i_wb_list, &dst->wb.b_dirty);
 	spin_unlock(&inode->i_lock);
-	spin_unlock(&inode_wb_list_lock);
+	spin_unlock(&old->wb.list_lock);
+	spin_unlock(&dst->wb.list_lock);
 }
 
 static sector_t max_block(struct block_device *bdev)
@@ -355,43 +359,48 @@ static loff_t block_llseek(struct file *file, loff_t offset, int origin)
 	mutex_lock(&bd_inode->i_mutex);
 	size = i_size_read(bd_inode);
 
+	retval = -EINVAL;
 	switch (origin) {
-	case 2:
+	case SEEK_END:
 		offset += size;
 		break;
-	case 1:
+	case SEEK_CUR:
 		offset += file->f_pos;
+	case SEEK_SET:
+		break;
+	default:
+		goto out;
 	}
-	retval = -EINVAL;
 	if (offset >= 0 && offset <= size) {
 		if (offset != file->f_pos) {
 			file->f_pos = offset;
 		}
 		retval = offset;
 	}
+out:
 	mutex_unlock(&bd_inode->i_mutex);
 	return retval;
 }
 
-int blkdev_fsync(struct file *filp, int datasync)
+int blkdev_fsync(struct file *filp, loff_t start, loff_t end, int datasync)
 {
 	struct inode *bd_inode = filp->f_mapping->host;
 	struct block_device *bdev = I_BDEV(bd_inode);
 	int error;
+
+	error = filemap_write_and_wait_range(filp->f_mapping, start, end);
+	if (error)
+		return error;
 
 	/*
 	 * There is no need to serialise calls to blkdev_issue_flush with
 	 * i_mutex and doing so causes performance issues with concurrent
 	 * O_SYNC writers to a block device.
 	 */
-	mutex_unlock(&bd_inode->i_mutex);
-
 	error = blkdev_issue_flush(bdev, GFP_KERNEL, NULL);
 	if (error == -EOPNOTSUPP)
 		error = 0;
 
-	mutex_lock(&bd_inode->i_mutex);
-
 	return error;
 }
 EXPORT_SYMBOL(blkdev_fsync);
@@ -547,6 +556,7 @@ struct block_device *bdget(dev_t dev)
 
 	if (inode->i_state & I_NEW) {
 		bdev->bd_contains = NULL;
+		bdev->bd_super = NULL;
 		bdev->bd_inode = inode;
 		bdev->bd_block_size = (1 << inode->i_blkbits);
 		bdev->bd_part_count = 0;
@@ -762,7 +772,19 @@ static struct block_device *bd_start_claiming(struct block_device *bdev,
 	if (!disk)
 		return ERR_PTR(-ENXIO);
 
-	whole = bdget_disk(disk, 0);
+	/*
+	 * Normally, @bdev should equal what's returned from bdget_disk()
+	 * if partno is 0; however, some drivers (floppy) use multiple
+	 * bdev's for the same physical device and @bdev may be one of the
+	 * aliases.  Keep @bdev if partno is 0.  This means claimer
+	 * tracking is broken for those devices but it has always been that
+	 * way.
+	 */
+	if (partno)
+		whole = bdget_disk(disk, 0);
+	else
+		whole = bdgrab(bdev);
+
 	module_put(disk->fops->owner);
 	put_disk(disk);
 	if (!whole)
@@ -1435,6 +1457,8 @@ static int __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part)
 
 int blkdev_put(struct block_device *bdev, fmode_t mode)
 {
+	mutex_lock(&bdev->bd_mutex);
+
 	if (mode & FMODE_EXCL) {
 		bool bdev_free;
 
@@ -1443,7 +1467,6 @@ int blkdev_put(struct block_device *bdev, fmode_t mode)
 		 * are protected with bdev_lock.  bd_mutex is to
 		 * synchronize disk_holder unlinking.
 		 */
-		mutex_lock(&bdev->bd_mutex);
 		spin_lock(&bdev_lock);
 
 		WARN_ON_ONCE(--bdev->bd_holders < 0);
@@ -1461,17 +1484,21 @@ int blkdev_put(struct block_device *bdev, fmode_t mode)
 		 * If this was the last claim, remove holder link and
 		 * unblock evpoll if it was a write holder.
 		 */
-		if (bdev_free) {
-			if (bdev->bd_write_holder) {
-				disk_unblock_events(bdev->bd_disk);
-				disk_check_events(bdev->bd_disk);
-				bdev->bd_write_holder = false;
-			}
+		if (bdev_free && bdev->bd_write_holder) {
+			disk_unblock_events(bdev->bd_disk);
+			bdev->bd_write_holder = false;
 		}
-
-		mutex_unlock(&bdev->bd_mutex);
 	}
 
+	/*
+	 * Trigger event checking and tell drivers to flush MEDIA_CHANGE
+	 * event.  This is to ensure detection of media removal commanded
+	 * from userland - e.g. eject(1).
+	 */
+	disk_flush_events(bdev->bd_disk, DISK_EVENT_MEDIA_CHANGE);
+
+	mutex_unlock(&bdev->bd_mutex);
+
 	return __blkdev_put(bdev, mode, 0);
 }
 EXPORT_SYMBOL(blkdev_put);