Diffstat (limited to 'fs/block_dev.c')
 -rw-r--r--   fs/block_dev.c   77
 1 file changed, 50 insertions, 27 deletions
diff --git a/fs/block_dev.c b/fs/block_dev.c
index b48c41bf0f86..975266be67d3 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -49,23 +49,15 @@ inline struct block_device *I_BDEV(struct inode *inode)
 }
 EXPORT_SYMBOL(I_BDEV);
 
-/*
- * Move the inode from its current bdi to a new bdi. Make sure the inode
- * is clean before moving so that it doesn't linger on the old bdi.
- */
-static void bdev_inode_switch_bdi(struct inode *inode,
-                        struct backing_dev_info *dst)
+static void bdev_write_inode(struct inode *inode)
 {
-        while (true) {
-                spin_lock(&inode->i_lock);
-                if (!(inode->i_state & I_DIRTY)) {
-                        inode->i_data.backing_dev_info = dst;
-                        spin_unlock(&inode->i_lock);
-                        return;
-                }
+        spin_lock(&inode->i_lock);
+        while (inode->i_state & I_DIRTY) {
                 spin_unlock(&inode->i_lock);
                 WARN_ON_ONCE(write_inode_now(inode, true));
+                spin_lock(&inode->i_lock);
         }
+        spin_unlock(&inode->i_lock);
 }
 
 /* Kill _all_ buffers and pagecache , dirty or not.. */
@@ -429,6 +421,46 @@ int bdev_write_page(struct block_device *bdev, sector_t sector,
 }
 EXPORT_SYMBOL_GPL(bdev_write_page);
 
+/**
+ * bdev_direct_access() - Get the address for directly-accessibly memory
+ * @bdev: The device containing the memory
+ * @sector: The offset within the device
+ * @addr: Where to put the address of the memory
+ * @pfn: The Page Frame Number for the memory
+ * @size: The number of bytes requested
+ *
+ * If a block device is made up of directly addressable memory, this function
+ * will tell the caller the PFN and the address of the memory.  The address
+ * may be directly dereferenced within the kernel without the need to call
+ * ioremap(), kmap() or similar.  The PFN is suitable for inserting into
+ * page tables.
+ *
+ * Return: negative errno if an error occurs, otherwise the number of bytes
+ * accessible at this address.
+ */
+long bdev_direct_access(struct block_device *bdev, sector_t sector,
+                        void **addr, unsigned long *pfn, long size)
+{
+        long avail;
+        const struct block_device_operations *ops = bdev->bd_disk->fops;
+
+        if (size < 0)
+                return size;
+        if (!ops->direct_access)
+                return -EOPNOTSUPP;
+        if ((sector + DIV_ROUND_UP(size, 512)) >
+                                        part_nr_sects_read(bdev->bd_part))
+                return -ERANGE;
+        sector += get_start_sect(bdev);
+        if (sector % (PAGE_SIZE / 512))
+                return -EINVAL;
+        avail = ops->direct_access(bdev, sector, addr, pfn, size);
+        if (!avail)
+                return -ERANGE;
+        return min(avail, size);
+}
+EXPORT_SYMBOL_GPL(bdev_direct_access);
+
 /*
  * pseudo-fs
  */
@@ -584,7 +616,6 @@ struct block_device *bdget(dev_t dev)
                 inode->i_bdev = bdev;
                 inode->i_data.a_ops = &def_blk_aops;
                 mapping_set_gfp_mask(&inode->i_data, GFP_USER);
-                inode->i_data.backing_dev_info = &default_backing_dev_info;
                 spin_lock(&bdev_lock);
                 list_add(&bdev->bd_list, &all_bdevs);
                 spin_unlock(&bdev_lock);
@@ -1145,8 +1176,6 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
                 bdev->bd_queue = disk->queue;
                 bdev->bd_contains = bdev;
                 if (!partno) {
-                        struct backing_dev_info *bdi;
-
                         ret = -ENXIO;
                         bdev->bd_part = disk_get_part(disk, partno);
                         if (!bdev->bd_part)
@@ -1172,11 +1201,8 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
                         }
                 }
 
-                if (!ret) {
+                if (!ret)
                         bd_set_size(bdev,(loff_t)get_capacity(disk)<<9);
-                        bdi = blk_get_backing_dev_info(bdev);
-                        bdev_inode_switch_bdi(bdev->bd_inode, bdi);
-                }
 
                 /*
                  * If the device is invalidated, rescan partition
@@ -1203,8 +1229,6 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
                         if (ret)
                                 goto out_clear;
                         bdev->bd_contains = whole;
-                        bdev_inode_switch_bdi(bdev->bd_inode,
-                                whole->bd_inode->i_data.backing_dev_info);
                         bdev->bd_part = disk_get_part(disk, partno);
                         if (!(disk->flags & GENHD_FL_UP) ||
                             !bdev->bd_part || !bdev->bd_part->nr_sects) {
@@ -1244,7 +1268,6 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
         bdev->bd_disk = NULL;
         bdev->bd_part = NULL;
         bdev->bd_queue = NULL;
-        bdev_inode_switch_bdi(bdev->bd_inode, &default_backing_dev_info);
         if (bdev != bdev->bd_contains)
                 __blkdev_put(bdev->bd_contains, mode, 1);
         bdev->bd_contains = NULL;
@@ -1464,11 +1487,11 @@ static void __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part)
                 WARN_ON_ONCE(bdev->bd_holders);
                 sync_blockdev(bdev);
                 kill_bdev(bdev);
-                /* ->release can cause the old bdi to disappear,
-                 * so must switch it out first
+                /*
+                 * ->release can cause the queue to disappear, so flush all
+                 * dirty data before.
                  */
-                bdev_inode_switch_bdi(bdev->bd_inode,
-                                &default_backing_dev_info);
+                bdev_write_inode(bdev->bd_inode);
         }
         if (bdev->bd_contains == bdev) {
                 if (disk->fops->release)
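
For context on how the exported bdev_direct_access() is meant to be consumed, here is a minimal sketch of a hypothetical kernel-internal caller. It is not part of this patch: the helper name copy_from_dax_bdev and its surrounding driver are assumptions; only the contract documented in the kerneldoc above (page-aligned start sector, directly dereferenceable address, byte count or negative errno as return value) is taken from the diff.

/*
 * Hypothetical caller (not from this patch): read up to @len bytes from a
 * byte-addressable block device starting at @sector.  The caller is expected
 * to pass a page-aligned location; bdev_direct_access() returns -EINVAL
 * otherwise.
 */
static long copy_from_dax_bdev(struct block_device *bdev, sector_t sector,
                               void *dst, long len)
{
        void *addr;
        unsigned long pfn;
        long avail;

        avail = bdev_direct_access(bdev, sector, &addr, &pfn, len);
        if (avail < 0)
                return avail;   /* -EOPNOTSUPP, -ERANGE, -EINVAL, ... */

        /*
         * The returned address is directly dereferenceable; no ioremap()
         * or kmap() is needed.  @avail may be less than @len.
         */
        memcpy(dst, addr, avail);
        return avail;
}

Because the helper caps its return value at min(avail, size), short reads near the end of the addressable range are normal and a caller needing more data would loop.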
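
On the provider side, ops->direct_access() is the hook that bdev_direct_access() dispatches to. The sketch below shows roughly what a RAM-backed driver might return; the struct ramdisk_dev layout is invented for illustration, and the prototype is inferred from the call site in the hunk above rather than quoted from the block layer headers.

/* Invented example device: a contiguous, permanently mapped RAM region. */
struct ramdisk_dev {
        void *virt_base;                /* kernel virtual address of the region */
        phys_addr_t phys_base;          /* physical address of the region */
        size_t size;                    /* region size in bytes */
};

/*
 * Hypothetical ->direct_access() implementation (not from this patch).
 * By the time this is called, bdev_direct_access() has already made @sector
 * absolute, bounds-checked it, and verified page alignment.
 */
static long ramdisk_direct_access(struct block_device *bdev, sector_t sector,
                                  void **addr, unsigned long *pfn, long size)
{
        struct ramdisk_dev *dev = bdev->bd_disk->private_data;
        size_t offset = (size_t)sector << 9;    /* 512-byte sectors to bytes */

        if (offset >= dev->size)
                return -ERANGE;

        *addr = dev->virt_base + offset;
        *pfn = (unsigned long)((dev->phys_base + offset) >> PAGE_SHIFT);

        /* Report everything from @offset to the end as directly accessible. */
        return dev->size - offset;
}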
