author		Kent Overstreet <koverstreet@google.com>	2013-05-15 03:11:26 -0400
committer	Kent Overstreet <koverstreet@google.com>	2013-05-15 03:48:14 -0400
commit		f59fce847fc8483508b5028c24e2b1e00523dd88 (patch)
tree		1701605027713bc535c2c21e7b4792433dc091a3 /drivers
parent		fe0a797a6b42d9ad0ed063eaef705da1eb3c8147 (diff)
bcache: Fix error handling in init code
This code appears to have rotted... fix various bugs and do some
refactoring.

Signed-off-by: Kent Overstreet <koverstreet@google.com>
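The pattern the patch converges on — chaining allocations in one condition that returns -ENOMEM, and unwinding through a single error label that delegates cleanup to a destructor safe on partially initialized state — looks roughly like the following userspace sketch. This is illustrative only; the demo_* names are invented and none of this is bcache code:

/* Illustrative sketch of the error-handling idioms in this patch;
 * all demo_* names are made up. */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct demo_dev {
	void	*buf_a;
	void	*buf_b;
	FILE	*log;
};

/* Chain every allocation in one condition, as cache_alloc() and
 * bcache_device_init() now do; the destructor below releases
 * whatever subset actually succeeded. */
static int demo_init(struct demo_dev *d)
{
	if (!(d->buf_a = malloc(64)) ||
	    !(d->buf_b = malloc(64)))
		return -ENOMEM;
	return 0;
}

/* Destructor tolerates partially initialized state: free(NULL)
 * is a no-op, much like kfree()/vfree(). */
static void demo_free(struct demo_dev *d)
{
	free(d->buf_b);
	free(d->buf_a);
	if (d->log)
		fclose(d->log);
}

/* Single error label that logs and delegates cleanup, in the
 * spirit of the reworked register_bdev()/register_cache(). */
static int demo_register(struct demo_dev *d, const char *path)
{
	const char *err;

	err = "cannot open log";
	if (!(d->log = fopen(path, "w")))
		goto err;

	err = "cannot allocate memory";
	if (demo_init(d))
		goto err;

	return 0;
err:
	fprintf(stderr, "error opening %s: %s\n", path, err);
	demo_free(d);
	return -EINVAL;
}

int main(void)
{
	struct demo_dev d = { 0 };

	if (!demo_register(&d, "/tmp/demo.log"))
		demo_free(&d);
	return 0;
}

The diff below applies this shape to cached_dev_init(), cache_alloc(), and the register paths.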
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/md/bcache/bcache.h	|   2
-rw-r--r--	drivers/md/bcache/stats.c	|  34
-rw-r--r--	drivers/md/bcache/super.c	| 182
-rw-r--r--	drivers/md/bcache/writeback.c	|   2
4 files changed, 99 insertions, 121 deletions
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index 340146d7c17f..d3e15b42a4ab 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -1241,7 +1241,7 @@ void bch_cache_set_stop(struct cache_set *);
 struct cache_set *bch_cache_set_alloc(struct cache_sb *);
 void bch_btree_cache_free(struct cache_set *);
 int bch_btree_cache_alloc(struct cache_set *);
-void bch_writeback_init_cached_dev(struct cached_dev *);
+void bch_cached_dev_writeback_init(struct cached_dev *);
 void bch_moving_init_cache_set(struct cache_set *);
 
 void bch_cache_allocator_exit(struct cache *ca);
diff --git a/drivers/md/bcache/stats.c b/drivers/md/bcache/stats.c
index 64e679449c2a..b8730e714d69 100644
--- a/drivers/md/bcache/stats.c
+++ b/drivers/md/bcache/stats.c
@@ -93,24 +93,6 @@ static struct attribute *bch_stats_files[] = {
 };
 static KTYPE(bch_stats);
 
-static void scale_accounting(unsigned long data);
-
-void bch_cache_accounting_init(struct cache_accounting *acc,
-			       struct closure *parent)
-{
-	kobject_init(&acc->total.kobj, &bch_stats_ktype);
-	kobject_init(&acc->five_minute.kobj, &bch_stats_ktype);
-	kobject_init(&acc->hour.kobj, &bch_stats_ktype);
-	kobject_init(&acc->day.kobj, &bch_stats_ktype);
-
-	closure_init(&acc->cl, parent);
-	init_timer(&acc->timer);
-	acc->timer.expires = jiffies + accounting_delay;
-	acc->timer.data = (unsigned long) acc;
-	acc->timer.function = scale_accounting;
-	add_timer(&acc->timer);
-}
-
 int bch_cache_accounting_add_kobjs(struct cache_accounting *acc,
				    struct kobject *parent)
 {
@@ -244,3 +226,19 @@ void bch_mark_sectors_bypassed(struct search *s, int sectors)
 	atomic_add(sectors, &dc->accounting.collector.sectors_bypassed);
 	atomic_add(sectors, &s->op.c->accounting.collector.sectors_bypassed);
 }
+
+void bch_cache_accounting_init(struct cache_accounting *acc,
+			       struct closure *parent)
+{
+	kobject_init(&acc->total.kobj, &bch_stats_ktype);
+	kobject_init(&acc->five_minute.kobj, &bch_stats_ktype);
+	kobject_init(&acc->hour.kobj, &bch_stats_ktype);
+	kobject_init(&acc->day.kobj, &bch_stats_ktype);
+
+	closure_init(&acc->cl, parent);
+	init_timer(&acc->timer);
+	acc->timer.expires = jiffies + accounting_delay;
+	acc->timer.data = (unsigned long) acc;
+	acc->timer.function = scale_accounting;
+	add_timer(&acc->timer);
+}
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index b09beb2b52c7..f88e2b653a3f 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -731,8 +731,7 @@ static void bcache_device_free(struct bcache_device *d)
 
 	if (d->c)
 		bcache_device_detach(d);
-
-	if (d->disk)
+	if (d->disk && d->disk->flags & GENHD_FL_UP)
 		del_gendisk(d->disk);
 	if (d->disk && d->disk->queue)
 		blk_cleanup_queue(d->disk->queue);
@@ -755,12 +754,9 @@ static int bcache_device_init(struct bcache_device *d, unsigned block_size)
 	if (!(d->bio_split = bioset_create(4, offsetof(struct bbio, bio))) ||
 	    !(d->unaligned_bvec = mempool_create_kmalloc_pool(1,
				sizeof(struct bio_vec) * BIO_MAX_PAGES)) ||
-	    bio_split_pool_init(&d->bio_split_hook))
-
-		return -ENOMEM;
-
-	d->disk = alloc_disk(1);
-	if (!d->disk)
+	    bio_split_pool_init(&d->bio_split_hook) ||
+	    !(d->disk = alloc_disk(1)) ||
+	    !(q = blk_alloc_queue(GFP_KERNEL)))
 		return -ENOMEM;
 
 	snprintf(d->disk->disk_name, DISK_NAME_LEN, "bcache%i", bcache_minor);
@@ -770,10 +766,6 @@ static int bcache_device_init(struct bcache_device *d, unsigned block_size)
 	d->disk->fops = &bcache_ops;
 	d->disk->private_data = d;
 
-	q = blk_alloc_queue(GFP_KERNEL);
-	if (!q)
-		return -ENOMEM;
-
 	blk_queue_make_request(q, NULL);
 	d->disk->queue = q;
 	q->queuedata = d;
@@ -998,14 +990,17 @@ static void cached_dev_free(struct closure *cl)
 
 	mutex_lock(&bch_register_lock);
 
-	bd_unlink_disk_holder(dc->bdev, dc->disk.disk);
+	if (atomic_read(&dc->running))
+		bd_unlink_disk_holder(dc->bdev, dc->disk.disk);
 	bcache_device_free(&dc->disk);
 	list_del(&dc->list);
 
 	mutex_unlock(&bch_register_lock);
 
 	if (!IS_ERR_OR_NULL(dc->bdev)) {
-		blk_sync_queue(bdev_get_queue(dc->bdev));
+		if (dc->bdev->bd_disk)
+			blk_sync_queue(bdev_get_queue(dc->bdev));
+
 		blkdev_put(dc->bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
 	}
 
@@ -1027,73 +1022,67 @@ static void cached_dev_flush(struct closure *cl)
 
 static int cached_dev_init(struct cached_dev *dc, unsigned block_size)
 {
-	int err;
+	int ret;
 	struct io *io;
-
-	closure_init(&dc->disk.cl, NULL);
-	set_closure_fn(&dc->disk.cl, cached_dev_flush, system_wq);
+	struct request_queue *q = bdev_get_queue(dc->bdev);
 
 	__module_get(THIS_MODULE);
 	INIT_LIST_HEAD(&dc->list);
+	closure_init(&dc->disk.cl, NULL);
+	set_closure_fn(&dc->disk.cl, cached_dev_flush, system_wq);
 	kobject_init(&dc->disk.kobj, &bch_cached_dev_ktype);
-
-	bch_cache_accounting_init(&dc->accounting, &dc->disk.cl);
-
-	err = bcache_device_init(&dc->disk, block_size);
-	if (err)
-		goto err;
-
-	spin_lock_init(&dc->io_lock);
-	closure_init_unlocked(&dc->sb_write);
 	INIT_WORK(&dc->detach, cached_dev_detach_finish);
+	closure_init_unlocked(&dc->sb_write);
+	INIT_LIST_HEAD(&dc->io_lru);
+	spin_lock_init(&dc->io_lock);
+	bch_cache_accounting_init(&dc->accounting, &dc->disk.cl);
 
 	dc->sequential_merge = true;
 	dc->sequential_cutoff = 4 << 20;
 
-	INIT_LIST_HEAD(&dc->io_lru);
-	dc->sb_bio.bi_max_vecs = 1;
-	dc->sb_bio.bi_io_vec = dc->sb_bio.bi_inline_vecs;
-
 	for (io = dc->io; io < dc->io + RECENT_IO; io++) {
 		list_add(&io->lru, &dc->io_lru);
 		hlist_add_head(&io->hash, dc->io_hash + RECENT_IO);
 	}
 
-	bch_writeback_init_cached_dev(dc);
+	ret = bcache_device_init(&dc->disk, block_size);
+	if (ret)
+		return ret;
+
+	set_capacity(dc->disk.disk,
+		     dc->bdev->bd_part->nr_sects - dc->sb.data_offset);
+
+	dc->disk.disk->queue->backing_dev_info.ra_pages =
+		max(dc->disk.disk->queue->backing_dev_info.ra_pages,
+		    q->backing_dev_info.ra_pages);
+
+	bch_cached_dev_request_init(dc);
+	bch_cached_dev_writeback_init(dc);
 	return 0;
-err:
-	bcache_device_stop(&dc->disk);
-	return err;
 }
 
 /* Cached device - bcache superblock */
 
-static const char *register_bdev(struct cache_sb *sb, struct page *sb_page,
+static void register_bdev(struct cache_sb *sb, struct page *sb_page,
				 struct block_device *bdev,
				 struct cached_dev *dc)
 {
 	char name[BDEVNAME_SIZE];
 	const char *err = "cannot allocate memory";
-	struct gendisk *g;
 	struct cache_set *c;
 
-	if (!dc || cached_dev_init(dc, sb->block_size << 9) != 0)
-		return err;
-
 	memcpy(&dc->sb, sb, sizeof(struct cache_sb));
-	dc->sb_bio.bi_io_vec[0].bv_page = sb_page;
 	dc->bdev = bdev;
 	dc->bdev->bd_holder = dc;
 
-	g = dc->disk.disk;
-
-	set_capacity(g, dc->bdev->bd_part->nr_sects - dc->sb.data_offset);
-
-	g->queue->backing_dev_info.ra_pages =
-		max(g->queue->backing_dev_info.ra_pages,
-		    bdev->bd_queue->backing_dev_info.ra_pages);
+	bio_init(&dc->sb_bio);
+	dc->sb_bio.bi_max_vecs = 1;
+	dc->sb_bio.bi_io_vec = dc->sb_bio.bi_inline_vecs;
+	dc->sb_bio.bi_io_vec[0].bv_page = sb_page;
+	get_page(sb_page);
 
-	bch_cached_dev_request_init(dc);
+	if (cached_dev_init(dc, sb->block_size << 9))
+		goto err;
 
 	err = "error creating kobject";
 	if (kobject_add(&dc->disk.kobj, &part_to_dev(bdev->bd_part)->kobj,
@@ -1102,6 +1091,8 @@ static const char *register_bdev(struct cache_sb *sb, struct page *sb_page,
 	if (bch_cache_accounting_add_kobjs(&dc->accounting, &dc->disk.kobj))
 		goto err;
 
+	pr_info("registered backing device %s", bdevname(bdev, name));
+
 	list_add(&dc->list, &uncached_devices);
 	list_for_each_entry(c, &bch_cache_sets, list)
 		bch_cached_dev_attach(dc, c);
@@ -1110,15 +1101,10 @@ static const char *register_bdev(struct cache_sb *sb, struct page *sb_page,
 	    BDEV_STATE(&dc->sb) == BDEV_STATE_STALE)
 		bch_cached_dev_run(dc);
 
-	return NULL;
+	return;
 err:
-	kobject_put(&dc->disk.kobj);
 	pr_notice("error opening %s: %s", bdevname(bdev, name), err);
-	/*
-	 * Return NULL instead of an error because kobject_put() cleans
-	 * everything up
-	 */
-	return NULL;
+	bcache_device_stop(&dc->disk);
 }
 
 /* Flash only volumes */
@@ -1716,20 +1702,11 @@ static int cache_alloc(struct cache_sb *sb, struct cache *ca)
 	size_t free;
 	struct bucket *b;
 
-	if (!ca)
-		return -ENOMEM;
-
 	__module_get(THIS_MODULE);
 	kobject_init(&ca->kobj, &bch_cache_ktype);
 
-	memcpy(&ca->sb, sb, sizeof(struct cache_sb));
-
 	INIT_LIST_HEAD(&ca->discards);
 
-	bio_init(&ca->sb_bio);
-	ca->sb_bio.bi_max_vecs = 1;
-	ca->sb_bio.bi_io_vec = ca->sb_bio.bi_inline_vecs;
-
 	bio_init(&ca->journal.bio);
 	ca->journal.bio.bi_max_vecs = 8;
 	ca->journal.bio.bi_io_vec = ca->journal.bio.bi_inline_vecs;
@@ -1741,18 +1718,17 @@ static int cache_alloc(struct cache_sb *sb, struct cache *ca)
 	    !init_fifo(&ca->free_inc, free << 2, GFP_KERNEL) ||
 	    !init_fifo(&ca->unused, free << 2, GFP_KERNEL) ||
 	    !init_heap(&ca->heap, free << 3, GFP_KERNEL) ||
-	    !(ca->buckets = vmalloc(sizeof(struct bucket) *
+	    !(ca->buckets = vzalloc(sizeof(struct bucket) *
				    ca->sb.nbuckets)) ||
 	    !(ca->prio_buckets = kzalloc(sizeof(uint64_t) * prio_buckets(ca) *
					 2, GFP_KERNEL)) ||
 	    !(ca->disk_buckets = alloc_bucket_pages(GFP_KERNEL, ca)) ||
 	    !(ca->alloc_workqueue = alloc_workqueue("bch_allocator", 0, 1)) ||
 	    bio_split_pool_init(&ca->bio_split_hook))
-		goto err;
+		return -ENOMEM;
 
 	ca->prio_last_buckets = ca->prio_buckets + prio_buckets(ca);
 
-	memset(ca->buckets, 0, ca->sb.nbuckets * sizeof(struct bucket));
 	for_each_bucket(b, ca)
 		atomic_set(&b->pin, 0);
 
@@ -1765,22 +1741,28 @@ err:
 	return -ENOMEM;
 }
 
-static const char *register_cache(struct cache_sb *sb, struct page *sb_page,
+static void register_cache(struct cache_sb *sb, struct page *sb_page,
				  struct block_device *bdev, struct cache *ca)
 {
 	char name[BDEVNAME_SIZE];
 	const char *err = "cannot allocate memory";
 
-	if (cache_alloc(sb, ca) != 0)
-		return err;
-
-	ca->sb_bio.bi_io_vec[0].bv_page = sb_page;
+	memcpy(&ca->sb, sb, sizeof(struct cache_sb));
 	ca->bdev = bdev;
 	ca->bdev->bd_holder = ca;
 
+	bio_init(&ca->sb_bio);
+	ca->sb_bio.bi_max_vecs = 1;
+	ca->sb_bio.bi_io_vec = ca->sb_bio.bi_inline_vecs;
+	ca->sb_bio.bi_io_vec[0].bv_page = sb_page;
+	get_page(sb_page);
+
 	if (blk_queue_discard(bdev_get_queue(ca->bdev)))
 		ca->discard = CACHE_DISCARD(&ca->sb);
 
+	if (cache_alloc(sb, ca) != 0)
+		goto err;
+
 	err = "error creating kobject";
 	if (kobject_add(&ca->kobj, &part_to_dev(bdev->bd_part)->kobj, "bcache"))
 		goto err;
@@ -1790,15 +1772,10 @@ static const char *register_cache(struct cache_sb *sb, struct page *sb_page,
 		goto err;
 
 	pr_info("registered cache device %s", bdevname(bdev, name));
-
-	return NULL;
+	return;
 err:
+	pr_notice("error opening %s: %s", bdevname(bdev, name), err);
 	kobject_put(&ca->kobj);
-	pr_info("error opening %s: %s", bdevname(bdev, name), err);
-	/* Return NULL instead of an error because kobject_put() cleans
-	 * everything up
-	 */
-	return NULL;
 }
 
 /* Global interfaces/init */
@@ -1832,12 +1809,15 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
 	bdev = blkdev_get_by_path(strim(path),
				  FMODE_READ|FMODE_WRITE|FMODE_EXCL,
				  sb);
-	if (bdev == ERR_PTR(-EBUSY))
-		err = "device busy";
-
-	if (IS_ERR(bdev) ||
-	    set_blocksize(bdev, 4096))
+	if (IS_ERR(bdev)) {
+		if (bdev == ERR_PTR(-EBUSY))
+			err = "device busy";
 		goto err;
+	}
+
+	err = "failed to set blocksize";
+	if (set_blocksize(bdev, 4096))
+		goto err_close;
 
 	err = read_super(sb, bdev, &sb_page);
 	if (err)
@@ -1845,33 +1825,33 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
 
 	if (SB_IS_BDEV(sb)) {
 		struct cached_dev *dc = kzalloc(sizeof(*dc), GFP_KERNEL);
+		if (!dc)
+			goto err_close;
 
-		err = register_bdev(sb, sb_page, bdev, dc);
+		register_bdev(sb, sb_page, bdev, dc);
 	} else {
 		struct cache *ca = kzalloc(sizeof(*ca), GFP_KERNEL);
+		if (!ca)
+			goto err_close;
 
-		err = register_cache(sb, sb_page, bdev, ca);
+		register_cache(sb, sb_page, bdev, ca);
 	}
-
-	if (err) {
-		/* register_(bdev|cache) will only return an error if they
-		 * didn't get far enough to create the kobject - if they did,
-		 * the kobject destructor will do this cleanup.
-		 */
+out:
+	if (sb_page)
 		put_page(sb_page);
-err_close:
-		blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
-err:
-		if (attr != &ksysfs_register_quiet)
-			pr_info("error opening %s: %s", path, err);
-		ret = -EINVAL;
-	}
-
 	kfree(sb);
 	kfree(path);
 	mutex_unlock(&bch_register_lock);
 	module_put(THIS_MODULE);
 	return ret;
+
+err_close:
+	blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
+err:
+	if (attr != &ksysfs_register_quiet)
+		pr_info("error opening %s: %s", path, err);
+	ret = -EINVAL;
+	goto out;
 }
 
 static int bcache_reboot(struct notifier_block *n, unsigned long code, void *x)
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index 93e7e31a4bd3..2714ed3991d1 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -375,7 +375,7 @@ err:
 	refill_dirty(cl);
 }
 
-void bch_writeback_init_cached_dev(struct cached_dev *dc)
+void bch_cached_dev_writeback_init(struct cached_dev *dc)
 {
 	closure_init_unlocked(&dc->writeback);
 	init_rwsem(&dc->writeback_lock);