author    Jens Axboe <axboe@fb.com>    2014-11-18 21:43:46 -0500
committer Jens Axboe <axboe@fb.com>    2014-11-18 21:43:46 -0500
commit    b3521729769ec71567a2e32a38609f87e781e41b (patch)
tree      66a8494968706420c3eb043caa5868702d440d18 /drivers/block
parent    139768895309c6c1d6913e909e9c9422f81a1640 (diff)
parent    fc14f9c1272f62c3e8d01300f52467c0d9af50f9 (diff)
Merge branch 'master' into for-3.19/drivers
Diffstat (limited to 'drivers/block')
-rw-r--r--  drivers/block/null_blk.c       | 14
-rw-r--r--  drivers/block/rbd.c            | 35
-rw-r--r--  drivers/block/sunvdc.c         |  9
-rw-r--r--  drivers/block/zram/zram_drv.c  | 13
4 files changed, 34 insertions, 37 deletions
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index 8433bc8ead3d..caa61212fdb5 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -450,14 +450,10 @@ static int init_driver_queues(struct nullb *nullb)
 
 		ret = setup_commands(nq);
 		if (ret)
-			goto err_queue;
+			return ret;
 		nullb->nr_queues++;
 	}
-
 	return 0;
-err_queue:
-	cleanup_queues(nullb);
-	return ret;
 }
 
 static int null_add_dev(void)
@@ -507,7 +503,9 @@ static int null_add_dev(void)
 			goto out_cleanup_queues;
 		}
 		blk_queue_make_request(nullb->q, null_queue_bio);
-		init_driver_queues(nullb);
+		rv = init_driver_queues(nullb);
+		if (rv)
+			goto out_cleanup_blk_queue;
 	} else {
 		nullb->q = blk_init_queue_node(null_request_fn, &nullb->lock, home_node);
 		if (!nullb->q) {
@@ -516,7 +514,9 @@ static int null_add_dev(void)
 		}
 		blk_queue_prep_rq(nullb->q, null_rq_prep_fn);
 		blk_queue_softirq_done(nullb->q, null_softirq_done_fn);
-		init_driver_queues(nullb);
+		rv = init_driver_queues(nullb);
+		if (rv)
+			goto out_cleanup_blk_queue;
 	}
 
 	nullb->q->queuedata = nullb;
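The null_blk change above has init_driver_queues() propagate failures to its callers instead of calling cleanup_queues() itself; previously both call sites in null_add_dev() ignored the helper's return value, so a mid-loop failure left the device half set up. Below is a minimal standalone sketch of that pattern (plain userspace C, not driver code; struct ctx, setup_one() and teardown_all() are illustrative names, not null_blk symbols):

/* Sketch only: the helper reports failure, the caller owns the unwind. */
#include <stdio.h>

#define NQUEUES	4

struct ctx {
	int nr_ready;	/* units initialized so far, like nullb->nr_queues */
};

static int setup_one(struct ctx *c, int i)
{
	if (i == 2)	/* simulate a mid-loop failure */
		return -1;
	c->nr_ready++;
	return 0;
}

static void teardown_all(struct ctx *c)
{
	while (c->nr_ready > 0)	/* undo whatever setup_one() completed */
		c->nr_ready--;
}

/* Like the patched init_driver_queues(): return the error, don't clean up. */
static int init_all(struct ctx *c)
{
	int i, ret;

	for (i = 0; i < NQUEUES; i++) {
		ret = setup_one(c, i);
		if (ret)
			return ret;
	}
	return 0;
}

int main(void)
{
	struct ctx c = { 0 };

	/* Like the patched null_add_dev(): check the result and unwind. */
	if (init_all(&c)) {
		teardown_all(&c);
		fprintf(stderr, "init failed, cleaned up\n");
		return 1;
	}
	return 0;
}

Centralizing the unwind in the caller also keeps the two call sites symmetrical: both now jump to the same out_cleanup_blk_queue label on failure.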
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 0a54c588e433..27b71a0b72d0 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -342,7 +342,6 @@ struct rbd_device {
 
 	struct list_head	rq_queue;	/* incoming rq queue */
 	spinlock_t		lock;		/* queue, flags, open_count */
-	struct workqueue_struct	*rq_wq;
 	struct work_struct	rq_work;
 
 	struct rbd_image_header	header;
@@ -402,6 +401,8 @@ static struct kmem_cache *rbd_segment_name_cache;
 static int rbd_major;
 static DEFINE_IDA(rbd_dev_id_ida);
 
+static struct workqueue_struct *rbd_wq;
+
 /*
  * Default to false for now, as single-major requires >= 0.75 version of
  * userspace rbd utility.
@@ -3452,7 +3453,7 @@ static void rbd_request_fn(struct request_queue *q)
 	}
 
 	if (queued)
-		queue_work(rbd_dev->rq_wq, &rbd_dev->rq_work);
+		queue_work(rbd_wq, &rbd_dev->rq_work);
 }
 
 /*
@@ -3532,7 +3533,7 @@ static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
 	page_count = (u32) calc_pages_for(offset, length);
 	pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
 	if (IS_ERR(pages))
-		ret = PTR_ERR(pages);
+		return PTR_ERR(pages);
 
 	ret = -ENOMEM;
 	obj_request = rbd_obj_request_create(object_name, offset, length,
@@ -5242,16 +5243,9 @@ static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
 	set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);
 	set_disk_ro(rbd_dev->disk, rbd_dev->mapping.read_only);
 
-	rbd_dev->rq_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0,
-					 rbd_dev->disk->disk_name);
-	if (!rbd_dev->rq_wq) {
-		ret = -ENOMEM;
-		goto err_out_mapping;
-	}
-
 	ret = rbd_bus_add_dev(rbd_dev);
 	if (ret)
-		goto err_out_workqueue;
+		goto err_out_mapping;
 
 	/* Everything's ready.  Announce the disk to the world. */
 
@@ -5263,9 +5257,6 @@ static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
 
 	return ret;
 
-err_out_workqueue:
-	destroy_workqueue(rbd_dev->rq_wq);
-	rbd_dev->rq_wq = NULL;
 err_out_mapping:
 	rbd_dev_mapping_clear(rbd_dev);
 err_out_disk:
@@ -5512,7 +5503,6 @@ static void rbd_dev_device_release(struct device *dev)
 {
 	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
 
-	destroy_workqueue(rbd_dev->rq_wq);
 	rbd_free_disk(rbd_dev);
 	clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
 	rbd_dev_mapping_clear(rbd_dev);
@@ -5716,11 +5706,21 @@ static int __init rbd_init(void)
 	if (rc)
 		return rc;
 
+	/*
+	 * The number of active work items is limited by the number of
+	 * rbd devices, so leave @max_active at default.
+	 */
+	rbd_wq = alloc_workqueue(RBD_DRV_NAME, WQ_MEM_RECLAIM, 0);
+	if (!rbd_wq) {
+		rc = -ENOMEM;
+		goto err_out_slab;
+	}
+
 	if (single_major) {
 		rbd_major = register_blkdev(0, RBD_DRV_NAME);
 		if (rbd_major < 0) {
 			rc = rbd_major;
-			goto err_out_slab;
+			goto err_out_wq;
 		}
 	}
 
@@ -5738,6 +5738,8 @@ static int __init rbd_init(void)
 err_out_blkdev:
 	if (single_major)
 		unregister_blkdev(rbd_major, RBD_DRV_NAME);
+err_out_wq:
+	destroy_workqueue(rbd_wq);
 err_out_slab:
 	rbd_slab_exit();
 	return rc;
@@ -5749,6 +5751,7 @@ static void __exit rbd_exit(void)
 	rbd_sysfs_cleanup();
 	if (single_major)
 		unregister_blkdev(rbd_major, RBD_DRV_NAME);
+	destroy_workqueue(rbd_wq);
 	rbd_slab_exit();
 }
 
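The rbd portion replaces the per-device rq_wq, previously allocated in rbd_dev_device_setup() and destroyed in rbd_dev_device_release(), with a single module-level rbd_wq that lives from rbd_init() to rbd_exit(); per the added comment, one queue suffices because active work items are bounded by the number of rbd devices. A standalone sketch of that module-lifetime pattern follows (userspace C; wq_create()/wq_destroy() are hypothetical stand-ins for alloc_workqueue()/destroy_workqueue()):

#include <stdio.h>
#include <stdlib.h>

struct wq {
	const char *name;
};

/* Stand-ins for alloc_workqueue()/destroy_workqueue(). */
static struct wq *wq_create(const char *name)
{
	struct wq *w = malloc(sizeof(*w));

	if (w)
		w->name = name;
	return w;
}

static void wq_destroy(struct wq *w)
{
	free(w);
}

static struct wq *shared_wq;	/* one queue for all devices, like rbd_wq */

static int init_sketch(int fail_later_step)
{
	int rc;

	shared_wq = wq_create("rbd");
	if (!shared_wq)
		return -1;	/* -ENOMEM in the real driver */

	if (fail_later_step) {	/* e.g. register_blkdev() failing */
		rc = -1;
		goto err_out_wq;
	}
	return 0;

err_out_wq:
	wq_destroy(shared_wq);	/* unwind in reverse order, like rbd_init() */
	return rc;
}

int main(void)
{
	if (init_sketch(1))
		fprintf(stderr, "late init failure: workqueue destroyed\n");
	return 0;
}

Every failure after the queue is created funnels through err_out_wq, mirroring how rbd_init() now tears down in strict reverse order of setup.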
diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c
index 756b8ec00f16..0ebadf93b6c5 100644
--- a/drivers/block/sunvdc.c
+++ b/drivers/block/sunvdc.c
@@ -69,8 +69,6 @@ struct vdc_port {
 	u8			vdisk_mtype;
 
 	char			disk_name[32];
-
-	struct vio_disk_vtoc	label;
 };
 
 static inline struct vdc_port *to_vdc_port(struct vio_driver_state *vio)
@@ -710,13 +708,6 @@ static int probe_disk(struct vdc_port *port)
 	if (comp.err)
 		return comp.err;
 
-	err = generic_request(port, VD_OP_GET_VTOC,
-			      &port->label, sizeof(port->label));
-	if (err < 0) {
-		printk(KERN_ERR PFX "VD_OP_GET_VTOC returns error %d\n", err);
-		return err;
-	}
-
 	if (vdc_version_supported(port, 1, 1)) {
 		/* vdisk_size should be set during the handshake, if it wasn't
 		 * then the underlying disk is reserved by another system
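The sunvdc change drops the VD_OP_GET_VTOC request from probe_disk() together with the label buffer that received the reply; since the request's only visible effect in this path was to abort probing on error, removing it lets probing proceed on backends where that query fails. A sketch of the before/after control flow (illustrative userspace C; query_vtoc() is a hypothetical stand-in for the removed generic_request() call):

#include <stdio.h>

/* Stand-in for the removed generic_request(port, VD_OP_GET_VTOC, ...). */
static int query_vtoc(void)
{
	return -1;	/* a backend without a VTOC label fails this request */
}

static int probe_disk_old(void)
{
	if (query_vtoc() < 0)	/* fatal: aborted probing entirely */
		return -1;
	return 0;
}

static int probe_disk_new(void)
{
	/* the query (and the label buffer it filled) is simply gone */
	return 0;
}

int main(void)
{
	printf("old probe: %s\n", probe_disk_old() ? "fails" : "succeeds");
	printf("new probe: %s\n", probe_disk_new() ? "fails" : "succeeds");
	return 0;
}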
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 0e63e8aa8279..3920ee45aa59 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -99,11 +99,12 @@ static ssize_t mem_used_total_show(struct device *dev,
 {
 	u64 val = 0;
 	struct zram *zram = dev_to_zram(dev);
-	struct zram_meta *meta = zram->meta;
 
 	down_read(&zram->init_lock);
-	if (init_done(zram))
+	if (init_done(zram)) {
+		struct zram_meta *meta = zram->meta;
 		val = zs_get_total_pages(meta->mem_pool);
+	}
 	up_read(&zram->init_lock);
 
 	return scnprintf(buf, PAGE_SIZE, "%llu\n", val << PAGE_SHIFT);
@@ -173,16 +174,17 @@ static ssize_t mem_used_max_store(struct device *dev,
 	int err;
 	unsigned long val;
 	struct zram *zram = dev_to_zram(dev);
-	struct zram_meta *meta = zram->meta;
 
 	err = kstrtoul(buf, 10, &val);
 	if (err || val != 0)
 		return -EINVAL;
 
 	down_read(&zram->init_lock);
-	if (init_done(zram))
+	if (init_done(zram)) {
+		struct zram_meta *meta = zram->meta;
 		atomic_long_set(&zram->stats.max_used_pages,
 				zs_get_total_pages(meta->mem_pool));
+	}
 	up_read(&zram->init_lock);
 
 	return len;
@@ -558,7 +560,8 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
 	}
 
 	if (page_zero_filled(uncmem)) {
-		kunmap_atomic(user_mem);
+		if (user_mem)
+			kunmap_atomic(user_mem);
 		/* Free memory associated with this sector now. */
 		bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
 		zram_free_page(zram, index);
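The two sysfs fixes in zram_drv.c narrow struct zram_meta *meta from function scope to the init_done() branch, so zram->meta is only loaded under init_lock once the device is known to be initialized; the zram_bvec_write() hunk likewise checks user_mem before kunmap_atomic(), since that pointer can be NULL by the time the zero-filled check runs. A userspace sketch of the scope-narrowing pattern using a pthread rwlock (g_meta, g_init_done and mem_used_total() are illustrative, not zram symbols; build with cc -pthread):

#include <pthread.h>
#include <stdio.h>

struct meta {
	unsigned long pages;
};

static struct meta *g_meta;	/* like zram->meta: NULL until init */
static int g_init_done;		/* like init_done(zram) */
static pthread_rwlock_t g_lock = PTHREAD_RWLOCK_INITIALIZER;

static unsigned long mem_used_total(void)
{
	unsigned long val = 0;

	pthread_rwlock_rdlock(&g_lock);
	if (g_init_done) {
		/* dereference only after the init check, under the lock */
		struct meta *m = g_meta;

		val = m->pages;
	}
	pthread_rwlock_unlock(&g_lock);
	return val;
}

static struct meta the_meta = { .pages = 42 };

int main(void)
{
	printf("%lu\n", mem_used_total());	/* 0: not initialized yet */

	pthread_rwlock_wrlock(&g_lock);
	g_meta = &the_meta;
	g_init_done = 1;
	pthread_rwlock_unlock(&g_lock);

	printf("%lu\n", mem_used_total());	/* 42 */
	return 0;
}

The point is that the pointer is read only where it is guaranteed valid, rather than captured early where a concurrent reset could invalidate it before the lock is taken.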