author    Linus Torvalds <torvalds@linux-foundation.org>  2014-11-03 18:04:26 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2014-11-03 18:04:26 -0500
commit    ce1928da8440eb5f73c20c5c88a449a4d59209d0 (patch)
tree      ddb766533d397f8c42f406c52364602ed4aa36e8
parent    f4ca536f71ad69d9a974d0156d43b24b3f3112de (diff)
parent    e9226d7c9f1d83278d78675d51acc07e1a78cb27 (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/sage/ceph-client
Pull ceph fixes from Sage Weil:
 "There is a GFP flag fix from Mike Christie, an error code fix from Jan,
  and fixes for two unnecessary allocations (kmalloc and workqueue) from
  Ilya.  All are well tested.  Ilya has one other fix on the way but it
  didn't get tested in time"

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/sage/ceph-client:
  libceph: eliminate unnecessary allocation in process_one_ticket()
  rbd: Fix error recovery in rbd_obj_read_sync()
  libceph: use memalloc flags for net IO
  rbd: use a single workqueue for all devices
-rw-r--r--  drivers/block/rbd.c   | 35
-rw-r--r--  net/ceph/auth_x.c     | 25
-rw-r--r--  net/ceph/messenger.c  | 10
3 files changed, 38 insertions(+), 32 deletions(-)
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 0a54c588e433..27b71a0b72d0 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -342,7 +342,6 @@ struct rbd_device {
 
 	struct list_head	rq_queue;	/* incoming rq queue */
 	spinlock_t		lock;		/* queue, flags, open_count */
-	struct workqueue_struct	*rq_wq;
 	struct work_struct	rq_work;
 
 	struct rbd_image_header	header;
@@ -402,6 +401,8 @@ static struct kmem_cache *rbd_segment_name_cache;
 static int rbd_major;
 static DEFINE_IDA(rbd_dev_id_ida);
 
+static struct workqueue_struct *rbd_wq;
+
 /*
  * Default to false for now, as single-major requires >= 0.75 version of
  * userspace rbd utility.
@@ -3452,7 +3453,7 @@ static void rbd_request_fn(struct request_queue *q)
 	}
 
 	if (queued)
-		queue_work(rbd_dev->rq_wq, &rbd_dev->rq_work);
+		queue_work(rbd_wq, &rbd_dev->rq_work);
 }
 
 /*
@@ -3532,7 +3533,7 @@ static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
 	page_count = (u32) calc_pages_for(offset, length);
 	pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
 	if (IS_ERR(pages))
-		ret = PTR_ERR(pages);
+		return PTR_ERR(pages);
 
 	ret = -ENOMEM;
 	obj_request = rbd_obj_request_create(object_name, offset, length,
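
For context on the rbd_obj_read_sync() hunk above: the old code computed the error with "ret = PTR_ERR(pages)" but did not return, so ret was immediately overwritten with -ENOMEM and the function went on using an invalid pages pointer. The following is a minimal userspace sketch of the pattern, with simplified stand-ins for the kernel's ERR_PTR()/IS_ERR()/PTR_ERR() helpers and a hypothetical alloc_pages_vec(); it only illustrates why the early return matters.

#include <stdio.h>
#include <errno.h>

/* Simplified stand-ins for the kernel's ERR_PTR/IS_ERR/PTR_ERR helpers. */
#define ERR_PTR(err)	((void *)(long)(err))
#define IS_ERR(ptr)	((unsigned long)(ptr) >= (unsigned long)-4095)
#define PTR_ERR(ptr)	((long)(ptr))

/* Hypothetical allocator that fails with a specific, non-ENOMEM error. */
static void *alloc_pages_vec(void)
{
	return ERR_PTR(-EACCES);
}

/* Before the fix: the error code is computed, then silently clobbered. */
static long read_sync_broken(void)
{
	long ret;
	void *pages = alloc_pages_vec();

	if (IS_ERR(pages))
		ret = PTR_ERR(pages);	/* no return: execution falls through */

	ret = -ENOMEM;	/* clobbers the real error; 'pages' is still bogus */
	return ret;
}

/* After the fix: bail out immediately with the allocator's error code. */
static long read_sync_fixed(void)
{
	long ret;
	void *pages = alloc_pages_vec();

	if (IS_ERR(pages))
		return PTR_ERR(pages);

	ret = -ENOMEM;
	/* ... continue building the request with a valid 'pages' vector ... */
	return ret;
}

int main(void)
{
	printf("broken: %ld, fixed: %ld\n", read_sync_broken(), read_sync_fixed());
	return 0;
}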
@@ -5242,16 +5243,9 @@ static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
 	set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);
 	set_disk_ro(rbd_dev->disk, rbd_dev->mapping.read_only);
 
-	rbd_dev->rq_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0,
-					 rbd_dev->disk->disk_name);
-	if (!rbd_dev->rq_wq) {
-		ret = -ENOMEM;
-		goto err_out_mapping;
-	}
-
 	ret = rbd_bus_add_dev(rbd_dev);
 	if (ret)
-		goto err_out_workqueue;
+		goto err_out_mapping;
 
 	/* Everything's ready. Announce the disk to the world. */
 
@@ -5263,9 +5257,6 @@ static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
 
 	return ret;
 
-err_out_workqueue:
-	destroy_workqueue(rbd_dev->rq_wq);
-	rbd_dev->rq_wq = NULL;
 err_out_mapping:
 	rbd_dev_mapping_clear(rbd_dev);
 err_out_disk:
@@ -5512,7 +5503,6 @@ static void rbd_dev_device_release(struct device *dev)
 {
 	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
 
-	destroy_workqueue(rbd_dev->rq_wq);
 	rbd_free_disk(rbd_dev);
 	clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
 	rbd_dev_mapping_clear(rbd_dev);
@@ -5716,11 +5706,21 @@ static int __init rbd_init(void)
 	if (rc)
 		return rc;
 
+	/*
+	 * The number of active work items is limited by the number of
+	 * rbd devices, so leave @max_active at default.
+	 */
+	rbd_wq = alloc_workqueue(RBD_DRV_NAME, WQ_MEM_RECLAIM, 0);
+	if (!rbd_wq) {
+		rc = -ENOMEM;
+		goto err_out_slab;
+	}
+
 	if (single_major) {
 		rbd_major = register_blkdev(0, RBD_DRV_NAME);
 		if (rbd_major < 0) {
 			rc = rbd_major;
-			goto err_out_slab;
+			goto err_out_wq;
 		}
 	}
 
@@ -5738,6 +5738,8 @@ static int __init rbd_init(void)
 err_out_blkdev:
 	if (single_major)
 		unregister_blkdev(rbd_major, RBD_DRV_NAME);
+err_out_wq:
+	destroy_workqueue(rbd_wq);
 err_out_slab:
 	rbd_slab_exit();
 	return rc;
@@ -5749,6 +5751,7 @@ static void __exit rbd_exit(void)
 	rbd_sysfs_cleanup();
 	if (single_major)
 		unregister_blkdev(rbd_major, RBD_DRV_NAME);
+	destroy_workqueue(rbd_wq);
 	rbd_slab_exit();
 }
 
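
Taken together, the rbd.c hunks replace the per-device rq_wq with a single module-wide rbd_wq: allocate it once in rbd_init(), queue per-device work on it, and destroy it on the init error path and in rbd_exit(). Below is a minimal, hypothetical kernel-module sketch of that lifecycle; the demo_* names are illustrative and not part of the driver.

// SPDX-License-Identifier: GPL-2.0
#include <linux/module.h>
#include <linux/init.h>
#include <linux/workqueue.h>

static struct workqueue_struct *demo_wq;	/* one queue for the whole module */

static void demo_work_fn(struct work_struct *work)
{
	pr_info("demo: work item ran\n");
}

static DECLARE_WORK(demo_work, demo_work_fn);

static int __init demo_init(void)
{
	/*
	 * WQ_MEM_RECLAIM guarantees forward progress under memory pressure;
	 * max_active is left at the default (0), as in the rbd change.
	 */
	demo_wq = alloc_workqueue("demo", WQ_MEM_RECLAIM, 0);
	if (!demo_wq)
		return -ENOMEM;

	queue_work(demo_wq, &demo_work);
	return 0;
}

static void __exit demo_exit(void)
{
	destroy_workqueue(demo_wq);	/* drains pending work, then frees */
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");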
diff --git a/net/ceph/auth_x.c b/net/ceph/auth_x.c
index de6662b14e1f..7e38b729696a 100644
--- a/net/ceph/auth_x.c
+++ b/net/ceph/auth_x.c
@@ -149,6 +149,7 @@ static int process_one_ticket(struct ceph_auth_client *ac,
 	struct ceph_crypto_key old_key;
 	void *ticket_buf = NULL;
 	void *tp, *tpend;
+	void **ptp;
 	struct ceph_timespec new_validity;
 	struct ceph_crypto_key new_session_key;
 	struct ceph_buffer *new_ticket_blob;
@@ -208,25 +209,19 @@ static int process_one_ticket(struct ceph_auth_client *ac,
 			goto out;
 		}
 		tp = ticket_buf;
-		dlen = ceph_decode_32(&tp);
+		ptp = &tp;
+		tpend = *ptp + dlen;
 	} else {
 		/* unencrypted */
-		ceph_decode_32_safe(p, end, dlen, bad);
-		ticket_buf = kmalloc(dlen, GFP_NOFS);
-		if (!ticket_buf) {
-			ret = -ENOMEM;
-			goto out;
-		}
-		tp = ticket_buf;
-		ceph_decode_need(p, end, dlen, bad);
-		ceph_decode_copy(p, ticket_buf, dlen);
+		ptp = p;
+		tpend = end;
 	}
-	tpend = tp + dlen;
+	ceph_decode_32_safe(ptp, tpend, dlen, bad);
 	dout(" ticket blob is %d bytes\n", dlen);
-	ceph_decode_need(&tp, tpend, 1 + sizeof(u64), bad);
-	blob_struct_v = ceph_decode_8(&tp);
-	new_secret_id = ceph_decode_64(&tp);
-	ret = ceph_decode_buffer(&new_ticket_blob, &tp, tpend);
+	ceph_decode_need(ptp, tpend, 1 + sizeof(u64), bad);
+	blob_struct_v = ceph_decode_8(ptp);
+	new_secret_id = ceph_decode_64(ptp);
+	ret = ceph_decode_buffer(&new_ticket_blob, ptp, tpend);
 	if (ret)
 		goto out;
 
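
The auth_x.c change drops the kmalloc-and-copy in the unencrypted case by introducing a void **ptp decode cursor: it points either at the caller's position pointer p or at a local pointer into the decrypted ticket buffer, so one decode sequence advances whichever buffer is in use. A small userspace sketch of the same idea follows; the buffer layout and helper names are illustrative (and it assumes a little-endian host), not libceph's actual API.

#include <stdio.h>
#include <string.h>
#include <stdint.h>

/* Advance a decode cursor over a little-endian u32, like ceph_decode_32(). */
static uint32_t decode_32(void **p)
{
	uint32_t v;

	memcpy(&v, *p, sizeof(v));
	*p = (char *)*p + sizeof(v);
	return v;
}

/*
 * Decode one value either straight from the caller's buffer (no copy) or
 * from a separately decrypted scratch buffer, using a single code path.
 */
static uint32_t decode_record(void **p, void *decrypted, int encrypted)
{
	void *tp = decrypted;
	void **ptp;

	if (encrypted)
		ptp = &tp;	/* walk the decrypted scratch buffer */
	else
		ptp = p;	/* walk the caller's buffer in place */

	return decode_32(ptp);	/* same decode sequence for both cases */
}

int main(void)
{
	unsigned char plain[4]   = { 0x2a, 0, 0, 0 };	/* 42, little-endian */
	unsigned char scratch[4] = { 0x07, 0, 0, 0 };	/* "decrypted" data: 7 */
	void *p = plain;

	printf("unencrypted: %u\n", decode_record(&p, NULL, 0));
	printf("encrypted:   %u\n", decode_record(&p, scratch, 1));
	return 0;
}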
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index 559c9f619c20..8d1653caffdb 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -484,7 +484,7 @@ static int ceph_tcp_connect(struct ceph_connection *con)
 			       IPPROTO_TCP, &sock);
 	if (ret)
 		return ret;
-	sock->sk->sk_allocation = GFP_NOFS;
+	sock->sk->sk_allocation = GFP_NOFS | __GFP_MEMALLOC;
 
 #ifdef CONFIG_LOCKDEP
 	lockdep_set_class(&sock->sk->sk_lock, &socket_class);
@@ -509,6 +509,9 @@ static int ceph_tcp_connect(struct ceph_connection *con)
 
 		return ret;
 	}
+
+	sk_set_memalloc(sock->sk);
+
 	con->sock = sock;
 	return 0;
 }
@@ -2769,8 +2772,11 @@ static void con_work(struct work_struct *work)
 {
 	struct ceph_connection *con = container_of(work, struct ceph_connection,
 						   work.work);
+	unsigned long pflags = current->flags;
 	bool fault;
 
+	current->flags |= PF_MEMALLOC;
+
 	mutex_lock(&con->mutex);
 	while (true) {
 		int ret;
@@ -2824,6 +2830,8 @@ static void con_work(struct work_struct *work)
 		con_fault_finish(con);
 
 	con->ops->put(con);
+
+	tsk_restore_flags(current, pflags, PF_MEMALLOC);
 }
 
 /*
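
The messenger.c changes let ceph socket I/O make progress while servicing memory reclaim: the socket's allocation mask gains __GFP_MEMALLOC, the socket is marked with sk_set_memalloc(), and con_work() temporarily sets PF_MEMALLOC on the current task, restoring the previous flag state on exit. A kernel-style sketch of that save/set/restore pattern around a work handler is below; demo_con_work() and do_net_io() are stand-ins, not functions from the patch, and tsk_restore_flags() is the helper available in kernels of this era.

#include <linux/sched.h>
#include <linux/workqueue.h>

/* Placeholder for the actual socket send/receive done by the handler. */
static void do_net_io(void)
{
}

/*
 * Work handler that may be needed to make progress during memory reclaim:
 * mark the task PF_MEMALLOC for the duration so its allocations can dip
 * into the memalloc reserves, then restore the original flag state.
 */
static void demo_con_work(struct work_struct *work)
{
	unsigned long pflags = current->flags;

	current->flags |= PF_MEMALLOC;

	do_net_io();

	/* Clears PF_MEMALLOC only if it was not already set on entry. */
	tsk_restore_flags(current, pflags, PF_MEMALLOC);
}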