author	Linus Torvalds <torvalds@linux-foundation.org>	2018-01-11 19:57:32 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2018-01-11 19:57:32 -0500
commit	1545dec46db3858bbce84c2065b579e2925706ab (patch)
tree	3b515a27bc0da314d2c79469171d70f9299d0956
parent	ab2781592a223510da6b19069a2ab8ba0e3ef2f5 (diff)
parent	21acdf45f4958135940f0b4767185cf911d4b010 (diff)
Merge tag 'ceph-for-4.15-rc8' of git://github.com/ceph/ceph-client
Pull ceph fixes from Ilya Dryomov:
 "Two rbd fixes for 4.12 and 4.2 issues respectively, marked for stable"

* tag 'ceph-for-4.15-rc8' of git://github.com/ceph/ceph-client:
  rbd: set max_segments to USHRT_MAX
  rbd: reacquire lock should update lock owner client id
-rw-r--r--	drivers/block/rbd.c	18
1 file changed, 12 insertions(+), 6 deletions(-)
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 38fc5f397fde..cc93522a6d41 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -3047,13 +3047,21 @@ static void format_lock_cookie(struct rbd_device *rbd_dev, char *buf)
 	mutex_unlock(&rbd_dev->watch_mutex);
 }
 
+static void __rbd_lock(struct rbd_device *rbd_dev, const char *cookie)
+{
+	struct rbd_client_id cid = rbd_get_cid(rbd_dev);
+
+	strcpy(rbd_dev->lock_cookie, cookie);
+	rbd_set_owner_cid(rbd_dev, &cid);
+	queue_work(rbd_dev->task_wq, &rbd_dev->acquired_lock_work);
+}
+
 /*
  * lock_rwsem must be held for write
  */
 static int rbd_lock(struct rbd_device *rbd_dev)
 {
 	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
-	struct rbd_client_id cid = rbd_get_cid(rbd_dev);
 	char cookie[32];
 	int ret;
 
@@ -3068,9 +3076,7 @@ static int rbd_lock(struct rbd_device *rbd_dev)
 		return ret;
 
 	rbd_dev->lock_state = RBD_LOCK_STATE_LOCKED;
-	strcpy(rbd_dev->lock_cookie, cookie);
-	rbd_set_owner_cid(rbd_dev, &cid);
-	queue_work(rbd_dev->task_wq, &rbd_dev->acquired_lock_work);
+	__rbd_lock(rbd_dev, cookie);
 	return 0;
 }
 
@@ -3856,7 +3862,7 @@ static void rbd_reacquire_lock(struct rbd_device *rbd_dev)
 		queue_delayed_work(rbd_dev->task_wq,
 				   &rbd_dev->lock_dwork, 0);
 	} else {
-		strcpy(rbd_dev->lock_cookie, cookie);
+		__rbd_lock(rbd_dev, cookie);
 	}
 }
 
@@ -4381,7 +4387,7 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
 	segment_size = rbd_obj_bytes(&rbd_dev->header);
 	blk_queue_max_hw_sectors(q, segment_size / SECTOR_SIZE);
 	q->limits.max_sectors = queue_max_hw_sectors(q);
-	blk_queue_max_segments(q, segment_size / SECTOR_SIZE);
+	blk_queue_max_segments(q, USHRT_MAX);
 	blk_queue_max_segment_size(q, segment_size);
 	blk_queue_io_min(q, segment_size);
 	blk_queue_io_opt(q, segment_size);
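
Note on the lock fix: the new __rbd_lock() helper makes every path that takes ownership of the exclusive lock update the cookie, the owner client id, and the acquired-lock work together, so the reacquire path can no longer refresh only the cookie. The following stand-alone user-space sketch is illustrative only (toy types and names, not the kernel's rbd structures) and models that pattern:

	/* Illustrative model only -- toy types, not kernel code. */
	#include <stdio.h>
	#include <string.h>

	struct client_id { unsigned long long gid, handle; };

	struct dev_state {
		char lock_cookie[32];
		struct client_id owner_cid;
	};

	/*
	 * Mirror of the __rbd_lock() pattern: record the cookie and the
	 * owner client id in one place, so a reacquire path cannot update
	 * one and forget the other.
	 */
	static void take_lock(struct dev_state *dev, const char *cookie,
			      struct client_id cid)
	{
		strcpy(dev->lock_cookie, cookie);
		dev->owner_cid = cid;
		/* the kernel code additionally queues acquired_lock_work here */
	}

	int main(void)
	{
		struct dev_state dev = { "", { 0, 0 } };
		struct client_id me = { 4242, 1 };

		take_lock(&dev, "auto 123", me);   /* initial acquisition */
		take_lock(&dev, "auto 124", me);   /* reacquire with a fresh cookie */

		printf("cookie=%s owner=%llu.%llu\n",
		       dev.lock_cookie, dev.owner_cid.gid, dev.owner_cid.handle);
		return 0;
	}

The max_segments change is independent: rbd_init_disk() previously derived the segment count from segment_size / SECTOR_SIZE, and as the patch title suggests it is now pinned to USHRT_MAX, since the block layer's segment limit is an unsigned short and a value derived from large object sizes could otherwise exceed it (a reading of the change, not stated on this page).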