 drivers/block/rbd.c   | 33
 fs/ceph/locks.c       | 73
 fs/ceph/mds_client.c  | 65
 fs/ceph/super.h       |  9
 net/ceph/osd_client.c |  2
 5 files changed, 108 insertions, 74 deletions
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index d6d314027b5d..3063452e55da 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -519,8 +519,8 @@ static const struct block_device_operations rbd_bd_ops = {
 };
 
 /*
- * Initialize an rbd client instance.
- * We own *ceph_opts.
+ * Initialize an rbd client instance.  Success or not, this function
+ * consumes ceph_opts.
  */
 static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts)
 {
@@ -675,7 +675,8 @@ static int parse_rbd_opts_token(char *c, void *private)
 
 /*
  * Get a ceph client with specific addr and configuration, if one does
- * not exist create it.
+ * not exist create it.  Either way, ceph_opts is consumed by this
+ * function.
  */
 static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts)
 {
@@ -4697,8 +4698,10 @@ out:
         return ret;
 }
 
-/* Undo whatever state changes are made by v1 or v2 image probe */
-
+/*
+ * Undo whatever state changes are made by v1 or v2 header info
+ * call.
+ */
 static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
 {
         struct rbd_image_header *header;
@@ -4902,9 +4905,10 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping)
         int tmp;
 
         /*
-         * Get the id from the image id object.  If it's not a
-         * format 2 image, we'll get ENOENT back, and we'll assume
-         * it's a format 1 image.
+         * Get the id from the image id object.  Unless there's an
+         * error, rbd_dev->spec->image_id will be filled in with
+         * a dynamically-allocated string, and rbd_dev->image_format
+         * will be set to either 1 or 2.
          */
         ret = rbd_dev_image_id(rbd_dev);
         if (ret)
@@ -4992,7 +4996,6 @@ static ssize_t rbd_add(struct bus_type *bus,
                 rc = PTR_ERR(rbdc);
                 goto err_out_args;
         }
-        ceph_opts = NULL;       /* rbd_dev client now owns this */
 
         /* pick the pool */
         osdc = &rbdc->client->osdc;
@@ -5027,18 +5030,18 @@ static ssize_t rbd_add(struct bus_type *bus,
         rbd_dev->mapping.read_only = read_only;
 
         rc = rbd_dev_device_setup(rbd_dev);
-        if (!rc)
-                return count;
+        if (rc) {
+                rbd_dev_image_release(rbd_dev);
+                goto err_out_module;
+        }
+
+        return count;
 
-        rbd_dev_image_release(rbd_dev);
 err_out_rbd_dev:
         rbd_dev_destroy(rbd_dev);
 err_out_client:
         rbd_put_client(rbdc);
 err_out_args:
-        if (ceph_opts)
-                ceph_destroy_options(ceph_opts);
-        kfree(rbd_opts);
         rbd_spec_put(spec);
 err_out_module:
         module_put(THIS_MODULE);
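
Taken together, the rbd.c hunks establish one ownership rule: rbd_get_client() (via rbd_client_create()) consumes ceph_opts whether it succeeds or fails, so rbd_add() no longer needs the "ceph_opts = NULL" handoff or a ceph_destroy_options() call in its error path. A minimal sketch of the resulting caller pattern, simplified from rbd_add() above (the probe and device-setup steps are elided; this is illustrative only, not part of the patch):

/* Illustrative sketch only -- not part of the patch. */
static int rbd_add_sketch(struct ceph_options *ceph_opts)
{
        struct rbd_client *rbdc;

        rbdc = rbd_get_client(ceph_opts);  /* consumes ceph_opts, even on error */
        if (IS_ERR(rbdc))
                return PTR_ERR(rbdc);      /* nothing left to destroy here */

        /* ... image probe and device setup using rbdc ... */

        rbd_put_client(rbdc);
        return 0;
}
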
diff --git a/fs/ceph/locks.c b/fs/ceph/locks.c
index 202dd3d68be0..ebbf680378e2 100644
--- a/fs/ceph/locks.c
+++ b/fs/ceph/locks.c
@@ -191,27 +191,23 @@ void ceph_count_locks(struct inode *inode, int *fcntl_count, int *flock_count)
 }
 
 /**
- * Encode the flock and fcntl locks for the given inode into the pagelist.
- * Format is: #fcntl locks, sequential fcntl locks, #flock locks,
- * sequential flock locks.
- * Must be called with lock_flocks() already held.
- * If we encounter more of a specific lock type than expected,
- * we return the value 1.
+ * Encode the flock and fcntl locks for the given inode into the ceph_filelock
+ * array. Must be called with lock_flocks() already held.
+ * If we encounter more of a specific lock type than expected, return -ENOSPC.
  */
-int ceph_encode_locks(struct inode *inode, struct ceph_pagelist *pagelist,
-                      int num_fcntl_locks, int num_flock_locks)
+int ceph_encode_locks_to_buffer(struct inode *inode,
+                                struct ceph_filelock *flocks,
+                                int num_fcntl_locks, int num_flock_locks)
 {
         struct file_lock *lock;
-        struct ceph_filelock cephlock;
         int err = 0;
         int seen_fcntl = 0;
         int seen_flock = 0;
+        int l = 0;
 
         dout("encoding %d flock and %d fcntl locks", num_flock_locks,
              num_fcntl_locks);
-        err = ceph_pagelist_append(pagelist, &num_fcntl_locks, sizeof(u32));
-        if (err)
-                goto fail;
+
         for (lock = inode->i_flock; lock != NULL; lock = lock->fl_next) {
                 if (lock->fl_flags & FL_POSIX) {
                         ++seen_fcntl;
@@ -219,19 +215,12 @@ int ceph_encode_locks(struct inode *inode, struct ceph_pagelist *pagelist,
                                 err = -ENOSPC;
                                 goto fail;
                         }
-                        err = lock_to_ceph_filelock(lock, &cephlock);
+                        err = lock_to_ceph_filelock(lock, &flocks[l]);
                         if (err)
                                 goto fail;
-                        err = ceph_pagelist_append(pagelist, &cephlock,
-                                                   sizeof(struct ceph_filelock));
+                        ++l;
                 }
-                if (err)
-                        goto fail;
         }
-
-        err = ceph_pagelist_append(pagelist, &num_flock_locks, sizeof(u32));
-        if (err)
-                goto fail;
         for (lock = inode->i_flock; lock != NULL; lock = lock->fl_next) {
                 if (lock->fl_flags & FL_FLOCK) {
                         ++seen_flock;
@@ -239,19 +228,51 @@ int ceph_encode_locks(struct inode *inode, struct ceph_pagelist *pagelist,
                                 err = -ENOSPC;
                                 goto fail;
                         }
-                        err = lock_to_ceph_filelock(lock, &cephlock);
+                        err = lock_to_ceph_filelock(lock, &flocks[l]);
                         if (err)
                                 goto fail;
-                        err = ceph_pagelist_append(pagelist, &cephlock,
-                                                   sizeof(struct ceph_filelock));
+                        ++l;
                 }
-                if (err)
-                        goto fail;
         }
 fail:
         return err;
 }
 
+/**
+ * Copy the encoded flock and fcntl locks into the pagelist.
+ * Format is: #fcntl locks, sequential fcntl locks, #flock locks,
+ * sequential flock locks.
+ * Returns zero on success.
+ */
+int ceph_locks_to_pagelist(struct ceph_filelock *flocks,
+                           struct ceph_pagelist *pagelist,
+                           int num_fcntl_locks, int num_flock_locks)
+{
+        int err = 0;
+        __le32 nlocks;
+
+        nlocks = cpu_to_le32(num_fcntl_locks);
+        err = ceph_pagelist_append(pagelist, &nlocks, sizeof(nlocks));
+        if (err)
+                goto out_fail;
+
+        err = ceph_pagelist_append(pagelist, flocks,
+                                   num_fcntl_locks * sizeof(*flocks));
+        if (err)
+                goto out_fail;
+
+        nlocks = cpu_to_le32(num_flock_locks);
+        err = ceph_pagelist_append(pagelist, &nlocks, sizeof(nlocks));
+        if (err)
+                goto out_fail;
+
+        err = ceph_pagelist_append(pagelist,
+                                   &flocks[num_fcntl_locks],
+                                   num_flock_locks * sizeof(*flocks));
+out_fail:
+        return err;
+}
+
 /*
  * Given a pointer to a lock, convert it to a ceph filelock
  */
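
The on-wire layout produced by the new ceph_locks_to_pagelist() is unchanged from what ceph_encode_locks() used to append directly; only the staging through a ceph_filelock array is new. Below is an illustrative view of that blob and of the length arithmetic used for rec.v2.flock_len in mds_client.c; flock_blob_len() is a hypothetical helper for the sketch, not part of the patch:

/*
 * Illustrative layout of the lock blob appended to the pagelist
 * (not a real struct -- both arrays are variable length):
 *
 *      __le32                  num_fcntl_locks;
 *      struct ceph_filelock    fcntl_locks[num_fcntl_locks];
 *      __le32                  num_flock_locks;
 *      struct ceph_filelock    flock_locks[num_flock_locks];
 */
static inline u32 flock_blob_len(int num_fcntl_locks, int num_flock_locks)
{
        return 2 * sizeof(u32) +
               (num_fcntl_locks + num_flock_locks) *
               sizeof(struct ceph_filelock);
}
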
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index 4f22671a5bd4..4d2920304be8 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -2478,39 +2478,44 @@ static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap,
 
         if (recon_state->flock) {
                 int num_fcntl_locks, num_flock_locks;
-                struct ceph_pagelist_cursor trunc_point;
+                struct ceph_filelock *flocks;
 
-                ceph_pagelist_set_cursor(pagelist, &trunc_point);
-                do {
-                        lock_flocks();
-                        ceph_count_locks(inode, &num_fcntl_locks,
-                                         &num_flock_locks);
-                        rec.v2.flock_len = (2*sizeof(u32) +
-                                            (num_fcntl_locks+num_flock_locks) *
-                                            sizeof(struct ceph_filelock));
-                        unlock_flocks();
-
-                        /* pre-alloc pagelist */
-                        ceph_pagelist_truncate(pagelist, &trunc_point);
-                        err = ceph_pagelist_append(pagelist, &rec, reclen);
-                        if (!err)
-                                err = ceph_pagelist_reserve(pagelist,
-                                                            rec.v2.flock_len);
-
-                        /* encode locks */
-                        if (!err) {
-                                lock_flocks();
-                                err = ceph_encode_locks(inode,
-                                                        pagelist,
-                                                        num_fcntl_locks,
-                                                        num_flock_locks);
-                                unlock_flocks();
-                        }
-                } while (err == -ENOSPC);
+encode_again:
+                lock_flocks();
+                ceph_count_locks(inode, &num_fcntl_locks, &num_flock_locks);
+                unlock_flocks();
+                flocks = kmalloc((num_fcntl_locks+num_flock_locks) *
+                                 sizeof(struct ceph_filelock), GFP_NOFS);
+                if (!flocks) {
+                        err = -ENOMEM;
+                        goto out_free;
+                }
+                lock_flocks();
+                err = ceph_encode_locks_to_buffer(inode, flocks,
+                                                  num_fcntl_locks,
+                                                  num_flock_locks);
+                unlock_flocks();
+                if (err) {
+                        kfree(flocks);
+                        if (err == -ENOSPC)
+                                goto encode_again;
+                        goto out_free;
+                }
+                /*
+                 * number of encoded locks is stable, so copy to pagelist
+                 */
+                rec.v2.flock_len = cpu_to_le32(2*sizeof(u32) +
+                                    (num_fcntl_locks+num_flock_locks) *
+                                    sizeof(struct ceph_filelock));
+                err = ceph_pagelist_append(pagelist, &rec, reclen);
+                if (!err)
+                        err = ceph_locks_to_pagelist(flocks, pagelist,
+                                                     num_fcntl_locks,
+                                                     num_flock_locks);
+                kfree(flocks);
         } else {
                 err = ceph_pagelist_append(pagelist, &rec, reclen);
         }
-
 out_free:
         kfree(path);
 out_dput:
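
The flock branch above replaces the old truncate-and-retry loop over the pagelist with a count / allocate / encode / retry-on-ENOSPC sequence, so kmalloc() happens outside lock_flocks() and the pagelist is written only once the lock count is known to be stable. A condensed sketch of that pattern, using only the helpers introduced in this patch (the function name is hypothetical and error handling is trimmed):

/* Illustrative condensation of the flock branch of encode_caps_cb(). */
static int encode_inode_locks_sketch(struct inode *inode,
                                     struct ceph_pagelist *pagelist)
{
        int num_fcntl_locks, num_flock_locks;
        struct ceph_filelock *flocks;
        int err;

again:
        lock_flocks();
        ceph_count_locks(inode, &num_fcntl_locks, &num_flock_locks);
        unlock_flocks();

        /* allocate outside the lock; the counts may grow stale meanwhile */
        flocks = kmalloc((num_fcntl_locks + num_flock_locks) *
                         sizeof(*flocks), GFP_NOFS);
        if (!flocks)
                return -ENOMEM;

        lock_flocks();
        err = ceph_encode_locks_to_buffer(inode, flocks,
                                          num_fcntl_locks, num_flock_locks);
        unlock_flocks();
        if (err == -ENOSPC) {           /* more locks appeared; recount and retry */
                kfree(flocks);
                goto again;
        }

        if (!err)
                err = ceph_locks_to_pagelist(flocks, pagelist,
                                             num_fcntl_locks, num_flock_locks);
        kfree(flocks);
        return err;
}
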
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index 8696be2ff679..7ccfdb4aea2e 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -822,8 +822,13 @@ extern const struct export_operations ceph_export_ops;
 extern int ceph_lock(struct file *file, int cmd, struct file_lock *fl);
 extern int ceph_flock(struct file *file, int cmd, struct file_lock *fl);
 extern void ceph_count_locks(struct inode *inode, int *p_num, int *f_num);
-extern int ceph_encode_locks(struct inode *i, struct ceph_pagelist *p,
-                             int p_locks, int f_locks);
+extern int ceph_encode_locks_to_buffer(struct inode *inode,
+                                       struct ceph_filelock *flocks,
+                                       int num_fcntl_locks,
+                                       int num_flock_locks);
+extern int ceph_locks_to_pagelist(struct ceph_filelock *flocks,
+                                  struct ceph_pagelist *pagelist,
+                                  int num_fcntl_locks, int num_flock_locks);
 extern int lock_to_ceph_filelock(struct file_lock *fl, struct ceph_filelock *c);
 
 /* debugfs.c */
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index d5953b87918c..3a246a6cab47 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -1675,13 +1675,13 @@ static void kick_requests(struct ceph_osd_client *osdc, int force_resend)
                 __register_request(osdc, req);
                 __unregister_linger_request(osdc, req);
         }
+        reset_changed_osds(osdc);
         mutex_unlock(&osdc->request_mutex);
 
         if (needmap) {
                 dout("%d requests for down osds, need new map\n", needmap);
                 ceph_monc_request_next_osdmap(&osdc->client->monc);
         }
-        reset_changed_osds(osdc);
 }
 
 