Diffstat (limited to 'fs')
-rw-r--r--  fs/aio.c                    36
-rw-r--r--  fs/ceph/locks.c             73
-rw-r--r--  fs/ceph/mds_client.c        65
-rw-r--r--  fs/ceph/super.h              9
-rw-r--r--  fs/ocfs2/dlm/dlmrecovery.c   1
-rw-r--r--  fs/ocfs2/namei.c             4
-rw-r--r--  fs/proc/kmsg.c              10
7 files changed, 113 insertions, 85 deletions
diff --git a/fs/aio.c b/fs/aio.c
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -141,9 +141,6 @@ static void aio_free_ring(struct kioctx *ctx)
 	for (i = 0; i < ctx->nr_pages; i++)
 		put_page(ctx->ring_pages[i]);
 
-	if (ctx->mmap_size)
-		vm_munmap(ctx->mmap_base, ctx->mmap_size);
-
 	if (ctx->ring_pages && ctx->ring_pages != ctx->internal_pages)
 		kfree(ctx->ring_pages);
 }
@@ -322,11 +319,6 @@ static void free_ioctx(struct kioctx *ctx)
 
 	aio_free_ring(ctx);
 
-	spin_lock(&aio_nr_lock);
-	BUG_ON(aio_nr - ctx->max_reqs > aio_nr);
-	aio_nr -= ctx->max_reqs;
-	spin_unlock(&aio_nr_lock);
-
 	pr_debug("freeing %p\n", ctx);
 
 	/*
@@ -435,17 +427,24 @@ static void kill_ioctx(struct kioctx *ctx)
 {
 	if (!atomic_xchg(&ctx->dead, 1)) {
 		hlist_del_rcu(&ctx->list);
-		/* Between hlist_del_rcu() and dropping the initial ref */
-		synchronize_rcu();
 
 		/*
-		 * We can't punt to workqueue here because put_ioctx() ->
-		 * free_ioctx() will unmap the ringbuffer, and that has to be
-		 * done in the original process's context. kill_ioctx_rcu/work()
-		 * exist for exit_aio(), as in that path free_ioctx() won't do
-		 * the unmap.
+		 * It'd be more correct to do this in free_ioctx(), after all
+		 * the outstanding kiocbs have finished - but by then io_destroy
+		 * has already returned, so io_setup() could potentially return
+		 * -EAGAIN with no ioctxs actually in use (as far as userspace
+		 * could tell).
 		 */
-		kill_ioctx_work(&ctx->rcu_work);
+		spin_lock(&aio_nr_lock);
+		BUG_ON(aio_nr - ctx->max_reqs > aio_nr);
+		aio_nr -= ctx->max_reqs;
+		spin_unlock(&aio_nr_lock);
+
+		if (ctx->mmap_size)
+			vm_munmap(ctx->mmap_base, ctx->mmap_size);
+
+		/* Between hlist_del_rcu() and dropping the initial ref */
+		call_rcu(&ctx->rcu_head, kill_ioctx_rcu);
 	}
 }
 
@@ -495,10 +494,7 @@ void exit_aio(struct mm_struct *mm)
 		 */
 		ctx->mmap_size = 0;
 
-		if (!atomic_xchg(&ctx->dead, 1)) {
-			hlist_del_rcu(&ctx->list);
-			call_rcu(&ctx->rcu_head, kill_ioctx_rcu);
-		}
+		kill_ioctx(ctx);
 	}
 }
 
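The comment added to kill_ioctx() above is about when the aio_nr reservation is given back: the accounting (and the ring unmap) now happens before io_destroy() returns rather than in free_ioctx(), so a following io_setup() cannot spuriously fail with -EAGAIN while no ioctxs are actually in use. A rough userspace illustration of that setup/destroy sequence follows; it is not part of this diff, uses raw syscalls so no libaio is needed, and the event count of 128 is arbitrary.

/*
 * Illustration only (not from the patch): io_setup() reserves events against
 * the global /proc/sys/fs/aio-max-nr limit; io_destroy() should return them
 * promptly. Build with: gcc -o aio_demo aio_demo.c
 */
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/aio_abi.h>

int main(void)
{
	aio_context_t ctx = 0;

	/* Reserves 128 events against the aio-max-nr limit. */
	if (syscall(SYS_io_setup, 128, &ctx) < 0) {
		fprintf(stderr, "io_setup: %s\n", strerror(errno));
		return 1;
	}

	/*
	 * After this returns, another io_setup() is expected to succeed
	 * again, which is why kill_ioctx() now drops the aio_nr reservation
	 * before io_destroy() returns instead of waiting for free_ioctx().
	 */
	if (syscall(SYS_io_destroy, ctx) < 0) {
		fprintf(stderr, "io_destroy: %s\n", strerror(errno));
		return 1;
	}
	return 0;
}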
diff --git a/fs/ceph/locks.c b/fs/ceph/locks.c
index 202dd3d68be0..ebbf680378e2 100644
--- a/fs/ceph/locks.c
+++ b/fs/ceph/locks.c
@@ -191,27 +191,23 @@ void ceph_count_locks(struct inode *inode, int *fcntl_count, int *flock_count)
 }
 
 /**
- * Encode the flock and fcntl locks for the given inode into the pagelist.
- * Format is: #fcntl locks, sequential fcntl locks, #flock locks,
- * sequential flock locks.
- * Must be called with lock_flocks() already held.
- * If we encounter more of a specific lock type than expected,
- * we return the value 1.
+ * Encode the flock and fcntl locks for the given inode into the ceph_filelock
+ * array. Must be called with lock_flocks() already held.
+ * If we encounter more of a specific lock type than expected, return -ENOSPC.
  */
-int ceph_encode_locks(struct inode *inode, struct ceph_pagelist *pagelist,
-		      int num_fcntl_locks, int num_flock_locks)
+int ceph_encode_locks_to_buffer(struct inode *inode,
+				struct ceph_filelock *flocks,
+				int num_fcntl_locks, int num_flock_locks)
 {
 	struct file_lock *lock;
-	struct ceph_filelock cephlock;
 	int err = 0;
 	int seen_fcntl = 0;
 	int seen_flock = 0;
+	int l = 0;
 
 	dout("encoding %d flock and %d fcntl locks", num_flock_locks,
 	     num_fcntl_locks);
-	err = ceph_pagelist_append(pagelist, &num_fcntl_locks, sizeof(u32));
-	if (err)
-		goto fail;
+
 	for (lock = inode->i_flock; lock != NULL; lock = lock->fl_next) {
 		if (lock->fl_flags & FL_POSIX) {
 			++seen_fcntl;
@@ -219,19 +215,12 @@ int ceph_encode_locks(struct inode *inode, struct ceph_pagelist *pagelist,
 				err = -ENOSPC;
 				goto fail;
 			}
-			err = lock_to_ceph_filelock(lock, &cephlock);
+			err = lock_to_ceph_filelock(lock, &flocks[l]);
 			if (err)
 				goto fail;
-			err = ceph_pagelist_append(pagelist, &cephlock,
-						   sizeof(struct ceph_filelock));
+			++l;
 		}
-		if (err)
-			goto fail;
 	}
-
-	err = ceph_pagelist_append(pagelist, &num_flock_locks, sizeof(u32));
-	if (err)
-		goto fail;
 	for (lock = inode->i_flock; lock != NULL; lock = lock->fl_next) {
 		if (lock->fl_flags & FL_FLOCK) {
 			++seen_flock;
@@ -239,19 +228,51 @@ int ceph_encode_locks(struct inode *inode, struct ceph_pagelist *pagelist,
 				err = -ENOSPC;
 				goto fail;
 			}
-			err = lock_to_ceph_filelock(lock, &cephlock);
+			err = lock_to_ceph_filelock(lock, &flocks[l]);
 			if (err)
 				goto fail;
-			err = ceph_pagelist_append(pagelist, &cephlock,
-						   sizeof(struct ceph_filelock));
+			++l;
 		}
-		if (err)
-			goto fail;
 	}
 fail:
 	return err;
 }
 
+/**
+ * Copy the encoded flock and fcntl locks into the pagelist.
+ * Format is: #fcntl locks, sequential fcntl locks, #flock locks,
+ * sequential flock locks.
+ * Returns zero on success.
+ */
+int ceph_locks_to_pagelist(struct ceph_filelock *flocks,
+			   struct ceph_pagelist *pagelist,
+			   int num_fcntl_locks, int num_flock_locks)
+{
+	int err = 0;
+	__le32 nlocks;
+
+	nlocks = cpu_to_le32(num_fcntl_locks);
+	err = ceph_pagelist_append(pagelist, &nlocks, sizeof(nlocks));
+	if (err)
+		goto out_fail;
+
+	err = ceph_pagelist_append(pagelist, flocks,
+				   num_fcntl_locks * sizeof(*flocks));
+	if (err)
+		goto out_fail;
+
+	nlocks = cpu_to_le32(num_flock_locks);
+	err = ceph_pagelist_append(pagelist, &nlocks, sizeof(nlocks));
+	if (err)
+		goto out_fail;
+
+	err = ceph_pagelist_append(pagelist,
+				   &flocks[num_fcntl_locks],
+				   num_flock_locks * sizeof(*flocks));
+out_fail:
+	return err;
+}
+
 /*
  * Given a pointer to a lock, convert it to a ceph filelock
  */
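Together, the two helpers split what ceph_encode_locks() used to do in one pass: ceph_encode_locks_to_buffer() fills a plain ceph_filelock array while lock_flocks() is held, and ceph_locks_to_pagelist() later appends the two little-endian counts plus the array to the pagelist outside the lock (the apparent motivation being that pagelist appends can allocate and may sleep). As a small reference sketch, not part of the patch and with a made-up helper name, the number of bytes the second stage adds is:

/*
 * Hypothetical helper, for illustration only: bytes appended by
 * ceph_locks_to_pagelist() - two __le32 counts followed by the packed
 * ceph_filelock entries. This is the quantity rec.v2.flock_len carries
 * in the mds_client.c hunk below.
 */
static size_t flock_payload_len(int num_fcntl_locks, int num_flock_locks)
{
	return 2 * sizeof(__le32) +
	       (num_fcntl_locks + num_flock_locks) * sizeof(struct ceph_filelock);
}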
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index 4f22671a5bd4..4d2920304be8 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -2478,39 +2478,44 @@ static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap,
 
 	if (recon_state->flock) {
 		int num_fcntl_locks, num_flock_locks;
-		struct ceph_pagelist_cursor trunc_point;
+		struct ceph_filelock *flocks;
 
-		ceph_pagelist_set_cursor(pagelist, &trunc_point);
-		do {
-			lock_flocks();
-			ceph_count_locks(inode, &num_fcntl_locks,
-					 &num_flock_locks);
-			rec.v2.flock_len = (2*sizeof(u32) +
-					    (num_fcntl_locks+num_flock_locks) *
-					    sizeof(struct ceph_filelock));
-			unlock_flocks();
-
-			/* pre-alloc pagelist */
-			ceph_pagelist_truncate(pagelist, &trunc_point);
-			err = ceph_pagelist_append(pagelist, &rec, reclen);
-			if (!err)
-				err = ceph_pagelist_reserve(pagelist,
-							    rec.v2.flock_len);
-
-			/* encode locks */
-			if (!err) {
-				lock_flocks();
-				err = ceph_encode_locks(inode,
-							pagelist,
-							num_fcntl_locks,
-							num_flock_locks);
-				unlock_flocks();
-			}
-		} while (err == -ENOSPC);
+encode_again:
+		lock_flocks();
+		ceph_count_locks(inode, &num_fcntl_locks, &num_flock_locks);
+		unlock_flocks();
+		flocks = kmalloc((num_fcntl_locks+num_flock_locks) *
+				 sizeof(struct ceph_filelock), GFP_NOFS);
+		if (!flocks) {
+			err = -ENOMEM;
+			goto out_free;
+		}
+		lock_flocks();
+		err = ceph_encode_locks_to_buffer(inode, flocks,
+						  num_fcntl_locks,
+						  num_flock_locks);
+		unlock_flocks();
+		if (err) {
+			kfree(flocks);
+			if (err == -ENOSPC)
+				goto encode_again;
+			goto out_free;
+		}
+		/*
+		 * number of encoded locks is stable, so copy to pagelist
+		 */
+		rec.v2.flock_len = cpu_to_le32(2*sizeof(u32) +
+				    (num_fcntl_locks+num_flock_locks) *
+				    sizeof(struct ceph_filelock));
+		err = ceph_pagelist_append(pagelist, &rec, reclen);
+		if (!err)
+			err = ceph_locks_to_pagelist(flocks, pagelist,
+						     num_fcntl_locks,
+						     num_flock_locks);
+		kfree(flocks);
 	} else {
 		err = ceph_pagelist_append(pagelist, &rec, reclen);
 	}
-
 out_free:
 	kfree(path);
 out_dput:
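The encode_again loop above replaces the old truncate-and-retry scheme on the pagelist: lock counts are taken under lock_flocks(), the buffer is allocated with kmalloc(GFP_NOFS) outside the lock, and if additional locks appeared before the buffer was filled, ceph_encode_locks_to_buffer() returns -ENOSPC and the whole sequence restarts. A condensed sketch of that pattern, using the functions introduced in this diff but wrapped in a hypothetical snapshot_locks() helper purely for readability:

/*
 * Sketch only - snapshot_locks() and its out-parameters are hypothetical;
 * the body restates the encode_again logic from encode_caps_cb() above.
 */
static int snapshot_locks(struct inode *inode, struct ceph_filelock **pflocks,
			  int *pnum_fcntl, int *pnum_flock)
{
	struct ceph_filelock *flocks;
	int num_fcntl_locks, num_flock_locks, err;

again:
	lock_flocks();
	ceph_count_locks(inode, &num_fcntl_locks, &num_flock_locks);
	unlock_flocks();

	/* kmalloc may sleep, so it happens outside lock_flocks() */
	flocks = kmalloc((num_fcntl_locks + num_flock_locks) * sizeof(*flocks),
			 GFP_NOFS);
	if (!flocks)
		return -ENOMEM;

	lock_flocks();
	err = ceph_encode_locks_to_buffer(inode, flocks,
					  num_fcntl_locks, num_flock_locks);
	unlock_flocks();
	if (err) {
		kfree(flocks);
		if (err == -ENOSPC)
			goto again;	/* a lock was added meanwhile; recount */
		return err;
	}

	*pflocks = flocks;
	*pnum_fcntl = num_fcntl_locks;
	*pnum_flock = num_flock_locks;
	return 0;
}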
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index 8696be2ff679..7ccfdb4aea2e 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -822,8 +822,13 @@ extern const struct export_operations ceph_export_ops;
 extern int ceph_lock(struct file *file, int cmd, struct file_lock *fl);
 extern int ceph_flock(struct file *file, int cmd, struct file_lock *fl);
 extern void ceph_count_locks(struct inode *inode, int *p_num, int *f_num);
-extern int ceph_encode_locks(struct inode *i, struct ceph_pagelist *p,
-			     int p_locks, int f_locks);
+extern int ceph_encode_locks_to_buffer(struct inode *inode,
+				       struct ceph_filelock *flocks,
+				       int num_fcntl_locks,
+				       int num_flock_locks);
+extern int ceph_locks_to_pagelist(struct ceph_filelock *flocks,
+				  struct ceph_pagelist *pagelist,
+				  int num_fcntl_locks, int num_flock_locks);
 extern int lock_to_ceph_filelock(struct file_lock *fl, struct ceph_filelock *c);
 
 /* debugfs.c */
diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c
index b3fdd1a323d6..e68588e6b1e8 100644
--- a/fs/ocfs2/dlm/dlmrecovery.c
+++ b/fs/ocfs2/dlm/dlmrecovery.c
@@ -1408,6 +1408,7 @@ int dlm_mig_lockres_handler(struct o2net_msg *msg, u32 len, void *data,
 				     mres->lockname_len, mres->lockname);
 				ret = -EFAULT;
 				spin_unlock(&res->spinlock);
+				dlm_lockres_put(res);
 				goto leave;
 			}
 			res->state |= DLM_LOCK_RES_MIGRATING;
diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
index 04ee1b57c243..b4a5cdf9dbc5 100644
--- a/fs/ocfs2/namei.c
+++ b/fs/ocfs2/namei.c
@@ -947,7 +947,7 @@ leave:
 	ocfs2_free_dir_lookup_result(&orphan_insert);
 	ocfs2_free_dir_lookup_result(&lookup);
 
-	if (status)
+	if (status && (status != -ENOTEMPTY))
 		mlog_errno(status);
 
 	return status;
@@ -2216,7 +2216,7 @@ out:
 
 	brelse(orphan_dir_bh);
 
-	return 0;
+	return ret;
 }
 
 int ocfs2_create_inode_in_orphan(struct inode *dir,
diff --git a/fs/proc/kmsg.c b/fs/proc/kmsg.c
index bd4b5a740ff1..bdfabdaefdce 100644
--- a/fs/proc/kmsg.c
+++ b/fs/proc/kmsg.c
@@ -21,12 +21,12 @@ extern wait_queue_head_t log_wait;
 
 static int kmsg_open(struct inode * inode, struct file * file)
 {
-	return do_syslog(SYSLOG_ACTION_OPEN, NULL, 0, SYSLOG_FROM_FILE);
+	return do_syslog(SYSLOG_ACTION_OPEN, NULL, 0, SYSLOG_FROM_PROC);
 }
 
 static int kmsg_release(struct inode * inode, struct file * file)
 {
-	(void) do_syslog(SYSLOG_ACTION_CLOSE, NULL, 0, SYSLOG_FROM_FILE);
+	(void) do_syslog(SYSLOG_ACTION_CLOSE, NULL, 0, SYSLOG_FROM_PROC);
 	return 0;
 }
 
@@ -34,15 +34,15 @@ static ssize_t kmsg_read(struct file *file, char __user *buf,
 			 size_t count, loff_t *ppos)
 {
 	if ((file->f_flags & O_NONBLOCK) &&
-	    !do_syslog(SYSLOG_ACTION_SIZE_UNREAD, NULL, 0, SYSLOG_FROM_FILE))
+	    !do_syslog(SYSLOG_ACTION_SIZE_UNREAD, NULL, 0, SYSLOG_FROM_PROC))
 		return -EAGAIN;
-	return do_syslog(SYSLOG_ACTION_READ, buf, count, SYSLOG_FROM_FILE);
+	return do_syslog(SYSLOG_ACTION_READ, buf, count, SYSLOG_FROM_PROC);
 }
 
 static unsigned int kmsg_poll(struct file *file, poll_table *wait)
 {
 	poll_wait(file, &log_wait, wait);
-	if (do_syslog(SYSLOG_ACTION_SIZE_UNREAD, NULL, 0, SYSLOG_FROM_FILE))
+	if (do_syslog(SYSLOG_ACTION_SIZE_UNREAD, NULL, 0, SYSLOG_FROM_PROC))
 		return POLLIN | POLLRDNORM;
 	return 0;
 }
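The fs/proc/kmsg.c hunks above switch every do_syslog() call from the old SYSLOG_FROM_FILE constant to SYSLOG_FROM_PROC, which identifies /proc/kmsg readers to do_syslog(). The O_NONBLOCK behaviour visible in kmsg_read() can be exercised from userspace; a small illustration follows (not part of the patch, typically needs root or CAP_SYSLOG, and note that reading /proc/kmsg consumes log data, which can interfere with a running syslog daemon).

/*
 * Userspace illustration only (not from the patch). Exercises the
 * O_NONBLOCK path of kmsg_read() shown above: read() fails with EAGAIN
 * when no unread kernel messages are pending.
 * Build with: gcc -o kmsg_nonblock kmsg_nonblock.c
 */
#include <stdio.h>
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	ssize_t n;
	int fd = open("/proc/kmsg", O_RDONLY | O_NONBLOCK);

	if (fd < 0) {
		perror("open /proc/kmsg");
		return 1;
	}
	n = read(fd, buf, sizeof(buf));
	if (n < 0 && errno == EAGAIN)
		printf("no unread kernel messages\n");
	else if (n < 0)
		perror("read /proc/kmsg");
	else
		printf("read %zd bytes of log data\n", n);
	close(fd);
	return 0;
}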