Diffstat (limited to 'fs')

 -rw-r--r--  fs/aio.c                  |   7
 -rw-r--r--  fs/btrfs/ctree.h          |   1
 -rw-r--r--  fs/btrfs/extent-tree.c    |   2
 -rw-r--r--  fs/btrfs/extent_io.c      |   2
 -rw-r--r--  fs/btrfs/scrub.c          |   4
 -rw-r--r--  fs/btrfs/super.c          |  14
 -rw-r--r--  fs/btrfs/transaction.c    |   2
 -rw-r--r--  fs/btrfs/tree-log.c       |   1
 -rw-r--r--  fs/cifs/cifs_debug.c      |   6
 -rw-r--r--  fs/cifs/file.c            |   6
 -rw-r--r--  fs/cifs/ioctl.c           |  21
 -rw-r--r--  fs/cifs/smbencrypt.c      |   2
 -rw-r--r--  fs/fuse/dev.c             |  51
 -rw-r--r--  fs/fuse/dir.c             |  31
 -rw-r--r--  fs/fuse/fuse_i.h          |   2
 -rw-r--r--  fs/fuse/inode.c           |   5
 -rw-r--r--  fs/gfs2/quota.c           |  49
 -rw-r--r--  fs/kernfs/dir.c           |  12
 -rw-r--r--  fs/lockd/svc.c            |   8
 -rw-r--r--  fs/locks.c                |   2
 -rw-r--r--  fs/nfs/direct.c           |   6
 -rw-r--r--  fs/nfs/inode.c            |   5
 -rw-r--r--  fs/nfs/internal.h         |   2
 -rw-r--r--  fs/nfs/nfs4client.c       |  44
 -rw-r--r--  fs/nfs/nfs4proc.c         |  21
 -rw-r--r--  fs/nilfs2/nilfs.h         |   2
 -rw-r--r--  fs/nilfs2/segment.c       |  44
 -rw-r--r--  fs/nilfs2/segment.h       |   5
 -rw-r--r--  fs/quota/dquot.c          |  83
 -rw-r--r--  fs/quota/quota.c          | 162
 -rw-r--r--  fs/udf/file.c             |   2
 -rw-r--r--  fs/xfs/xfs_qm.h           |   4
 -rw-r--r--  fs/xfs/xfs_qm_syscalls.c  | 156
 -rw-r--r--  fs/xfs/xfs_quotaops.c     |   8

34 files changed, 483 insertions, 289 deletions
diff --git a/fs/aio.c b/fs/aio.c
@@ -1140,6 +1140,13 @@ static long aio_read_events_ring(struct kioctx *ctx,
 	long ret = 0;
 	int copy_ret;

+	/*
+	 * The mutex can block and wake us up and that will cause
+	 * wait_event_interruptible_hrtimeout() to schedule without sleeping
+	 * and repeat. This should be rare enough that it doesn't cause
+	 * peformance issues. See the comment in read_events() for more detail.
+	 */
+	sched_annotate_sleep();
 	mutex_lock(&ctx->ring_lock);

 	/* Access to ->ring_pages here is protected by ctx->ring_lock. */
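A note on the annotation above (illustration, not part of the commit): read_events() parks the task with wait_event_interruptible_hrtimeout(), and the wait condition ends up back in aio_read_events_ring(), which may sleep again on the ring mutex. That nested sleep is what the kernel's nested-sleep debugging check (under CONFIG_DEBUG_ATOMIC_SLEEP) warns about, and sched_annotate_sleep() marks it as intentional. Roughly:

	/* in read_events(): the task is already in an interruptible wait
	 * when the condition below is evaluated */
	wait_event_interruptible_hrtimeout(ctx->wait,
			aio_read_events(ctx, min_nr, nr, event, &ret), until);
	/* aio_read_events() -> aio_read_events_ring() -> mutex_lock(),
	 * i.e. a second, legitimate sleep while !TASK_RUNNING */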
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 7e607416755a..0b180708bf79 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -1171,6 +1171,7 @@ struct btrfs_space_info {
 	struct percpu_counter total_bytes_pinned;

 	struct list_head list;
+	/* Protected by the spinlock 'lock'. */
 	struct list_head ro_bgs;

 	struct rw_semaphore groups_sem;
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 15116585e714..a684086c3c81 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -9422,7 +9422,6 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
 	 * are still on the list after taking the semaphore
 	 */
 	list_del_init(&block_group->list);
-	list_del_init(&block_group->ro_list);
 	if (list_empty(&block_group->space_info->block_groups[index])) {
 		kobj = block_group->space_info->block_group_kobjs[index];
 		block_group->space_info->block_group_kobjs[index] = NULL;
@@ -9464,6 +9463,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
 	btrfs_remove_free_space_cache(block_group);

 	spin_lock(&block_group->space_info->lock);
+	list_del_init(&block_group->ro_list);
 	block_group->space_info->total_bytes -= block_group->key.offset;
 	block_group->space_info->bytes_readonly -= block_group->key.offset;
 	block_group->space_info->disk_total -= block_group->key.offset * factor;
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 4ebabd237153..790dbae3343c 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -2190,7 +2190,7 @@ void btrfs_free_io_failure_record(struct inode *inode, u64 start, u64 end)

 		next = next_state(state);

-		failrec = (struct io_failure_record *)state->private;
+		failrec = (struct io_failure_record *)(unsigned long)state->private;
 		free_extent_state(state);
 		kfree(failrec);

diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 9e1569ffbf6e..e427cb7ee12c 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -3053,7 +3053,7 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,

 	ppath = btrfs_alloc_path();
 	if (!ppath) {
-		btrfs_free_path(ppath);
+		btrfs_free_path(path);
 		return -ENOMEM;
 	}

@@ -3065,6 +3065,8 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
 	path->search_commit_root = 1;
 	path->skip_locking = 1;

+	ppath->search_commit_root = 1;
+	ppath->skip_locking = 1;
 	/*
 	 * trigger the readahead for extent tree csum tree and wait for
 	 * completion. During readahead, the scrub is officially paused
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 60f7cbe815e9..6f49b2872a64 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -1000,10 +1000,20 @@ int btrfs_sync_fs(struct super_block *sb, int wait)
 			 */
 			if (fs_info->pending_changes == 0)
 				return 0;
+			/*
+			 * A non-blocking test if the fs is frozen. We must not
+			 * start a new transaction here otherwise a deadlock
+			 * happens. The pending operations are delayed to the
+			 * next commit after thawing.
+			 */
+			if (__sb_start_write(sb, SB_FREEZE_WRITE, false))
+				__sb_end_write(sb, SB_FREEZE_WRITE);
+			else
+				return 0;
 			trans = btrfs_start_transaction(root, 0);
-		} else {
-			return PTR_ERR(trans);
 		}
+		if (IS_ERR(trans))
+			return PTR_ERR(trans);
 	}
 	return btrfs_commit_transaction(trans, root);
 }
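For context (a sketch, assuming the usual freeze-protection semantics): __sb_start_write(sb, SB_FREEZE_WRITE, false) acts as a try-lock on the superblock's freeze protection, so btrfs_sync_fs() can detect a frozen filesystem without blocking. Starting a transaction on a frozen fs would wait for the thaw while the freezer may itself be waiting in sync_fs(), which is the deadlock the new comment refers to.

	if (__sb_start_write(sb, SB_FREEZE_WRITE, false))
		__sb_end_write(sb, SB_FREEZE_WRITE);	/* not frozen: drop the reference and commit */
	else
		return 0;				/* frozen: leave the pending bits for the next commit */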
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index a605d4e2f2bc..e88b59d13439 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -2118,7 +2118,7 @@ void btrfs_apply_pending_changes(struct btrfs_fs_info *fs_info)
 	unsigned long prev;
 	unsigned long bit;

-	prev = cmpxchg(&fs_info->pending_changes, 0, 0);
+	prev = xchg(&fs_info->pending_changes, 0);
 	if (!prev)
 		return;

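The cmpxchg()/xchg() distinction is the point of this hunk: cmpxchg(ptr, 0, 0) only stores when the old value is already 0, so a non-zero pending-changes mask was read but never cleared and could be applied again on a later commit, whereas xchg() returns the old mask and unconditionally zeroes it. Illustrative kernel-style snippet (not from the commit):

	unsigned long pending = 0x5;		/* some pending-change bits set */

	prev = cmpxchg(&pending, 0, 0);		/* prev == 0x5, pending still 0x5 */
	prev = xchg(&pending, 0);		/* prev == 0x5, pending now 0     */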
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index 9a02da16f2be..1a9585d4380a 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -2591,6 +2591,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
 	}

 	if (log_root_tree->log_transid_committed >= root_log_ctx.log_transid) {
+		blk_finish_plug(&plug);
 		mutex_unlock(&log_root_tree->log_mutex);
 		ret = root_log_ctx.log_ret;
 		goto out;
diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
index 9c56ef776407..7febcf2475c5 100644
--- a/fs/cifs/cifs_debug.c
+++ b/fs/cifs/cifs_debug.c
@@ -606,9 +606,11 @@ cifs_security_flags_handle_must_flags(unsigned int *flags)
 		*flags = CIFSSEC_MUST_NTLMV2;
 	else if ((*flags & CIFSSEC_MUST_NTLM) == CIFSSEC_MUST_NTLM)
 		*flags = CIFSSEC_MUST_NTLM;
-	else if ((*flags & CIFSSEC_MUST_LANMAN) == CIFSSEC_MUST_LANMAN)
+	else if (CIFSSEC_MUST_LANMAN &&
+		 (*flags & CIFSSEC_MUST_LANMAN) == CIFSSEC_MUST_LANMAN)
 		*flags = CIFSSEC_MUST_LANMAN;
-	else if ((*flags & CIFSSEC_MUST_PLNTXT) == CIFSSEC_MUST_PLNTXT)
+	else if (CIFSSEC_MUST_PLNTXT &&
+		 (*flags & CIFSSEC_MUST_PLNTXT) == CIFSSEC_MUST_PLNTXT)
 		*flags = CIFSSEC_MUST_PLNTXT;

 	*flags |= signflags;
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 96b7e9b7706d..74f12877493a 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -366,6 +366,7 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
 	struct cifsLockInfo *li, *tmp;
 	struct cifs_fid fid;
 	struct cifs_pending_open open;
+	bool oplock_break_cancelled;

 	spin_lock(&cifs_file_list_lock);
 	if (--cifs_file->count > 0) {
@@ -397,7 +398,7 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
 	}
 	spin_unlock(&cifs_file_list_lock);

-	cancel_work_sync(&cifs_file->oplock_break);
+	oplock_break_cancelled = cancel_work_sync(&cifs_file->oplock_break);

 	if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
 		struct TCP_Server_Info *server = tcon->ses->server;
@@ -409,6 +410,9 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
 		_free_xid(xid);
 	}

+	if (oplock_break_cancelled)
+		cifs_done_oplock_break(cifsi);
+
 	cifs_del_pending_open(&open);

 	/*
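cancel_work_sync() returns true only when the work item was still queued and was cancelled before it ran; the change captures that return value so the bookkeeping the oplock-break worker would have done can be finished by hand. A condensed sketch of the pattern (the last call is the real cifs helper, the rest is schematic):

	bool cancelled = cancel_work_sync(&cifs_file->oplock_break);
	if (cancelled)
		cifs_done_oplock_break(cifsi);	/* do what the cancelled worker would have done */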
diff --git a/fs/cifs/ioctl.c b/fs/cifs/ioctl.c
index 45cb59bcc791..8b7898b7670f 100644
--- a/fs/cifs/ioctl.c
+++ b/fs/cifs/ioctl.c
@@ -86,21 +86,16 @@ static long cifs_ioctl_clone(unsigned int xid, struct file *dst_file,
 	}

 	src_inode = file_inode(src_file.file);
+	rc = -EINVAL;
+	if (S_ISDIR(src_inode->i_mode))
+		goto out_fput;

 	/*
 	 * Note: cifs case is easier than btrfs since server responsible for
 	 * checks for proper open modes and file type and if it wants
 	 * server could even support copy of range where source = target
 	 */
-
-	/* so we do not deadlock racing two ioctls on same files */
-	if (target_inode < src_inode) {
-		mutex_lock_nested(&target_inode->i_mutex, I_MUTEX_PARENT);
-		mutex_lock_nested(&src_inode->i_mutex, I_MUTEX_CHILD);
-	} else {
-		mutex_lock_nested(&src_inode->i_mutex, I_MUTEX_PARENT);
-		mutex_lock_nested(&target_inode->i_mutex, I_MUTEX_CHILD);
-	}
+	lock_two_nondirectories(target_inode, src_inode);

 	/* determine range to clone */
 	rc = -EINVAL;
@@ -124,13 +119,7 @@ static long cifs_ioctl_clone(unsigned int xid, struct file *dst_file,
 out_unlock:
 	/* although unlocking in the reverse order from locking is not
 	   strictly necessary here it is a little cleaner to be consistent */
-	if (target_inode < src_inode) {
-		mutex_unlock(&src_inode->i_mutex);
-		mutex_unlock(&target_inode->i_mutex);
-	} else {
-		mutex_unlock(&target_inode->i_mutex);
-		mutex_unlock(&src_inode->i_mutex);
-	}
+	unlock_two_nondirectories(src_inode, target_inode);
 out_fput:
 	fdput(src_file);
 out_drop_write:
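lock_two_nondirectories() is the VFS helper that takes the i_mutex of two non-directory inodes in a fixed, address-based order, so every caller agrees on the ordering and the hand-rolled I_MUTEX_PARENT/I_MUTEX_CHILD dance becomes unnecessary; the S_ISDIR check added earlier is what guarantees the "non-directory" precondition. Typical usage (sketch):

	lock_two_nondirectories(target_inode, src_inode);
	/* ... operate on both inodes, e.g. clone the byte range ... */
	unlock_two_nondirectories(src_inode, target_inode);	/* argument order is irrelevant on unlock */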
diff --git a/fs/cifs/smbencrypt.c b/fs/cifs/smbencrypt.c
index 6c1566366a66..a4232ec4f2ba 100644
--- a/fs/cifs/smbencrypt.c
+++ b/fs/cifs/smbencrypt.c
@@ -221,7 +221,7 @@ E_md4hash(const unsigned char *passwd, unsigned char *p16,
 	}

 	rc = mdfour(p16, (unsigned char *) wpwd, len * sizeof(__le16));
-	memset(wpwd, 0, 129 * sizeof(__le16));
+	memzero_explicit(wpwd, sizeof(wpwd));

 	return rc;
 }
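memzero_explicit() exists because a memset() of a local buffer that is never read again is a dead store the compiler may legally delete, which would leave the UTF-16 copy of the password on the stack; the explicit variant adds a barrier so the wipe survives optimization (and sizeof(wpwd) works here because wpwd is a fixed-size local array). A minimal sketch of the pattern, with compute_digest() as a made-up helper:

	{
		u8 tmp[64];				/* holds key material */
		int rc = compute_digest(out, tmp, secret);	/* compute_digest(): hypothetical */

		memzero_explicit(tmp, sizeof(tmp));	/* a plain memset() here could be optimized away */
		return rc;
	}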
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index ba1107977f2e..ed19a7d622fa 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -131,6 +131,13 @@ static void fuse_req_init_context(struct fuse_req *req)
 	req->in.h.pid = current->pid;
 }

+void fuse_set_initialized(struct fuse_conn *fc)
+{
+	/* Make sure stores before this are seen on another CPU */
+	smp_wmb();
+	fc->initialized = 1;
+}
+
 static bool fuse_block_alloc(struct fuse_conn *fc, bool for_background)
 {
 	return !fc->initialized || (for_background && fc->blocked);
@@ -155,6 +162,8 @@ static struct fuse_req *__fuse_get_req(struct fuse_conn *fc, unsigned npages,
 		if (intr)
 			goto out;
 	}
+	/* Matches smp_wmb() in fuse_set_initialized() */
+	smp_rmb();

 	err = -ENOTCONN;
 	if (!fc->connected)
@@ -253,6 +262,8 @@ struct fuse_req *fuse_get_req_nofail_nopages(struct fuse_conn *fc,

 	atomic_inc(&fc->num_waiting);
 	wait_event(fc->blocked_waitq, fc->initialized);
+	/* Matches smp_wmb() in fuse_set_initialized() */
+	smp_rmb();
 	req = fuse_request_alloc(0);
 	if (!req)
 		req = get_reserved_req(fc, file);
@@ -511,6 +522,39 @@ void fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
 }
 EXPORT_SYMBOL_GPL(fuse_request_send);

+static void fuse_adjust_compat(struct fuse_conn *fc, struct fuse_args *args)
+{
+	if (fc->minor < 4 && args->in.h.opcode == FUSE_STATFS)
+		args->out.args[0].size = FUSE_COMPAT_STATFS_SIZE;
+
+	if (fc->minor < 9) {
+		switch (args->in.h.opcode) {
+		case FUSE_LOOKUP:
+		case FUSE_CREATE:
+		case FUSE_MKNOD:
+		case FUSE_MKDIR:
+		case FUSE_SYMLINK:
+		case FUSE_LINK:
+			args->out.args[0].size = FUSE_COMPAT_ENTRY_OUT_SIZE;
+			break;
+		case FUSE_GETATTR:
+		case FUSE_SETATTR:
+			args->out.args[0].size = FUSE_COMPAT_ATTR_OUT_SIZE;
+			break;
+		}
+	}
+	if (fc->minor < 12) {
+		switch (args->in.h.opcode) {
+		case FUSE_CREATE:
+			args->in.args[0].size = sizeof(struct fuse_open_in);
+			break;
+		case FUSE_MKNOD:
+			args->in.args[0].size = FUSE_COMPAT_MKNOD_IN_SIZE;
+			break;
+		}
+	}
+}
+
 ssize_t fuse_simple_request(struct fuse_conn *fc, struct fuse_args *args)
 {
 	struct fuse_req *req;
@@ -520,6 +564,9 @@ ssize_t fuse_simple_request(struct fuse_conn *fc, struct fuse_args *args)
 	if (IS_ERR(req))
 		return PTR_ERR(req);

+	/* Needs to be done after fuse_get_req() so that fc->minor is valid */
+	fuse_adjust_compat(fc, args);
+
 	req->in.h.opcode = args->in.h.opcode;
 	req->in.h.nodeid = args->in.h.nodeid;
 	req->in.numargs = args->in.numargs;
@@ -2127,7 +2174,7 @@ void fuse_abort_conn(struct fuse_conn *fc)
 	if (fc->connected) {
 		fc->connected = 0;
 		fc->blocked = 0;
-		fc->initialized = 1;
+		fuse_set_initialized(fc);
 		end_io_requests(fc);
 		end_queued_requests(fc);
 		end_polls(fc);
@@ -2146,7 +2193,7 @@ int fuse_dev_release(struct inode *inode, struct file *file)
 		spin_lock(&fc->lock);
 		fc->connected = 0;
 		fc->blocked = 0;
-		fc->initialized = 1;
+		fuse_set_initialized(fc);
 		end_queued_requests(fc);
 		end_polls(fc);
 		wake_up_all(&fc->blocked_waitq);
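The new fuse_set_initialized()/smp_rmb() pairing is a standard publish/consume barrier: the INIT-reply path fills in negotiated fields such as fc->minor and fc->max_write, issues smp_wmb(), and only then sets fc->initialized, while request allocators that were woken on fc->initialized issue smp_rmb() before trusting those fields. Schematically (illustration only, not part of the diff):

	/* writer: process_init_reply()        reader: __fuse_get_req()
	 *   fc->minor = arg->minor;             wait_event(..., fc->initialized);
	 *   fc->max_write = ...;                smp_rmb();   // pairs with smp_wmb()
	 *   smp_wmb();                          ...safe to read fc->minor etc...
	 *   fc->initialized = 1;
	 */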
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index 252b8a5de8b5..08e7b1a9d5d0 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -156,10 +156,7 @@ static void fuse_lookup_init(struct fuse_conn *fc, struct fuse_args *args,
 	args->in.args[0].size = name->len + 1;
 	args->in.args[0].value = name->name;
 	args->out.numargs = 1;
-	if (fc->minor < 9)
-		args->out.args[0].size = FUSE_COMPAT_ENTRY_OUT_SIZE;
-	else
-		args->out.args[0].size = sizeof(struct fuse_entry_out);
+	args->out.args[0].size = sizeof(struct fuse_entry_out);
 	args->out.args[0].value = outarg;
 }

@@ -422,16 +419,12 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry,
 	args.in.h.opcode = FUSE_CREATE;
 	args.in.h.nodeid = get_node_id(dir);
 	args.in.numargs = 2;
-	args.in.args[0].size = fc->minor < 12 ? sizeof(struct fuse_open_in) :
-						sizeof(inarg);
+	args.in.args[0].size = sizeof(inarg);
 	args.in.args[0].value = &inarg;
 	args.in.args[1].size = entry->d_name.len + 1;
 	args.in.args[1].value = entry->d_name.name;
 	args.out.numargs = 2;
-	if (fc->minor < 9)
-		args.out.args[0].size = FUSE_COMPAT_ENTRY_OUT_SIZE;
-	else
-		args.out.args[0].size = sizeof(outentry);
+	args.out.args[0].size = sizeof(outentry);
 	args.out.args[0].value = &outentry;
 	args.out.args[1].size = sizeof(outopen);
 	args.out.args[1].value = &outopen;
@@ -539,10 +532,7 @@ static int create_new_entry(struct fuse_conn *fc, struct fuse_args *args,
 	memset(&outarg, 0, sizeof(outarg));
 	args->in.h.nodeid = get_node_id(dir);
 	args->out.numargs = 1;
-	if (fc->minor < 9)
-		args->out.args[0].size = FUSE_COMPAT_ENTRY_OUT_SIZE;
-	else
-		args->out.args[0].size = sizeof(outarg);
+	args->out.args[0].size = sizeof(outarg);
 	args->out.args[0].value = &outarg;
 	err = fuse_simple_request(fc, args);
 	if (err)
@@ -592,8 +582,7 @@ static int fuse_mknod(struct inode *dir, struct dentry *entry, umode_t mode,
 	inarg.umask = current_umask();
 	args.in.h.opcode = FUSE_MKNOD;
 	args.in.numargs = 2;
-	args.in.args[0].size = fc->minor < 12 ? FUSE_COMPAT_MKNOD_IN_SIZE :
-						sizeof(inarg);
+	args.in.args[0].size = sizeof(inarg);
 	args.in.args[0].value = &inarg;
 	args.in.args[1].size = entry->d_name.len + 1;
 	args.in.args[1].value = entry->d_name.name;
@@ -899,10 +888,7 @@ static int fuse_do_getattr(struct inode *inode, struct kstat *stat,
 	args.in.args[0].size = sizeof(inarg);
 	args.in.args[0].value = &inarg;
 	args.out.numargs = 1;
-	if (fc->minor < 9)
-		args.out.args[0].size = FUSE_COMPAT_ATTR_OUT_SIZE;
-	else
-		args.out.args[0].size = sizeof(outarg);
+	args.out.args[0].size = sizeof(outarg);
 	args.out.args[0].value = &outarg;
 	err = fuse_simple_request(fc, &args);
 	if (!err) {
@@ -1574,10 +1560,7 @@ static void fuse_setattr_fill(struct fuse_conn *fc, struct fuse_args *args,
 	args->in.args[0].size = sizeof(*inarg_p);
 	args->in.args[0].value = inarg_p;
 	args->out.numargs = 1;
-	if (fc->minor < 9)
-		args->out.args[0].size = FUSE_COMPAT_ATTR_OUT_SIZE;
-	else
-		args->out.args[0].size = sizeof(*outarg_p);
+	args->out.args[0].size = sizeof(*outarg_p);
 	args->out.args[0].value = outarg_p;
 }

diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index e0fc6725d1d0..1cdfb07c1376 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -906,4 +906,6 @@ int fuse_write_inode(struct inode *inode, struct writeback_control *wbc);
 int fuse_do_setattr(struct inode *inode, struct iattr *attr,
 		    struct file *file);

+void fuse_set_initialized(struct fuse_conn *fc);
+
 #endif /* _FS_FUSE_I_H */
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index 6749109f255d..f38256e4476e 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -424,8 +424,7 @@ static int fuse_statfs(struct dentry *dentry, struct kstatfs *buf)
 	args.in.h.opcode = FUSE_STATFS;
 	args.in.h.nodeid = get_node_id(dentry->d_inode);
 	args.out.numargs = 1;
-	args.out.args[0].size =
-		fc->minor < 4 ? FUSE_COMPAT_STATFS_SIZE : sizeof(outarg);
+	args.out.args[0].size = sizeof(outarg);
 	args.out.args[0].value = &outarg;
 	err = fuse_simple_request(fc, &args);
 	if (!err)
@@ -898,7 +897,7 @@ static void process_init_reply(struct fuse_conn *fc, struct fuse_req *req)
 		fc->max_write = max_t(unsigned, 4096, fc->max_write);
 		fc->conn_init = 1;
 	}
-	fc->initialized = 1;
+	fuse_set_initialized(fc);
 	wake_up_all(&fc->blocked_waitq);
 }

diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
index c8b148bbdc8b..3e193cb36996 100644
--- a/fs/gfs2/quota.c
+++ b/fs/gfs2/quota.c
@@ -667,7 +667,7 @@ static void do_qc(struct gfs2_quota_data *qd, s64 change)

 static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
 			     s64 change, struct gfs2_quota_data *qd,
-			     struct fs_disk_quota *fdq)
+			     struct qc_dqblk *fdq)
 {
 	struct inode *inode = &ip->i_inode;
 	struct gfs2_sbd *sdp = GFS2_SB(inode);
@@ -697,16 +697,16 @@ static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
 	be64_add_cpu(&q.qu_value, change);
 	qd->qd_qb.qb_value = q.qu_value;
 	if (fdq) {
-		if (fdq->d_fieldmask & FS_DQ_BSOFT) {
-			q.qu_warn = cpu_to_be64(fdq->d_blk_softlimit >> sdp->sd_fsb2bb_shift);
+		if (fdq->d_fieldmask & QC_SPC_SOFT) {
+			q.qu_warn = cpu_to_be64(fdq->d_spc_softlimit >> sdp->sd_sb.sb_bsize_shift);
 			qd->qd_qb.qb_warn = q.qu_warn;
 		}
-		if (fdq->d_fieldmask & FS_DQ_BHARD) {
-			q.qu_limit = cpu_to_be64(fdq->d_blk_hardlimit >> sdp->sd_fsb2bb_shift);
+		if (fdq->d_fieldmask & QC_SPC_HARD) {
+			q.qu_limit = cpu_to_be64(fdq->d_spc_hardlimit >> sdp->sd_sb.sb_bsize_shift);
 			qd->qd_qb.qb_limit = q.qu_limit;
 		}
-		if (fdq->d_fieldmask & FS_DQ_BCOUNT) {
-			q.qu_value = cpu_to_be64(fdq->d_bcount >> sdp->sd_fsb2bb_shift);
+		if (fdq->d_fieldmask & QC_SPACE) {
+			q.qu_value = cpu_to_be64(fdq->d_space >> sdp->sd_sb.sb_bsize_shift);
 			qd->qd_qb.qb_value = q.qu_value;
 		}
 	}
@@ -1497,7 +1497,7 @@ static int gfs2_quota_get_xstate(struct super_block *sb,
 }

 static int gfs2_get_dqblk(struct super_block *sb, struct kqid qid,
-			  struct fs_disk_quota *fdq)
+			  struct qc_dqblk *fdq)
 {
 	struct gfs2_sbd *sdp = sb->s_fs_info;
 	struct gfs2_quota_lvb *qlvb;
@@ -1505,7 +1505,7 @@ static int gfs2_get_dqblk(struct super_block *sb, struct kqid qid,
 	struct gfs2_holder q_gh;
 	int error;

-	memset(fdq, 0, sizeof(struct fs_disk_quota));
+	memset(fdq, 0, sizeof(*fdq));

 	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
 		return -ESRCH; /* Crazy XFS error code */
@@ -1522,12 +1522,9 @@ static int gfs2_get_dqblk(struct super_block *sb, struct kqid qid,
 		goto out;

 	qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr;
-	fdq->d_version = FS_DQUOT_VERSION;
-	fdq->d_flags = (qid.type == USRQUOTA) ? FS_USER_QUOTA : FS_GROUP_QUOTA;
-	fdq->d_id = from_kqid_munged(current_user_ns(), qid);
-	fdq->d_blk_hardlimit = be64_to_cpu(qlvb->qb_limit) << sdp->sd_fsb2bb_shift;
-	fdq->d_blk_softlimit = be64_to_cpu(qlvb->qb_warn) << sdp->sd_fsb2bb_shift;
-	fdq->d_bcount = be64_to_cpu(qlvb->qb_value) << sdp->sd_fsb2bb_shift;
+	fdq->d_spc_hardlimit = be64_to_cpu(qlvb->qb_limit) << sdp->sd_sb.sb_bsize_shift;
+	fdq->d_spc_softlimit = be64_to_cpu(qlvb->qb_warn) << sdp->sd_sb.sb_bsize_shift;
+	fdq->d_space = be64_to_cpu(qlvb->qb_value) << sdp->sd_sb.sb_bsize_shift;

 	gfs2_glock_dq_uninit(&q_gh);
 out:
@@ -1536,10 +1533,10 @@ out:
 }

 /* GFS2 only supports a subset of the XFS fields */
-#define GFS2_FIELDMASK (FS_DQ_BSOFT|FS_DQ_BHARD|FS_DQ_BCOUNT)
+#define GFS2_FIELDMASK (QC_SPC_SOFT|QC_SPC_HARD|QC_SPACE)

 static int gfs2_set_dqblk(struct super_block *sb, struct kqid qid,
-			  struct fs_disk_quota *fdq)
+			  struct qc_dqblk *fdq)
 {
 	struct gfs2_sbd *sdp = sb->s_fs_info;
 	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
@@ -1583,17 +1580,17 @@ static int gfs2_set_dqblk(struct super_block *sb, struct kqid qid,
 		goto out_i;

 	/* If nothing has changed, this is a no-op */
-	if ((fdq->d_fieldmask & FS_DQ_BSOFT) &&
-	    ((fdq->d_blk_softlimit >> sdp->sd_fsb2bb_shift) == be64_to_cpu(qd->qd_qb.qb_warn)))
-		fdq->d_fieldmask ^= FS_DQ_BSOFT;
+	if ((fdq->d_fieldmask & QC_SPC_SOFT) &&
+	    ((fdq->d_spc_softlimit >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_warn)))
+		fdq->d_fieldmask ^= QC_SPC_SOFT;

-	if ((fdq->d_fieldmask & FS_DQ_BHARD) &&
-	    ((fdq->d_blk_hardlimit >> sdp->sd_fsb2bb_shift) == be64_to_cpu(qd->qd_qb.qb_limit)))
-		fdq->d_fieldmask ^= FS_DQ_BHARD;
+	if ((fdq->d_fieldmask & QC_SPC_HARD) &&
+	    ((fdq->d_spc_hardlimit >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_limit)))
+		fdq->d_fieldmask ^= QC_SPC_HARD;

-	if ((fdq->d_fieldmask & FS_DQ_BCOUNT) &&
-	    ((fdq->d_bcount >> sdp->sd_fsb2bb_shift) == be64_to_cpu(qd->qd_qb.qb_value)))
-		fdq->d_fieldmask ^= FS_DQ_BCOUNT;
+	if ((fdq->d_fieldmask & QC_SPACE) &&
+	    ((fdq->d_space >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_value)))
+		fdq->d_fieldmask ^= QC_SPACE;

 	if (fdq->d_fieldmask == 0)
 		goto out_i;
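Besides the struct and flag renames, the unit convention changes: struct qc_dqblk carries space limits and usage in bytes, where struct fs_disk_quota used 512-byte basic blocks, which is why every conversion above switches from sd_fsb2bb_shift (fs blocks to basic blocks) to sd_sb.sb_bsize_shift (fs blocks to bytes). Illustrative arithmetic, assuming a 4 KiB-block filesystem:

	/* old interface: fs blocks -> 512-byte units (sd_fsb2bb_shift == 3 here) */
	d_blk_hardlimit = fs_blocks << 3;	/* 1000 blocks -> 8000 sectors  */
	/* new interface: fs blocks -> bytes (sb_bsize_shift == 12 here) */
	d_spc_hardlimit = fs_blocks << 12;	/* 1000 blocks -> 4096000 bytes */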
diff --git a/fs/kernfs/dir.c b/fs/kernfs/dir.c
index 37989f02a226..2d881b381d2b 100644
--- a/fs/kernfs/dir.c
+++ b/fs/kernfs/dir.c
@@ -201,10 +201,14 @@ static unsigned int kernfs_name_hash(const char *name, const void *ns)
 static int kernfs_name_compare(unsigned int hash, const char *name,
 			       const void *ns, const struct kernfs_node *kn)
 {
-	if (hash != kn->hash)
-		return hash - kn->hash;
-	if (ns != kn->ns)
-		return ns - kn->ns;
+	if (hash < kn->hash)
+		return -1;
+	if (hash > kn->hash)
+		return 1;
+	if (ns < kn->ns)
+		return -1;
+	if (ns > kn->ns)
+		return 1;
 	return strcmp(name, kn->name);
 }

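The explicit -1/0/1 results avoid two classic comparison-function bugs: subtracting two unsigned 32-bit hashes wraps modulo 2^32 and is then truncated to int, so the sign no longer reflects the ordering, and subtracting two unrelated pointers (the namespace tags) is not a well-defined ordering and can likewise overflow int on 64-bit. A small illustration of the hash case (not from the commit):

	unsigned int a = 0x80000001, b = 1;

	int wrong = a - b;			/* 0x80000000: negative as int, although a > b */
	int right = (a < b) ? -1 : (a > b);	/* 1, as intended */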
diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
index e94c887da2d7..55505cbe11af 100644
--- a/fs/lockd/svc.c
+++ b/fs/lockd/svc.c
@@ -138,10 +138,6 @@ lockd(void *vrqstp)

 	dprintk("NFS locking service started (ver " LOCKD_VERSION ").\n");

-	if (!nlm_timeout)
-		nlm_timeout = LOCKD_DFLT_TIMEO;
-	nlmsvc_timeout = nlm_timeout * HZ;
-
 	/*
 	 * The main request loop. We don't terminate until the last
 	 * NFS mount or NFS daemon has gone away.
@@ -350,6 +346,10 @@ static struct svc_serv *lockd_create_svc(void)
 		printk(KERN_WARNING
 			"lockd_up: no pid, %d users??\n", nlmsvc_users);

+	if (!nlm_timeout)
+		nlm_timeout = LOCKD_DFLT_TIMEO;
+	nlmsvc_timeout = nlm_timeout * HZ;
+
 	serv = svc_create(&nlmsvc_program, LOCKD_BUFSIZE, svc_rpcb_cleanup);
 	if (!serv) {
 		printk(KERN_WARNING "lockd_up: create service failed\n");
diff --git a/fs/locks.c b/fs/locks.c
index 735b8d3fa78c..59e2f905e4ff 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -1702,7 +1702,7 @@ static int generic_delete_lease(struct file *filp)
 		break;
 	}
 	trace_generic_delete_lease(inode, fl);
-	if (fl)
+	if (fl && IS_LEASE(fl))
 		error = fl->fl_lmops->lm_change(before, F_UNLCK, &dispose);
 	spin_unlock(&inode->i_lock);
 	locks_dispose_list(&dispose);
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index 10bf07280f4a..294692ff83b1 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -212,6 +212,12 @@ static int nfs_direct_cmp_commit_data_verf(struct nfs_direct_req *dreq,
  */
 ssize_t nfs_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter, loff_t pos)
 {
+	struct inode *inode = iocb->ki_filp->f_mapping->host;
+
+	/* we only support swap file calling nfs_direct_IO */
+	if (!IS_SWAPFILE(inode))
+		return 0;
+
 #ifndef CONFIG_NFS_SWAP
 	dprintk("NFS: nfs_direct_IO (%pD) off/no(%Ld/%lu) EINVAL\n",
 		iocb->ki_filp, (long long) pos, iter->nr_segs);
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index 4bffe637ea32..2211f6ba8736 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -352,8 +352,9 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr, st

 	nfs_attr_check_mountpoint(sb, fattr);

-	if (((fattr->valid & NFS_ATTR_FATTR_FILEID) == 0) &&
-	    !nfs_attr_use_mounted_on_fileid(fattr))
+	if (nfs_attr_use_mounted_on_fileid(fattr))
+		fattr->fileid = fattr->mounted_on_fileid;
+	else if ((fattr->valid & NFS_ATTR_FATTR_FILEID) == 0)
 		goto out_no_inode;
 	if ((fattr->valid & NFS_ATTR_FATTR_TYPE) == 0)
 		goto out_no_inode;
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index efaa31c70fbe..b6f34bfa6fe8 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -31,8 +31,6 @@ static inline int nfs_attr_use_mounted_on_fileid(struct nfs_fattr *fattr)
 	    (((fattr->valid & NFS_ATTR_FATTR_MOUNTPOINT) == 0) &&
 	     ((fattr->valid & NFS_ATTR_FATTR_V4_REFERRAL) == 0)))
 		return 0;
-
-	fattr->fileid = fattr->mounted_on_fileid;
 	return 1;
 }

diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
index 03311259b0c4..706ad10b8186 100644
--- a/fs/nfs/nfs4client.c
+++ b/fs/nfs/nfs4client.c
@@ -228,6 +228,7 @@ static void nfs4_shutdown_client(struct nfs_client *clp)
 	kfree(clp->cl_serverowner);
 	kfree(clp->cl_serverscope);
 	kfree(clp->cl_implid);
+	kfree(clp->cl_owner_id);
 }

 void nfs4_free_client(struct nfs_client *clp)
@@ -452,6 +453,14 @@ static void nfs4_swap_callback_idents(struct nfs_client *keep,
 	spin_unlock(&nn->nfs_client_lock);
 }

+static bool nfs4_match_client_owner_id(const struct nfs_client *clp1,
+				       const struct nfs_client *clp2)
+{
+	if (clp1->cl_owner_id == NULL || clp2->cl_owner_id == NULL)
+		return true;
+	return strcmp(clp1->cl_owner_id, clp2->cl_owner_id) == 0;
+}
+
 /**
  * nfs40_walk_client_list - Find server that recognizes a client ID
  *
@@ -483,9 +492,6 @@ int nfs40_walk_client_list(struct nfs_client *new,
 		if (pos->rpc_ops != new->rpc_ops)
 			continue;

-		if (pos->cl_proto != new->cl_proto)
-			continue;
-
 		if (pos->cl_minorversion != new->cl_minorversion)
 			continue;

@@ -510,6 +516,9 @@ int nfs40_walk_client_list(struct nfs_client *new,
 		if (pos->cl_clientid != new->cl_clientid)
 			continue;

+		if (!nfs4_match_client_owner_id(pos, new))
+			continue;
+
 		atomic_inc(&pos->cl_count);
 		spin_unlock(&nn->nfs_client_lock);

@@ -566,20 +575,14 @@ static bool nfs4_match_clientids(struct nfs_client *a, struct nfs_client *b)
 }

 /*
- * Returns true if the server owners match
+ * Returns true if the server major ids match
  */
 static bool
-nfs4_match_serverowners(struct nfs_client *a, struct nfs_client *b)
+nfs4_check_clientid_trunking(struct nfs_client *a, struct nfs_client *b)
 {
 	struct nfs41_server_owner *o1 = a->cl_serverowner;
 	struct nfs41_server_owner *o2 = b->cl_serverowner;

-	if (o1->minor_id != o2->minor_id) {
-		dprintk("NFS: --> %s server owner minor IDs do not match\n",
-			__func__);
-		return false;
-	}
-
 	if (o1->major_id_sz != o2->major_id_sz)
 		goto out_major_mismatch;
 	if (memcmp(o1->major_id, o2->major_id, o1->major_id_sz) != 0)
@@ -621,9 +624,6 @@ int nfs41_walk_client_list(struct nfs_client *new,
 		if (pos->rpc_ops != new->rpc_ops)
 			continue;

-		if (pos->cl_proto != new->cl_proto)
-			continue;
-
 		if (pos->cl_minorversion != new->cl_minorversion)
 			continue;

@@ -639,7 +639,7 @@ int nfs41_walk_client_list(struct nfs_client *new,
 			prev = pos;

 			status = nfs_wait_client_init_complete(pos);
-			if (status == 0) {
+			if (pos->cl_cons_state == NFS_CS_SESSION_INITING) {
 				nfs4_schedule_lease_recovery(pos);
 				status = nfs4_wait_clnt_recover(pos);
 			}
@@ -654,7 +654,19 @@ int nfs41_walk_client_list(struct nfs_client *new,
 		if (!nfs4_match_clientids(pos, new))
 			continue;

-		if (!nfs4_match_serverowners(pos, new))
+		/*
+		 * Note that session trunking is just a special subcase of
+		 * client id trunking. In either case, we want to fall back
+		 * to using the existing nfs_client.
+		 */
+		if (!nfs4_check_clientid_trunking(pos, new))
+			continue;
+
+		/* Unlike NFSv4.0, we know that NFSv4.1 always uses the
+		 * uniform string, however someone might switch the
+		 * uniquifier string on us.
+		 */
+		if (!nfs4_match_client_owner_id(pos, new))
 			continue;

 		atomic_inc(&pos->cl_count);
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index e7f8d5ff2581..c347705b0161 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -1117,8 +1117,6 @@ static int can_open_delegated(struct nfs_delegation *delegation, fmode_t fmode)
 		return 0;
 	if ((delegation->type & fmode) != fmode)
 		return 0;
-	if (test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags))
-		return 0;
 	if (test_bit(NFS_DELEGATION_RETURNING, &delegation->flags))
 		return 0;
 	nfs_mark_delegation_referenced(delegation);
@@ -4917,11 +4915,14 @@ static void nfs4_init_boot_verifier(const struct nfs_client *clp,
 }

 static unsigned int
-nfs4_init_nonuniform_client_string(const struct nfs_client *clp,
+nfs4_init_nonuniform_client_string(struct nfs_client *clp,
 				   char *buf, size_t len)
 {
 	unsigned int result;

+	if (clp->cl_owner_id != NULL)
+		return strlcpy(buf, clp->cl_owner_id, len);
+
 	rcu_read_lock();
 	result = scnprintf(buf, len, "Linux NFSv4.0 %s/%s %s",
 			   clp->cl_ipaddr,
@@ -4930,24 +4931,32 @@ nfs4_init_nonuniform_client_string(const struct nfs_client *clp,
 			   rpc_peeraddr2str(clp->cl_rpcclient,
 					    RPC_DISPLAY_PROTO));
 	rcu_read_unlock();
+	clp->cl_owner_id = kstrdup(buf, GFP_KERNEL);
 	return result;
 }

 static unsigned int
-nfs4_init_uniform_client_string(const struct nfs_client *clp,
+nfs4_init_uniform_client_string(struct nfs_client *clp,
 				char *buf, size_t len)
 {
 	const char *nodename = clp->cl_rpcclient->cl_nodename;
+	unsigned int result;
+
+	if (clp->cl_owner_id != NULL)
+		return strlcpy(buf, clp->cl_owner_id, len);

 	if (nfs4_client_id_uniquifier[0] != '\0')
-		return scnprintf(buf, len, "Linux NFSv%u.%u %s/%s",
+		result = scnprintf(buf, len, "Linux NFSv%u.%u %s/%s",
 				clp->rpc_ops->version,
 				clp->cl_minorversion,
 				nfs4_client_id_uniquifier,
 				nodename);
-	return scnprintf(buf, len, "Linux NFSv%u.%u %s",
+	else
+		result = scnprintf(buf, len, "Linux NFSv%u.%u %s",
 			clp->rpc_ops->version, clp->cl_minorversion,
 			nodename);
+	clp->cl_owner_id = kstrdup(buf, GFP_KERNEL);
+	return result;
 }

 /*
diff --git a/fs/nilfs2/nilfs.h b/fs/nilfs2/nilfs.h
index 91093cd74f0d..385704027575 100644
--- a/fs/nilfs2/nilfs.h
+++ b/fs/nilfs2/nilfs.h
@@ -141,7 +141,6 @@ enum {
  * @ti_save: Backup of journal_info field of task_struct
  * @ti_flags: Flags
  * @ti_count: Nest level
- * @ti_garbage: List of inode to be put when releasing semaphore
  */
 struct nilfs_transaction_info {
 	u32 ti_magic;
@@ -150,7 +149,6 @@ struct nilfs_transaction_info {
 			   one of other filesystems has a bug. */
 	unsigned short ti_flags;
 	unsigned short ti_count;
-	struct list_head ti_garbage;
 };

 /* ti_magic */
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
index 7ef18fc656c2..469086b9f99b 100644
--- a/fs/nilfs2/segment.c
+++ b/fs/nilfs2/segment.c
@@ -305,7 +305,6 @@ static void nilfs_transaction_lock(struct super_block *sb,
 	ti->ti_count = 0;
 	ti->ti_save = cur_ti;
 	ti->ti_magic = NILFS_TI_MAGIC;
-	INIT_LIST_HEAD(&ti->ti_garbage);
 	current->journal_info = ti;

 	for (;;) {
@@ -332,8 +331,6 @@ static void nilfs_transaction_unlock(struct super_block *sb)

 	up_write(&nilfs->ns_segctor_sem);
 	current->journal_info = ti->ti_save;
-	if (!list_empty(&ti->ti_garbage))
-		nilfs_dispose_list(nilfs, &ti->ti_garbage, 0);
 }

 static void *nilfs_segctor_map_segsum_entry(struct nilfs_sc_info *sci,
@@ -746,6 +743,15 @@ static void nilfs_dispose_list(struct the_nilfs *nilfs,
 	}
 }

+static void nilfs_iput_work_func(struct work_struct *work)
+{
+	struct nilfs_sc_info *sci = container_of(work, struct nilfs_sc_info,
+						 sc_iput_work);
+	struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
+
+	nilfs_dispose_list(nilfs, &sci->sc_iput_queue, 0);
+}
+
 static int nilfs_test_metadata_dirty(struct the_nilfs *nilfs,
 				     struct nilfs_root *root)
 {
@@ -1900,8 +1906,8 @@ static int nilfs_segctor_collect_dirty_files(struct nilfs_sc_info *sci,
 static void nilfs_segctor_drop_written_files(struct nilfs_sc_info *sci,
 					     struct the_nilfs *nilfs)
 {
-	struct nilfs_transaction_info *ti = current->journal_info;
 	struct nilfs_inode_info *ii, *n;
+	int defer_iput = false;

 	spin_lock(&nilfs->ns_inode_lock);
 	list_for_each_entry_safe(ii, n, &sci->sc_dirty_files, i_dirty) {
@@ -1912,9 +1918,24 @@ static void nilfs_segctor_drop_written_files(struct nilfs_sc_info *sci,
 		clear_bit(NILFS_I_BUSY, &ii->i_state);
 		brelse(ii->i_bh);
 		ii->i_bh = NULL;
-		list_move_tail(&ii->i_dirty, &ti->ti_garbage);
+		list_del_init(&ii->i_dirty);
+		if (!ii->vfs_inode.i_nlink) {
+			/*
+			 * Defer calling iput() to avoid a deadlock
+			 * over I_SYNC flag for inodes with i_nlink == 0
+			 */
+			list_add_tail(&ii->i_dirty, &sci->sc_iput_queue);
+			defer_iput = true;
+		} else {
+			spin_unlock(&nilfs->ns_inode_lock);
+			iput(&ii->vfs_inode);
+			spin_lock(&nilfs->ns_inode_lock);
+		}
 	}
 	spin_unlock(&nilfs->ns_inode_lock);
+
+	if (defer_iput)
+		schedule_work(&sci->sc_iput_work);
 }

 /*
@@ -2583,6 +2604,8 @@ static struct nilfs_sc_info *nilfs_segctor_new(struct super_block *sb,
 	INIT_LIST_HEAD(&sci->sc_segbufs);
 	INIT_LIST_HEAD(&sci->sc_write_logs);
 	INIT_LIST_HEAD(&sci->sc_gc_inodes);
+	INIT_LIST_HEAD(&sci->sc_iput_queue);
+	INIT_WORK(&sci->sc_iput_work, nilfs_iput_work_func);
 	init_timer(&sci->sc_timer);

 	sci->sc_interval = HZ * NILFS_SC_DEFAULT_TIMEOUT;
@@ -2609,6 +2632,8 @@ static void nilfs_segctor_write_out(struct nilfs_sc_info *sci)
 		ret = nilfs_segctor_construct(sci, SC_LSEG_SR);
 		nilfs_transaction_unlock(sci->sc_super);

+		flush_work(&sci->sc_iput_work);
+
 	} while (ret && retrycount-- > 0);
 }

@@ -2633,6 +2658,9 @@ static void nilfs_segctor_destroy(struct nilfs_sc_info *sci)
 		|| sci->sc_seq_request != sci->sc_seq_done);
 	spin_unlock(&sci->sc_state_lock);

+	if (flush_work(&sci->sc_iput_work))
+		flag = true;
+
 	if (flag || !nilfs_segctor_confirm(sci))
 		nilfs_segctor_write_out(sci);

@@ -2642,6 +2670,12 @@ static void nilfs_segctor_destroy(struct nilfs_sc_info *sci)
 		nilfs_dispose_list(nilfs, &sci->sc_dirty_files, 1);
 	}

+	if (!list_empty(&sci->sc_iput_queue)) {
+		nilfs_warning(sci->sc_super, __func__,
+			      "iput queue is not empty\n");
+		nilfs_dispose_list(nilfs, &sci->sc_iput_queue, 1);
+	}
+
 	WARN_ON(!list_empty(&sci->sc_segbufs));
 	WARN_ON(!list_empty(&sci->sc_write_logs));

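The structural change above is the usual defer-to-a-work-item pattern: dropping the last reference with iput() on an unlinked inode (i_nlink == 0) from the segment-constructor context can deadlock on the inode's I_SYNC writeback flag, so such inodes are parked on sc_iput_queue and a work item drops them later, with flush_work() in the shutdown paths making sure nothing stays queued. Condensed shape of the pattern (illustrative, not additional code from the commit):

	/* producer side: cannot safely iput() here */
	list_add_tail(&ii->i_dirty, &sci->sc_iput_queue);
	schedule_work(&sci->sc_iput_work);	/* nilfs_iput_work_func() will do the iput()s */

	/* teardown: make sure the deferred iput()s have run */
	flush_work(&sci->sc_iput_work);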
diff --git a/fs/nilfs2/segment.h b/fs/nilfs2/segment.h index 38a1d0013314..a48d6de1e02c 100644 --- a/fs/nilfs2/segment.h +++ b/fs/nilfs2/segment.h | |||
@@ -26,6 +26,7 @@ | |||
26 | #include <linux/types.h> | 26 | #include <linux/types.h> |
27 | #include <linux/fs.h> | 27 | #include <linux/fs.h> |
28 | #include <linux/buffer_head.h> | 28 | #include <linux/buffer_head.h> |
29 | #include <linux/workqueue.h> | ||
29 | #include <linux/nilfs2_fs.h> | 30 | #include <linux/nilfs2_fs.h> |
30 | #include "nilfs.h" | 31 | #include "nilfs.h" |
31 | 32 | ||
@@ -92,6 +93,8 @@ struct nilfs_segsum_pointer { | |||
92 | * @sc_nblk_inc: Block count of current generation | 93 | * @sc_nblk_inc: Block count of current generation |
93 | * @sc_dirty_files: List of files to be written | 94 | * @sc_dirty_files: List of files to be written |
94 | * @sc_gc_inodes: List of GC inodes having blocks to be written | 95 | * @sc_gc_inodes: List of GC inodes having blocks to be written |
96 | * @sc_iput_queue: list of inodes for which iput should be done | ||
97 | * @sc_iput_work: work struct to defer iput call | ||
95 | * @sc_freesegs: array of segment numbers to be freed | 98 | * @sc_freesegs: array of segment numbers to be freed |
96 | * @sc_nfreesegs: number of segments on @sc_freesegs | 99 | * @sc_nfreesegs: number of segments on @sc_freesegs |
97 | * @sc_dsync_inode: inode whose data pages are written for a sync operation | 100 | * @sc_dsync_inode: inode whose data pages are written for a sync operation |
@@ -135,6 +138,8 @@ struct nilfs_sc_info { | |||
135 | 138 | ||
136 | struct list_head sc_dirty_files; | 139 | struct list_head sc_dirty_files; |
137 | struct list_head sc_gc_inodes; | 140 | struct list_head sc_gc_inodes; |
141 | struct list_head sc_iput_queue; | ||
142 | struct work_struct sc_iput_work; | ||
138 | 143 | ||
139 | __u64 *sc_freesegs; | 144 | __u64 *sc_freesegs; |
140 | size_t sc_nfreesegs; | 145 | size_t sc_nfreesegs; |
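The queue and the work item are new fields, so they have to be set up when the segment constructor is allocated; that happens outside these hunks (presumably in nilfs_segctor_new()), along the lines of:

	INIT_LIST_HEAD(&sci->sc_iput_queue);
	INIT_WORK(&sci->sc_iput_work, nilfs_iput_work_func);

which is also why segment.h now pulls in <linux/workqueue.h> for the struct work_struct definition.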
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c index 8f0acef3d184..69df5b239844 100644 --- a/fs/quota/dquot.c +++ b/fs/quota/dquot.c | |||
@@ -2396,30 +2396,25 @@ static inline qsize_t stoqb(qsize_t space) | |||
2396 | } | 2396 | } |
2397 | 2397 | ||
2398 | /* Generic routine for getting common part of quota structure */ | 2398 | /* Generic routine for getting common part of quota structure */ |
2399 | static void do_get_dqblk(struct dquot *dquot, struct fs_disk_quota *di) | 2399 | static void do_get_dqblk(struct dquot *dquot, struct qc_dqblk *di) |
2400 | { | 2400 | { |
2401 | struct mem_dqblk *dm = &dquot->dq_dqb; | 2401 | struct mem_dqblk *dm = &dquot->dq_dqb; |
2402 | 2402 | ||
2403 | memset(di, 0, sizeof(*di)); | 2403 | memset(di, 0, sizeof(*di)); |
2404 | di->d_version = FS_DQUOT_VERSION; | ||
2405 | di->d_flags = dquot->dq_id.type == USRQUOTA ? | ||
2406 | FS_USER_QUOTA : FS_GROUP_QUOTA; | ||
2407 | di->d_id = from_kqid_munged(current_user_ns(), dquot->dq_id); | ||
2408 | |||
2409 | spin_lock(&dq_data_lock); | 2404 | spin_lock(&dq_data_lock); |
2410 | di->d_blk_hardlimit = stoqb(dm->dqb_bhardlimit); | 2405 | di->d_spc_hardlimit = dm->dqb_bhardlimit; |
2411 | di->d_blk_softlimit = stoqb(dm->dqb_bsoftlimit); | 2406 | di->d_spc_softlimit = dm->dqb_bsoftlimit; |
2412 | di->d_ino_hardlimit = dm->dqb_ihardlimit; | 2407 | di->d_ino_hardlimit = dm->dqb_ihardlimit; |
2413 | di->d_ino_softlimit = dm->dqb_isoftlimit; | 2408 | di->d_ino_softlimit = dm->dqb_isoftlimit; |
2414 | di->d_bcount = dm->dqb_curspace + dm->dqb_rsvspace; | 2409 | di->d_space = dm->dqb_curspace + dm->dqb_rsvspace; |
2415 | di->d_icount = dm->dqb_curinodes; | 2410 | di->d_ino_count = dm->dqb_curinodes; |
2416 | di->d_btimer = dm->dqb_btime; | 2411 | di->d_spc_timer = dm->dqb_btime; |
2417 | di->d_itimer = dm->dqb_itime; | 2412 | di->d_ino_timer = dm->dqb_itime; |
2418 | spin_unlock(&dq_data_lock); | 2413 | spin_unlock(&dq_data_lock); |
2419 | } | 2414 | } |
2420 | 2415 | ||
2421 | int dquot_get_dqblk(struct super_block *sb, struct kqid qid, | 2416 | int dquot_get_dqblk(struct super_block *sb, struct kqid qid, |
2422 | struct fs_disk_quota *di) | 2417 | struct qc_dqblk *di) |
2423 | { | 2418 | { |
2424 | struct dquot *dquot; | 2419 | struct dquot *dquot; |
2425 | 2420 | ||
@@ -2433,70 +2428,70 @@ int dquot_get_dqblk(struct super_block *sb, struct kqid qid, | |||
2433 | } | 2428 | } |
2434 | EXPORT_SYMBOL(dquot_get_dqblk); | 2429 | EXPORT_SYMBOL(dquot_get_dqblk); |
2435 | 2430 | ||
2436 | #define VFS_FS_DQ_MASK \ | 2431 | #define VFS_QC_MASK \ |
2437 | (FS_DQ_BCOUNT | FS_DQ_BSOFT | FS_DQ_BHARD | \ | 2432 | (QC_SPACE | QC_SPC_SOFT | QC_SPC_HARD | \ |
2438 | FS_DQ_ICOUNT | FS_DQ_ISOFT | FS_DQ_IHARD | \ | 2433 | QC_INO_COUNT | QC_INO_SOFT | QC_INO_HARD | \ |
2439 | FS_DQ_BTIMER | FS_DQ_ITIMER) | 2434 | QC_SPC_TIMER | QC_INO_TIMER) |
2440 | 2435 | ||
2441 | /* Generic routine for setting common part of quota structure */ | 2436 | /* Generic routine for setting common part of quota structure */ |
2442 | static int do_set_dqblk(struct dquot *dquot, struct fs_disk_quota *di) | 2437 | static int do_set_dqblk(struct dquot *dquot, struct qc_dqblk *di) |
2443 | { | 2438 | { |
2444 | struct mem_dqblk *dm = &dquot->dq_dqb; | 2439 | struct mem_dqblk *dm = &dquot->dq_dqb; |
2445 | int check_blim = 0, check_ilim = 0; | 2440 | int check_blim = 0, check_ilim = 0; |
2446 | struct mem_dqinfo *dqi = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_id.type]; | 2441 | struct mem_dqinfo *dqi = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_id.type]; |
2447 | 2442 | ||
2448 | if (di->d_fieldmask & ~VFS_FS_DQ_MASK) | 2443 | if (di->d_fieldmask & ~VFS_QC_MASK) |
2449 | return -EINVAL; | 2444 | return -EINVAL; |
2450 | 2445 | ||
2451 | if (((di->d_fieldmask & FS_DQ_BSOFT) && | 2446 | if (((di->d_fieldmask & QC_SPC_SOFT) && |
2452 | (di->d_blk_softlimit > dqi->dqi_maxblimit)) || | 2447 | stoqb(di->d_spc_softlimit) > dqi->dqi_maxblimit) || |
2453 | ((di->d_fieldmask & FS_DQ_BHARD) && | 2448 | ((di->d_fieldmask & QC_SPC_HARD) && |
2454 | (di->d_blk_hardlimit > dqi->dqi_maxblimit)) || | 2449 | stoqb(di->d_spc_hardlimit) > dqi->dqi_maxblimit) || |
2455 | ((di->d_fieldmask & FS_DQ_ISOFT) && | 2450 | ((di->d_fieldmask & QC_INO_SOFT) && |
2456 | (di->d_ino_softlimit > dqi->dqi_maxilimit)) || | 2451 | (di->d_ino_softlimit > dqi->dqi_maxilimit)) || |
2457 | ((di->d_fieldmask & FS_DQ_IHARD) && | 2452 | ((di->d_fieldmask & QC_INO_HARD) && |
2458 | (di->d_ino_hardlimit > dqi->dqi_maxilimit))) | 2453 | (di->d_ino_hardlimit > dqi->dqi_maxilimit))) |
2459 | return -ERANGE; | 2454 | return -ERANGE; |
2460 | 2455 | ||
2461 | spin_lock(&dq_data_lock); | 2456 | spin_lock(&dq_data_lock); |
2462 | if (di->d_fieldmask & FS_DQ_BCOUNT) { | 2457 | if (di->d_fieldmask & QC_SPACE) { |
2463 | dm->dqb_curspace = di->d_bcount - dm->dqb_rsvspace; | 2458 | dm->dqb_curspace = di->d_space - dm->dqb_rsvspace; |
2464 | check_blim = 1; | 2459 | check_blim = 1; |
2465 | set_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags); | 2460 | set_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags); |
2466 | } | 2461 | } |
2467 | 2462 | ||
2468 | if (di->d_fieldmask & FS_DQ_BSOFT) | 2463 | if (di->d_fieldmask & QC_SPC_SOFT) |
2469 | dm->dqb_bsoftlimit = qbtos(di->d_blk_softlimit); | 2464 | dm->dqb_bsoftlimit = di->d_spc_softlimit; |
2470 | if (di->d_fieldmask & FS_DQ_BHARD) | 2465 | if (di->d_fieldmask & QC_SPC_HARD) |
2471 | dm->dqb_bhardlimit = qbtos(di->d_blk_hardlimit); | 2466 | dm->dqb_bhardlimit = di->d_spc_hardlimit; |
2472 | if (di->d_fieldmask & (FS_DQ_BSOFT | FS_DQ_BHARD)) { | 2467 | if (di->d_fieldmask & (QC_SPC_SOFT | QC_SPC_HARD)) { |
2473 | check_blim = 1; | 2468 | check_blim = 1; |
2474 | set_bit(DQ_LASTSET_B + QIF_BLIMITS_B, &dquot->dq_flags); | 2469 | set_bit(DQ_LASTSET_B + QIF_BLIMITS_B, &dquot->dq_flags); |
2475 | } | 2470 | } |
2476 | 2471 | ||
2477 | if (di->d_fieldmask & FS_DQ_ICOUNT) { | 2472 | if (di->d_fieldmask & QC_INO_COUNT) { |
2478 | dm->dqb_curinodes = di->d_icount; | 2473 | dm->dqb_curinodes = di->d_ino_count; |
2479 | check_ilim = 1; | 2474 | check_ilim = 1; |
2480 | set_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags); | 2475 | set_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags); |
2481 | } | 2476 | } |
2482 | 2477 | ||
2483 | if (di->d_fieldmask & FS_DQ_ISOFT) | 2478 | if (di->d_fieldmask & QC_INO_SOFT) |
2484 | dm->dqb_isoftlimit = di->d_ino_softlimit; | 2479 | dm->dqb_isoftlimit = di->d_ino_softlimit; |
2485 | if (di->d_fieldmask & FS_DQ_IHARD) | 2480 | if (di->d_fieldmask & QC_INO_HARD) |
2486 | dm->dqb_ihardlimit = di->d_ino_hardlimit; | 2481 | dm->dqb_ihardlimit = di->d_ino_hardlimit; |
2487 | if (di->d_fieldmask & (FS_DQ_ISOFT | FS_DQ_IHARD)) { | 2482 | if (di->d_fieldmask & (QC_INO_SOFT | QC_INO_HARD)) { |
2488 | check_ilim = 1; | 2483 | check_ilim = 1; |
2489 | set_bit(DQ_LASTSET_B + QIF_ILIMITS_B, &dquot->dq_flags); | 2484 | set_bit(DQ_LASTSET_B + QIF_ILIMITS_B, &dquot->dq_flags); |
2490 | } | 2485 | } |
2491 | 2486 | ||
2492 | if (di->d_fieldmask & FS_DQ_BTIMER) { | 2487 | if (di->d_fieldmask & QC_SPC_TIMER) { |
2493 | dm->dqb_btime = di->d_btimer; | 2488 | dm->dqb_btime = di->d_spc_timer; |
2494 | check_blim = 1; | 2489 | check_blim = 1; |
2495 | set_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags); | 2490 | set_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags); |
2496 | } | 2491 | } |
2497 | 2492 | ||
2498 | if (di->d_fieldmask & FS_DQ_ITIMER) { | 2493 | if (di->d_fieldmask & QC_INO_TIMER) { |
2499 | dm->dqb_itime = di->d_itimer; | 2494 | dm->dqb_itime = di->d_ino_timer; |
2500 | check_ilim = 1; | 2495 | check_ilim = 1; |
2501 | set_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags); | 2496 | set_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags); |
2502 | } | 2497 | } |
@@ -2506,7 +2501,7 @@ static int do_set_dqblk(struct dquot *dquot, struct fs_disk_quota *di) | |||
2506 | dm->dqb_curspace < dm->dqb_bsoftlimit) { | 2501 | dm->dqb_curspace < dm->dqb_bsoftlimit) { |
2507 | dm->dqb_btime = 0; | 2502 | dm->dqb_btime = 0; |
2508 | clear_bit(DQ_BLKS_B, &dquot->dq_flags); | 2503 | clear_bit(DQ_BLKS_B, &dquot->dq_flags); |
2509 | } else if (!(di->d_fieldmask & FS_DQ_BTIMER)) | 2504 | } else if (!(di->d_fieldmask & QC_SPC_TIMER)) |
2510 | /* Set grace only if user hasn't provided his own... */ | 2505 | /* Set grace only if user hasn't provided his own... */ |
2511 | dm->dqb_btime = get_seconds() + dqi->dqi_bgrace; | 2506 | dm->dqb_btime = get_seconds() + dqi->dqi_bgrace; |
2512 | } | 2507 | } |
@@ -2515,7 +2510,7 @@ static int do_set_dqblk(struct dquot *dquot, struct fs_disk_quota *di) | |||
2515 | dm->dqb_curinodes < dm->dqb_isoftlimit) { | 2510 | dm->dqb_curinodes < dm->dqb_isoftlimit) { |
2516 | dm->dqb_itime = 0; | 2511 | dm->dqb_itime = 0; |
2517 | clear_bit(DQ_INODES_B, &dquot->dq_flags); | 2512 | clear_bit(DQ_INODES_B, &dquot->dq_flags); |
2518 | } else if (!(di->d_fieldmask & FS_DQ_ITIMER)) | 2513 | } else if (!(di->d_fieldmask & QC_INO_TIMER)) |
2519 | /* Set grace only if user hasn't provided his own... */ | 2514 | /* Set grace only if user hasn't provided his own... */ |
2520 | dm->dqb_itime = get_seconds() + dqi->dqi_igrace; | 2515 | dm->dqb_itime = get_seconds() + dqi->dqi_igrace; |
2521 | } | 2516 | } |
@@ -2531,7 +2526,7 @@ static int do_set_dqblk(struct dquot *dquot, struct fs_disk_quota *di) | |||
2531 | } | 2526 | } |
2532 | 2527 | ||
2533 | int dquot_set_dqblk(struct super_block *sb, struct kqid qid, | 2528 | int dquot_set_dqblk(struct super_block *sb, struct kqid qid, |
2534 | struct fs_disk_quota *di) | 2529 | struct qc_dqblk *di) |
2535 | { | 2530 | { |
2536 | struct dquot *dquot; | 2531 | struct dquot *dquot; |
2537 | int rc; | 2532 | int rc; |
diff --git a/fs/quota/quota.c b/fs/quota/quota.c index 2aa4151f99d2..6f3856328eea 100644 --- a/fs/quota/quota.c +++ b/fs/quota/quota.c | |||
@@ -118,17 +118,27 @@ static int quota_setinfo(struct super_block *sb, int type, void __user *addr) | |||
118 | return sb->s_qcop->set_info(sb, type, &info); | 118 | return sb->s_qcop->set_info(sb, type, &info); |
119 | } | 119 | } |
120 | 120 | ||
121 | static void copy_to_if_dqblk(struct if_dqblk *dst, struct fs_disk_quota *src) | 121 | static inline qsize_t qbtos(qsize_t blocks) |
122 | { | ||
123 | return blocks << QIF_DQBLKSIZE_BITS; | ||
124 | } | ||
125 | |||
126 | static inline qsize_t stoqb(qsize_t space) | ||
127 | { | ||
128 | return (space + QIF_DQBLKSIZE - 1) >> QIF_DQBLKSIZE_BITS; | ||
129 | } | ||
130 | |||
131 | static void copy_to_if_dqblk(struct if_dqblk *dst, struct qc_dqblk *src) | ||
122 | { | 132 | { |
123 | memset(dst, 0, sizeof(*dst)); | 133 | memset(dst, 0, sizeof(*dst)); |
124 | dst->dqb_bhardlimit = src->d_blk_hardlimit; | 134 | dst->dqb_bhardlimit = stoqb(src->d_spc_hardlimit); |
125 | dst->dqb_bsoftlimit = src->d_blk_softlimit; | 135 | dst->dqb_bsoftlimit = stoqb(src->d_spc_softlimit); |
126 | dst->dqb_curspace = src->d_bcount; | 136 | dst->dqb_curspace = src->d_space; |
127 | dst->dqb_ihardlimit = src->d_ino_hardlimit; | 137 | dst->dqb_ihardlimit = src->d_ino_hardlimit; |
128 | dst->dqb_isoftlimit = src->d_ino_softlimit; | 138 | dst->dqb_isoftlimit = src->d_ino_softlimit; |
129 | dst->dqb_curinodes = src->d_icount; | 139 | dst->dqb_curinodes = src->d_ino_count; |
130 | dst->dqb_btime = src->d_btimer; | 140 | dst->dqb_btime = src->d_spc_timer; |
131 | dst->dqb_itime = src->d_itimer; | 141 | dst->dqb_itime = src->d_ino_timer; |
132 | dst->dqb_valid = QIF_ALL; | 142 | dst->dqb_valid = QIF_ALL; |
133 | } | 143 | } |
134 | 144 | ||
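copy_to_if_dqblk() now has to convert, because struct qc_dqblk carries space values in bytes while the classic Q_GETQUOTA interface reports them in 1 KiB quota blocks; stoqb() rounds up so a partially used block still counts against the limit. A standalone illustration of the helpers defined above (QIF_DQBLKSIZE_BITS being 10, i.e. 1024-byte quota blocks, is taken from the uapi quota header and is the only assumption here):

	#include <stdio.h>
	#include <stdint.h>

	#define QIF_DQBLKSIZE_BITS 10			/* 1 KiB quota blocks */
	#define QIF_DQBLKSIZE (1ULL << QIF_DQBLKSIZE_BITS)

	static uint64_t qbtos(uint64_t blocks)		/* blocks -> bytes */
	{
		return blocks << QIF_DQBLKSIZE_BITS;
	}

	static uint64_t stoqb(uint64_t space)		/* bytes -> blocks, rounded up */
	{
		return (space + QIF_DQBLKSIZE - 1) >> QIF_DQBLKSIZE_BITS;
	}

	int main(void)
	{
		printf("stoqb(1048576) = %llu\n", (unsigned long long)stoqb(1048576)); /* 1024 */
		printf("stoqb(1048577) = %llu\n", (unsigned long long)stoqb(1048577)); /* 1025 */
		printf("qbtos(1025)    = %llu\n", (unsigned long long)qbtos(1025));    /* 1049600 */
		return 0;
	}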
@@ -136,7 +146,7 @@ static int quota_getquota(struct super_block *sb, int type, qid_t id, | |||
136 | void __user *addr) | 146 | void __user *addr) |
137 | { | 147 | { |
138 | struct kqid qid; | 148 | struct kqid qid; |
139 | struct fs_disk_quota fdq; | 149 | struct qc_dqblk fdq; |
140 | struct if_dqblk idq; | 150 | struct if_dqblk idq; |
141 | int ret; | 151 | int ret; |
142 | 152 | ||
@@ -154,36 +164,36 @@ static int quota_getquota(struct super_block *sb, int type, qid_t id, | |||
154 | return 0; | 164 | return 0; |
155 | } | 165 | } |
156 | 166 | ||
157 | static void copy_from_if_dqblk(struct fs_disk_quota *dst, struct if_dqblk *src) | 167 | static void copy_from_if_dqblk(struct qc_dqblk *dst, struct if_dqblk *src) |
158 | { | 168 | { |
159 | dst->d_blk_hardlimit = src->dqb_bhardlimit; | 169 | dst->d_spc_hardlimit = qbtos(src->dqb_bhardlimit); |
160 | dst->d_blk_softlimit = src->dqb_bsoftlimit; | 170 | dst->d_spc_softlimit = qbtos(src->dqb_bsoftlimit); |
161 | dst->d_bcount = src->dqb_curspace; | 171 | dst->d_space = src->dqb_curspace; |
162 | dst->d_ino_hardlimit = src->dqb_ihardlimit; | 172 | dst->d_ino_hardlimit = src->dqb_ihardlimit; |
163 | dst->d_ino_softlimit = src->dqb_isoftlimit; | 173 | dst->d_ino_softlimit = src->dqb_isoftlimit; |
164 | dst->d_icount = src->dqb_curinodes; | 174 | dst->d_ino_count = src->dqb_curinodes; |
165 | dst->d_btimer = src->dqb_btime; | 175 | dst->d_spc_timer = src->dqb_btime; |
166 | dst->d_itimer = src->dqb_itime; | 176 | dst->d_ino_timer = src->dqb_itime; |
167 | 177 | ||
168 | dst->d_fieldmask = 0; | 178 | dst->d_fieldmask = 0; |
169 | if (src->dqb_valid & QIF_BLIMITS) | 179 | if (src->dqb_valid & QIF_BLIMITS) |
170 | dst->d_fieldmask |= FS_DQ_BSOFT | FS_DQ_BHARD; | 180 | dst->d_fieldmask |= QC_SPC_SOFT | QC_SPC_HARD; |
171 | if (src->dqb_valid & QIF_SPACE) | 181 | if (src->dqb_valid & QIF_SPACE) |
172 | dst->d_fieldmask |= FS_DQ_BCOUNT; | 182 | dst->d_fieldmask |= QC_SPACE; |
173 | if (src->dqb_valid & QIF_ILIMITS) | 183 | if (src->dqb_valid & QIF_ILIMITS) |
174 | dst->d_fieldmask |= FS_DQ_ISOFT | FS_DQ_IHARD; | 184 | dst->d_fieldmask |= QC_INO_SOFT | QC_INO_HARD; |
175 | if (src->dqb_valid & QIF_INODES) | 185 | if (src->dqb_valid & QIF_INODES) |
176 | dst->d_fieldmask |= FS_DQ_ICOUNT; | 186 | dst->d_fieldmask |= QC_INO_COUNT; |
177 | if (src->dqb_valid & QIF_BTIME) | 187 | if (src->dqb_valid & QIF_BTIME) |
178 | dst->d_fieldmask |= FS_DQ_BTIMER; | 188 | dst->d_fieldmask |= QC_SPC_TIMER; |
179 | if (src->dqb_valid & QIF_ITIME) | 189 | if (src->dqb_valid & QIF_ITIME) |
180 | dst->d_fieldmask |= FS_DQ_ITIMER; | 190 | dst->d_fieldmask |= QC_INO_TIMER; |
181 | } | 191 | } |
182 | 192 | ||
183 | static int quota_setquota(struct super_block *sb, int type, qid_t id, | 193 | static int quota_setquota(struct super_block *sb, int type, qid_t id, |
184 | void __user *addr) | 194 | void __user *addr) |
185 | { | 195 | { |
186 | struct fs_disk_quota fdq; | 196 | struct qc_dqblk fdq; |
187 | struct if_dqblk idq; | 197 | struct if_dqblk idq; |
188 | struct kqid qid; | 198 | struct kqid qid; |
189 | 199 | ||
@@ -247,10 +257,78 @@ static int quota_getxstatev(struct super_block *sb, void __user *addr) | |||
247 | return ret; | 257 | return ret; |
248 | } | 258 | } |
249 | 259 | ||
260 | /* | ||
261 | * XFS defines BBTOB and BTOBB macros inside fs/xfs/ and we cannot move them | ||
262 | * out of there as xfsprogs rely on definitions being in that header file. So | ||
263 | * just define same functions here for quota purposes. | ||
264 | */ | ||
265 | #define XFS_BB_SHIFT 9 | ||
266 | |||
267 | static inline u64 quota_bbtob(u64 blocks) | ||
268 | { | ||
269 | return blocks << XFS_BB_SHIFT; | ||
270 | } | ||
271 | |||
272 | static inline u64 quota_btobb(u64 bytes) | ||
273 | { | ||
274 | return (bytes + (1 << XFS_BB_SHIFT) - 1) >> XFS_BB_SHIFT; | ||
275 | } | ||
276 | |||
277 | static void copy_from_xfs_dqblk(struct qc_dqblk *dst, struct fs_disk_quota *src) | ||
278 | { | ||
279 | dst->d_spc_hardlimit = quota_bbtob(src->d_blk_hardlimit); | ||
280 | dst->d_spc_softlimit = quota_bbtob(src->d_blk_softlimit); | ||
281 | dst->d_ino_hardlimit = src->d_ino_hardlimit; | ||
282 | dst->d_ino_softlimit = src->d_ino_softlimit; | ||
283 | dst->d_space = quota_bbtob(src->d_bcount); | ||
284 | dst->d_ino_count = src->d_icount; | ||
285 | dst->d_ino_timer = src->d_itimer; | ||
286 | dst->d_spc_timer = src->d_btimer; | ||
287 | dst->d_ino_warns = src->d_iwarns; | ||
288 | dst->d_spc_warns = src->d_bwarns; | ||
289 | dst->d_rt_spc_hardlimit = quota_bbtob(src->d_rtb_hardlimit); | ||
290 | dst->d_rt_spc_softlimit = quota_bbtob(src->d_rtb_softlimit); | ||
291 | dst->d_rt_space = quota_bbtob(src->d_rtbcount); | ||
292 | dst->d_rt_spc_timer = src->d_rtbtimer; | ||
293 | dst->d_rt_spc_warns = src->d_rtbwarns; | ||
294 | dst->d_fieldmask = 0; | ||
295 | if (src->d_fieldmask & FS_DQ_ISOFT) | ||
296 | dst->d_fieldmask |= QC_INO_SOFT; | ||
297 | if (src->d_fieldmask & FS_DQ_IHARD) | ||
298 | dst->d_fieldmask |= QC_INO_HARD; | ||
299 | if (src->d_fieldmask & FS_DQ_BSOFT) | ||
300 | dst->d_fieldmask |= QC_SPC_SOFT; | ||
301 | if (src->d_fieldmask & FS_DQ_BHARD) | ||
302 | dst->d_fieldmask |= QC_SPC_HARD; | ||
303 | if (src->d_fieldmask & FS_DQ_RTBSOFT) | ||
304 | dst->d_fieldmask |= QC_RT_SPC_SOFT; | ||
305 | if (src->d_fieldmask & FS_DQ_RTBHARD) | ||
306 | dst->d_fieldmask |= QC_RT_SPC_HARD; | ||
307 | if (src->d_fieldmask & FS_DQ_BTIMER) | ||
308 | dst->d_fieldmask |= QC_SPC_TIMER; | ||
309 | if (src->d_fieldmask & FS_DQ_ITIMER) | ||
310 | dst->d_fieldmask |= QC_INO_TIMER; | ||
311 | if (src->d_fieldmask & FS_DQ_RTBTIMER) | ||
312 | dst->d_fieldmask |= QC_RT_SPC_TIMER; | ||
313 | if (src->d_fieldmask & FS_DQ_BWARNS) | ||
314 | dst->d_fieldmask |= QC_SPC_WARNS; | ||
315 | if (src->d_fieldmask & FS_DQ_IWARNS) | ||
316 | dst->d_fieldmask |= QC_INO_WARNS; | ||
317 | if (src->d_fieldmask & FS_DQ_RTBWARNS) | ||
318 | dst->d_fieldmask |= QC_RT_SPC_WARNS; | ||
319 | if (src->d_fieldmask & FS_DQ_BCOUNT) | ||
320 | dst->d_fieldmask |= QC_SPACE; | ||
321 | if (src->d_fieldmask & FS_DQ_ICOUNT) | ||
322 | dst->d_fieldmask |= QC_INO_COUNT; | ||
323 | if (src->d_fieldmask & FS_DQ_RTBCOUNT) | ||
324 | dst->d_fieldmask |= QC_RT_SPACE; | ||
325 | } | ||
326 | |||
250 | static int quota_setxquota(struct super_block *sb, int type, qid_t id, | 327 | static int quota_setxquota(struct super_block *sb, int type, qid_t id, |
251 | void __user *addr) | 328 | void __user *addr) |
252 | { | 329 | { |
253 | struct fs_disk_quota fdq; | 330 | struct fs_disk_quota fdq; |
331 | struct qc_dqblk qdq; | ||
254 | struct kqid qid; | 332 | struct kqid qid; |
255 | 333 | ||
256 | if (copy_from_user(&fdq, addr, sizeof(fdq))) | 334 | if (copy_from_user(&fdq, addr, sizeof(fdq))) |
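quota_setxquota() now translates the XFS-style fs_disk_quota it copied from user space into a qc_dqblk before calling ->set_dqblk(); the comment at the top of the hunk explains why quota_bbtob()/quota_btobb() are duplicated here instead of shared with fs/xfs/. Note the unit difference from the native interface above: XFS quotactl counts 512-byte "basic blocks" (XFS_BB_SHIFT = 9), not 1 KiB quota blocks. A quick standalone check of the two helpers as defined in the hunk:

	#include <stdio.h>
	#include <stdint.h>

	#define XFS_BB_SHIFT 9				/* 512-byte basic blocks */

	static uint64_t quota_bbtob(uint64_t blocks)
	{
		return blocks << XFS_BB_SHIFT;
	}

	static uint64_t quota_btobb(uint64_t bytes)	/* rounds partial blocks up */
	{
		return (bytes + (1 << XFS_BB_SHIFT) - 1) >> XFS_BB_SHIFT;
	}

	int main(void)
	{
		printf("btobb(4096)       = %llu\n", (unsigned long long)quota_btobb(4096)); /* 8 */
		/* The round trip can only grow a value, never shrink it: */
		printf("bbtob(btobb(100)) = %llu\n",
		       (unsigned long long)quota_bbtob(quota_btobb(100)));		     /* 512 */
		return 0;
	}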
@@ -260,13 +338,44 @@ static int quota_setxquota(struct super_block *sb, int type, qid_t id, | |||
260 | qid = make_kqid(current_user_ns(), type, id); | 338 | qid = make_kqid(current_user_ns(), type, id); |
261 | if (!qid_valid(qid)) | 339 | if (!qid_valid(qid)) |
262 | return -EINVAL; | 340 | return -EINVAL; |
263 | return sb->s_qcop->set_dqblk(sb, qid, &fdq); | 341 | copy_from_xfs_dqblk(&qdq, &fdq); |
342 | return sb->s_qcop->set_dqblk(sb, qid, &qdq); | ||
343 | } | ||
344 | |||
345 | static void copy_to_xfs_dqblk(struct fs_disk_quota *dst, struct qc_dqblk *src, | ||
346 | int type, qid_t id) | ||
347 | { | ||
348 | memset(dst, 0, sizeof(*dst)); | ||
349 | dst->d_version = FS_DQUOT_VERSION; | ||
350 | dst->d_id = id; | ||
351 | if (type == USRQUOTA) | ||
352 | dst->d_flags = FS_USER_QUOTA; | ||
353 | else if (type == PRJQUOTA) | ||
354 | dst->d_flags = FS_PROJ_QUOTA; | ||
355 | else | ||
356 | dst->d_flags = FS_GROUP_QUOTA; | ||
357 | dst->d_blk_hardlimit = quota_btobb(src->d_spc_hardlimit); | ||
358 | dst->d_blk_softlimit = quota_btobb(src->d_spc_softlimit); | ||
359 | dst->d_ino_hardlimit = src->d_ino_hardlimit; | ||
360 | dst->d_ino_softlimit = src->d_ino_softlimit; | ||
361 | dst->d_bcount = quota_btobb(src->d_space); | ||
362 | dst->d_icount = src->d_ino_count; | ||
363 | dst->d_itimer = src->d_ino_timer; | ||
364 | dst->d_btimer = src->d_spc_timer; | ||
365 | dst->d_iwarns = src->d_ino_warns; | ||
366 | dst->d_bwarns = src->d_spc_warns; | ||
367 | dst->d_rtb_hardlimit = quota_btobb(src->d_rt_spc_hardlimit); | ||
368 | dst->d_rtb_softlimit = quota_btobb(src->d_rt_spc_softlimit); | ||
369 | dst->d_rtbcount = quota_btobb(src->d_rt_space); | ||
370 | dst->d_rtbtimer = src->d_rt_spc_timer; | ||
371 | dst->d_rtbwarns = src->d_rt_spc_warns; | ||
264 | } | 372 | } |
265 | 373 | ||
266 | static int quota_getxquota(struct super_block *sb, int type, qid_t id, | 374 | static int quota_getxquota(struct super_block *sb, int type, qid_t id, |
267 | void __user *addr) | 375 | void __user *addr) |
268 | { | 376 | { |
269 | struct fs_disk_quota fdq; | 377 | struct fs_disk_quota fdq; |
378 | struct qc_dqblk qdq; | ||
270 | struct kqid qid; | 379 | struct kqid qid; |
271 | int ret; | 380 | int ret; |
272 | 381 | ||
@@ -275,8 +384,11 @@ static int quota_getxquota(struct super_block *sb, int type, qid_t id, | |||
275 | qid = make_kqid(current_user_ns(), type, id); | 384 | qid = make_kqid(current_user_ns(), type, id); |
276 | if (!qid_valid(qid)) | 385 | if (!qid_valid(qid)) |
277 | return -EINVAL; | 386 | return -EINVAL; |
278 | ret = sb->s_qcop->get_dqblk(sb, qid, &fdq); | 387 | ret = sb->s_qcop->get_dqblk(sb, qid, &qdq); |
279 | if (!ret && copy_to_user(addr, &fdq, sizeof(fdq))) | 388 | if (ret) |
389 | return ret; | ||
390 | copy_to_xfs_dqblk(&fdq, &qdq, type, id); | ||
391 | if (copy_to_user(addr, &fdq, sizeof(fdq))) | ||
280 | return -EFAULT; | 392 | return -EFAULT; |
281 | return ret; | 393 | return ret; |
282 | } | 394 | } |
diff --git a/fs/udf/file.c b/fs/udf/file.c index bb15771b92ae..08f3555fbeac 100644 --- a/fs/udf/file.c +++ b/fs/udf/file.c | |||
@@ -224,7 +224,7 @@ out: | |||
224 | static int udf_release_file(struct inode *inode, struct file *filp) | 224 | static int udf_release_file(struct inode *inode, struct file *filp) |
225 | { | 225 | { |
226 | if (filp->f_mode & FMODE_WRITE && | 226 | if (filp->f_mode & FMODE_WRITE && |
227 | atomic_read(&inode->i_writecount) > 1) { | 227 | atomic_read(&inode->i_writecount) == 1) { |
228 | /* | 228 | /* |
229 | * Grab i_mutex to avoid races with writes changing i_size | 229 | * Grab i_mutex to avoid races with writes changing i_size |
230 | * while we are running. | 230 | * while we are running. |
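Flipping the test from '> 1' to '== 1' makes udf_release_file() do its work on the close of the last writer: i_writecount still includes the file being released, so a value of 1 means no other writable reference is left and it is safe to drop preallocated space under i_mutex. A sketch of the intended shape of the function (only the condition and the comment come from the hunk; udf_trim_prealloc() is a hypothetical stand-in for whatever the elided body actually does):

	static int udf_release_file(struct inode *inode, struct file *filp)
	{
		if (filp->f_mode & FMODE_WRITE &&
		    atomic_read(&inode->i_writecount) == 1) {
			/*
			 * Grab i_mutex to avoid races with writes changing i_size
			 * while we are running.
			 */
			mutex_lock(&inode->i_mutex);
			udf_trim_prealloc(inode);	/* hypothetical helper */
			mutex_unlock(&inode->i_mutex);
		}
		return 0;
	}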
diff --git a/fs/xfs/xfs_qm.h b/fs/xfs/xfs_qm.h index 3a07a937e232..41f6c0b9d51c 100644 --- a/fs/xfs/xfs_qm.h +++ b/fs/xfs/xfs_qm.h | |||
@@ -166,9 +166,9 @@ extern void xfs_qm_dqrele_all_inodes(struct xfs_mount *, uint); | |||
166 | /* quota ops */ | 166 | /* quota ops */ |
167 | extern int xfs_qm_scall_trunc_qfiles(struct xfs_mount *, uint); | 167 | extern int xfs_qm_scall_trunc_qfiles(struct xfs_mount *, uint); |
168 | extern int xfs_qm_scall_getquota(struct xfs_mount *, xfs_dqid_t, | 168 | extern int xfs_qm_scall_getquota(struct xfs_mount *, xfs_dqid_t, |
169 | uint, struct fs_disk_quota *); | 169 | uint, struct qc_dqblk *); |
170 | extern int xfs_qm_scall_setqlim(struct xfs_mount *, xfs_dqid_t, uint, | 170 | extern int xfs_qm_scall_setqlim(struct xfs_mount *, xfs_dqid_t, uint, |
171 | struct fs_disk_quota *); | 171 | struct qc_dqblk *); |
172 | extern int xfs_qm_scall_getqstat(struct xfs_mount *, | 172 | extern int xfs_qm_scall_getqstat(struct xfs_mount *, |
173 | struct fs_quota_stat *); | 173 | struct fs_quota_stat *); |
174 | extern int xfs_qm_scall_getqstatv(struct xfs_mount *, | 174 | extern int xfs_qm_scall_getqstatv(struct xfs_mount *, |
diff --git a/fs/xfs/xfs_qm_syscalls.c b/fs/xfs/xfs_qm_syscalls.c index 74fca68e43b6..cb6168ec92c9 100644 --- a/fs/xfs/xfs_qm_syscalls.c +++ b/fs/xfs/xfs_qm_syscalls.c | |||
@@ -39,7 +39,6 @@ STATIC int xfs_qm_log_quotaoff(xfs_mount_t *, xfs_qoff_logitem_t **, uint); | |||
39 | STATIC int xfs_qm_log_quotaoff_end(xfs_mount_t *, xfs_qoff_logitem_t *, | 39 | STATIC int xfs_qm_log_quotaoff_end(xfs_mount_t *, xfs_qoff_logitem_t *, |
40 | uint); | 40 | uint); |
41 | STATIC uint xfs_qm_export_flags(uint); | 41 | STATIC uint xfs_qm_export_flags(uint); |
42 | STATIC uint xfs_qm_export_qtype_flags(uint); | ||
43 | 42 | ||
44 | /* | 43 | /* |
45 | * Turn off quota accounting and/or enforcement for all udquots and/or | 44 | * Turn off quota accounting and/or enforcement for all udquots and/or |
@@ -573,8 +572,8 @@ xfs_qm_scall_getqstatv( | |||
573 | return 0; | 572 | return 0; |
574 | } | 573 | } |
575 | 574 | ||
576 | #define XFS_DQ_MASK \ | 575 | #define XFS_QC_MASK \ |
577 | (FS_DQ_LIMIT_MASK | FS_DQ_TIMER_MASK | FS_DQ_WARNS_MASK) | 576 | (QC_LIMIT_MASK | QC_TIMER_MASK | QC_WARNS_MASK) |
578 | 577 | ||
579 | /* | 578 | /* |
580 | * Adjust quota limits, and start/stop timers accordingly. | 579 | * Adjust quota limits, and start/stop timers accordingly. |
@@ -584,7 +583,7 @@ xfs_qm_scall_setqlim( | |||
584 | struct xfs_mount *mp, | 583 | struct xfs_mount *mp, |
585 | xfs_dqid_t id, | 584 | xfs_dqid_t id, |
586 | uint type, | 585 | uint type, |
587 | fs_disk_quota_t *newlim) | 586 | struct qc_dqblk *newlim) |
588 | { | 587 | { |
589 | struct xfs_quotainfo *q = mp->m_quotainfo; | 588 | struct xfs_quotainfo *q = mp->m_quotainfo; |
590 | struct xfs_disk_dquot *ddq; | 589 | struct xfs_disk_dquot *ddq; |
@@ -593,9 +592,9 @@ xfs_qm_scall_setqlim( | |||
593 | int error; | 592 | int error; |
594 | xfs_qcnt_t hard, soft; | 593 | xfs_qcnt_t hard, soft; |
595 | 594 | ||
596 | if (newlim->d_fieldmask & ~XFS_DQ_MASK) | 595 | if (newlim->d_fieldmask & ~XFS_QC_MASK) |
597 | return -EINVAL; | 596 | return -EINVAL; |
598 | if ((newlim->d_fieldmask & XFS_DQ_MASK) == 0) | 597 | if ((newlim->d_fieldmask & XFS_QC_MASK) == 0) |
599 | return 0; | 598 | return 0; |
600 | 599 | ||
601 | /* | 600 | /* |
@@ -633,11 +632,11 @@ xfs_qm_scall_setqlim( | |||
633 | /* | 632 | /* |
634 | * Make sure that hardlimits are >= soft limits before changing. | 633 | * Make sure that hardlimits are >= soft limits before changing. |
635 | */ | 634 | */ |
636 | hard = (newlim->d_fieldmask & FS_DQ_BHARD) ? | 635 | hard = (newlim->d_fieldmask & QC_SPC_HARD) ? |
637 | (xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_blk_hardlimit) : | 636 | (xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_spc_hardlimit) : |
638 | be64_to_cpu(ddq->d_blk_hardlimit); | 637 | be64_to_cpu(ddq->d_blk_hardlimit); |
639 | soft = (newlim->d_fieldmask & FS_DQ_BSOFT) ? | 638 | soft = (newlim->d_fieldmask & QC_SPC_SOFT) ? |
640 | (xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_blk_softlimit) : | 639 | (xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_spc_softlimit) : |
641 | be64_to_cpu(ddq->d_blk_softlimit); | 640 | be64_to_cpu(ddq->d_blk_softlimit); |
642 | if (hard == 0 || hard >= soft) { | 641 | if (hard == 0 || hard >= soft) { |
643 | ddq->d_blk_hardlimit = cpu_to_be64(hard); | 642 | ddq->d_blk_hardlimit = cpu_to_be64(hard); |
@@ -650,11 +649,11 @@ xfs_qm_scall_setqlim( | |||
650 | } else { | 649 | } else { |
651 | xfs_debug(mp, "blkhard %Ld < blksoft %Ld", hard, soft); | 650 | xfs_debug(mp, "blkhard %Ld < blksoft %Ld", hard, soft); |
652 | } | 651 | } |
653 | hard = (newlim->d_fieldmask & FS_DQ_RTBHARD) ? | 652 | hard = (newlim->d_fieldmask & QC_RT_SPC_HARD) ? |
654 | (xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_rtb_hardlimit) : | 653 | (xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_rt_spc_hardlimit) : |
655 | be64_to_cpu(ddq->d_rtb_hardlimit); | 654 | be64_to_cpu(ddq->d_rtb_hardlimit); |
656 | soft = (newlim->d_fieldmask & FS_DQ_RTBSOFT) ? | 655 | soft = (newlim->d_fieldmask & QC_RT_SPC_SOFT) ? |
657 | (xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_rtb_softlimit) : | 656 | (xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_rt_spc_softlimit) : |
658 | be64_to_cpu(ddq->d_rtb_softlimit); | 657 | be64_to_cpu(ddq->d_rtb_softlimit); |
659 | if (hard == 0 || hard >= soft) { | 658 | if (hard == 0 || hard >= soft) { |
660 | ddq->d_rtb_hardlimit = cpu_to_be64(hard); | 659 | ddq->d_rtb_hardlimit = cpu_to_be64(hard); |
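Because qc_dqblk expresses limits in bytes, xfs_qm_scall_setqlim() now converts them straight to filesystem blocks with XFS_B_TO_FSB() instead of treating the incoming value as 512-byte basic blocks and using XFS_BB_TO_FSB(); xfs_qm_scall_getquota() further down does the inverse with XFS_FSB_TO_B(). A userland sketch of the arithmetic, assuming a 4096-byte filesystem block purely for illustration (both macros round up, which the sketch mimics):

	#include <stdio.h>
	#include <stdint.h>

	#define FSB_SIZE 4096ULL	/* assumed filesystem block size */
	#define BB_SIZE  512ULL		/* XFS basic block */

	static uint64_t b_to_fsb(uint64_t bytes)	/* ~XFS_B_TO_FSB */
	{
		return (bytes + FSB_SIZE - 1) / FSB_SIZE;
	}

	static uint64_t bb_to_fsb(uint64_t bbs)		/* ~XFS_BB_TO_FSB */
	{
		return (bbs * BB_SIZE + FSB_SIZE - 1) / FSB_SIZE;
	}

	int main(void)
	{
		uint64_t limit_bytes = 10ULL * 1024 * 1024;	/* 10 MiB hard limit */

		/* Old interface: user space passed the limit in basic blocks. */
		printf("via basic blocks: %llu FSBs\n",
		       (unsigned long long)bb_to_fsb(limit_bytes / BB_SIZE));
		/* New interface: qc_dqblk already carries bytes. */
		printf("via bytes:        %llu FSBs\n",
		       (unsigned long long)b_to_fsb(limit_bytes));
		return 0;
	}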
@@ -667,10 +666,10 @@ xfs_qm_scall_setqlim( | |||
667 | xfs_debug(mp, "rtbhard %Ld < rtbsoft %Ld", hard, soft); | 666 | xfs_debug(mp, "rtbhard %Ld < rtbsoft %Ld", hard, soft); |
668 | } | 667 | } |
669 | 668 | ||
670 | hard = (newlim->d_fieldmask & FS_DQ_IHARD) ? | 669 | hard = (newlim->d_fieldmask & QC_INO_HARD) ? |
671 | (xfs_qcnt_t) newlim->d_ino_hardlimit : | 670 | (xfs_qcnt_t) newlim->d_ino_hardlimit : |
672 | be64_to_cpu(ddq->d_ino_hardlimit); | 671 | be64_to_cpu(ddq->d_ino_hardlimit); |
673 | soft = (newlim->d_fieldmask & FS_DQ_ISOFT) ? | 672 | soft = (newlim->d_fieldmask & QC_INO_SOFT) ? |
674 | (xfs_qcnt_t) newlim->d_ino_softlimit : | 673 | (xfs_qcnt_t) newlim->d_ino_softlimit : |
675 | be64_to_cpu(ddq->d_ino_softlimit); | 674 | be64_to_cpu(ddq->d_ino_softlimit); |
676 | if (hard == 0 || hard >= soft) { | 675 | if (hard == 0 || hard >= soft) { |
@@ -687,12 +686,12 @@ xfs_qm_scall_setqlim( | |||
687 | /* | 686 | /* |
688 | * Update warnings counter(s) if requested | 687 | * Update warnings counter(s) if requested |
689 | */ | 688 | */ |
690 | if (newlim->d_fieldmask & FS_DQ_BWARNS) | 689 | if (newlim->d_fieldmask & QC_SPC_WARNS) |
691 | ddq->d_bwarns = cpu_to_be16(newlim->d_bwarns); | 690 | ddq->d_bwarns = cpu_to_be16(newlim->d_spc_warns); |
692 | if (newlim->d_fieldmask & FS_DQ_IWARNS) | 691 | if (newlim->d_fieldmask & QC_INO_WARNS) |
693 | ddq->d_iwarns = cpu_to_be16(newlim->d_iwarns); | 692 | ddq->d_iwarns = cpu_to_be16(newlim->d_ino_warns); |
694 | if (newlim->d_fieldmask & FS_DQ_RTBWARNS) | 693 | if (newlim->d_fieldmask & QC_RT_SPC_WARNS) |
695 | ddq->d_rtbwarns = cpu_to_be16(newlim->d_rtbwarns); | 694 | ddq->d_rtbwarns = cpu_to_be16(newlim->d_rt_spc_warns); |
696 | 695 | ||
697 | if (id == 0) { | 696 | if (id == 0) { |
698 | /* | 697 | /* |
@@ -702,24 +701,24 @@ xfs_qm_scall_setqlim( | |||
702 | * soft and hard limit values (already done, above), and | 701 | * soft and hard limit values (already done, above), and |
703 | * for warnings. | 702 | * for warnings. |
704 | */ | 703 | */ |
705 | if (newlim->d_fieldmask & FS_DQ_BTIMER) { | 704 | if (newlim->d_fieldmask & QC_SPC_TIMER) { |
706 | q->qi_btimelimit = newlim->d_btimer; | 705 | q->qi_btimelimit = newlim->d_spc_timer; |
707 | ddq->d_btimer = cpu_to_be32(newlim->d_btimer); | 706 | ddq->d_btimer = cpu_to_be32(newlim->d_spc_timer); |
708 | } | 707 | } |
709 | if (newlim->d_fieldmask & FS_DQ_ITIMER) { | 708 | if (newlim->d_fieldmask & QC_INO_TIMER) { |
710 | q->qi_itimelimit = newlim->d_itimer; | 709 | q->qi_itimelimit = newlim->d_ino_timer; |
711 | ddq->d_itimer = cpu_to_be32(newlim->d_itimer); | 710 | ddq->d_itimer = cpu_to_be32(newlim->d_ino_timer); |
712 | } | 711 | } |
713 | if (newlim->d_fieldmask & FS_DQ_RTBTIMER) { | 712 | if (newlim->d_fieldmask & QC_RT_SPC_TIMER) { |
714 | q->qi_rtbtimelimit = newlim->d_rtbtimer; | 713 | q->qi_rtbtimelimit = newlim->d_rt_spc_timer; |
715 | ddq->d_rtbtimer = cpu_to_be32(newlim->d_rtbtimer); | 714 | ddq->d_rtbtimer = cpu_to_be32(newlim->d_rt_spc_timer); |
716 | } | 715 | } |
717 | if (newlim->d_fieldmask & FS_DQ_BWARNS) | 716 | if (newlim->d_fieldmask & QC_SPC_WARNS) |
718 | q->qi_bwarnlimit = newlim->d_bwarns; | 717 | q->qi_bwarnlimit = newlim->d_spc_warns; |
719 | if (newlim->d_fieldmask & FS_DQ_IWARNS) | 718 | if (newlim->d_fieldmask & QC_INO_WARNS) |
720 | q->qi_iwarnlimit = newlim->d_iwarns; | 719 | q->qi_iwarnlimit = newlim->d_ino_warns; |
721 | if (newlim->d_fieldmask & FS_DQ_RTBWARNS) | 720 | if (newlim->d_fieldmask & QC_RT_SPC_WARNS) |
722 | q->qi_rtbwarnlimit = newlim->d_rtbwarns; | 721 | q->qi_rtbwarnlimit = newlim->d_rt_spc_warns; |
723 | } else { | 722 | } else { |
724 | /* | 723 | /* |
725 | * If the user is now over quota, start the timelimit. | 724 | * If the user is now over quota, start the timelimit. |
@@ -824,7 +823,7 @@ xfs_qm_scall_getquota( | |||
824 | struct xfs_mount *mp, | 823 | struct xfs_mount *mp, |
825 | xfs_dqid_t id, | 824 | xfs_dqid_t id, |
826 | uint type, | 825 | uint type, |
827 | struct fs_disk_quota *dst) | 826 | struct qc_dqblk *dst) |
828 | { | 827 | { |
829 | struct xfs_dquot *dqp; | 828 | struct xfs_dquot *dqp; |
830 | int error; | 829 | int error; |
@@ -848,28 +847,25 @@ xfs_qm_scall_getquota( | |||
848 | } | 847 | } |
849 | 848 | ||
850 | memset(dst, 0, sizeof(*dst)); | 849 | memset(dst, 0, sizeof(*dst)); |
851 | dst->d_version = FS_DQUOT_VERSION; | 850 | dst->d_spc_hardlimit = |
852 | dst->d_flags = xfs_qm_export_qtype_flags(dqp->q_core.d_flags); | 851 | XFS_FSB_TO_B(mp, be64_to_cpu(dqp->q_core.d_blk_hardlimit)); |
853 | dst->d_id = be32_to_cpu(dqp->q_core.d_id); | 852 | dst->d_spc_softlimit = |
854 | dst->d_blk_hardlimit = | 853 | XFS_FSB_TO_B(mp, be64_to_cpu(dqp->q_core.d_blk_softlimit)); |
855 | XFS_FSB_TO_BB(mp, be64_to_cpu(dqp->q_core.d_blk_hardlimit)); | ||
856 | dst->d_blk_softlimit = | ||
857 | XFS_FSB_TO_BB(mp, be64_to_cpu(dqp->q_core.d_blk_softlimit)); | ||
858 | dst->d_ino_hardlimit = be64_to_cpu(dqp->q_core.d_ino_hardlimit); | 854 | dst->d_ino_hardlimit = be64_to_cpu(dqp->q_core.d_ino_hardlimit); |
859 | dst->d_ino_softlimit = be64_to_cpu(dqp->q_core.d_ino_softlimit); | 855 | dst->d_ino_softlimit = be64_to_cpu(dqp->q_core.d_ino_softlimit); |
860 | dst->d_bcount = XFS_FSB_TO_BB(mp, dqp->q_res_bcount); | 856 | dst->d_space = XFS_FSB_TO_B(mp, dqp->q_res_bcount); |
861 | dst->d_icount = dqp->q_res_icount; | 857 | dst->d_ino_count = dqp->q_res_icount; |
862 | dst->d_btimer = be32_to_cpu(dqp->q_core.d_btimer); | 858 | dst->d_spc_timer = be32_to_cpu(dqp->q_core.d_btimer); |
863 | dst->d_itimer = be32_to_cpu(dqp->q_core.d_itimer); | 859 | dst->d_ino_timer = be32_to_cpu(dqp->q_core.d_itimer); |
864 | dst->d_iwarns = be16_to_cpu(dqp->q_core.d_iwarns); | 860 | dst->d_ino_warns = be16_to_cpu(dqp->q_core.d_iwarns); |
865 | dst->d_bwarns = be16_to_cpu(dqp->q_core.d_bwarns); | 861 | dst->d_spc_warns = be16_to_cpu(dqp->q_core.d_bwarns); |
866 | dst->d_rtb_hardlimit = | 862 | dst->d_rt_spc_hardlimit = |
867 | XFS_FSB_TO_BB(mp, be64_to_cpu(dqp->q_core.d_rtb_hardlimit)); | 863 | XFS_FSB_TO_B(mp, be64_to_cpu(dqp->q_core.d_rtb_hardlimit)); |
868 | dst->d_rtb_softlimit = | 864 | dst->d_rt_spc_softlimit = |
869 | XFS_FSB_TO_BB(mp, be64_to_cpu(dqp->q_core.d_rtb_softlimit)); | 865 | XFS_FSB_TO_B(mp, be64_to_cpu(dqp->q_core.d_rtb_softlimit)); |
870 | dst->d_rtbcount = XFS_FSB_TO_BB(mp, dqp->q_res_rtbcount); | 866 | dst->d_rt_space = XFS_FSB_TO_B(mp, dqp->q_res_rtbcount); |
871 | dst->d_rtbtimer = be32_to_cpu(dqp->q_core.d_rtbtimer); | 867 | dst->d_rt_spc_timer = be32_to_cpu(dqp->q_core.d_rtbtimer); |
872 | dst->d_rtbwarns = be16_to_cpu(dqp->q_core.d_rtbwarns); | 868 | dst->d_rt_spc_warns = be16_to_cpu(dqp->q_core.d_rtbwarns); |
873 | 869 | ||
874 | /* | 870 | /* |
875 | * Internally, we don't reset all the timers when quota enforcement | 871 | * Internally, we don't reset all the timers when quota enforcement |
@@ -882,23 +878,23 @@ xfs_qm_scall_getquota( | |||
882 | dqp->q_core.d_flags == XFS_DQ_GROUP) || | 878 | dqp->q_core.d_flags == XFS_DQ_GROUP) || |
883 | (!XFS_IS_PQUOTA_ENFORCED(mp) && | 879 | (!XFS_IS_PQUOTA_ENFORCED(mp) && |
884 | dqp->q_core.d_flags == XFS_DQ_PROJ)) { | 880 | dqp->q_core.d_flags == XFS_DQ_PROJ)) { |
885 | dst->d_btimer = 0; | 881 | dst->d_spc_timer = 0; |
886 | dst->d_itimer = 0; | 882 | dst->d_ino_timer = 0; |
887 | dst->d_rtbtimer = 0; | 883 | dst->d_rt_spc_timer = 0; |
888 | } | 884 | } |
889 | 885 | ||
890 | #ifdef DEBUG | 886 | #ifdef DEBUG |
891 | if (((XFS_IS_UQUOTA_ENFORCED(mp) && dst->d_flags == FS_USER_QUOTA) || | 887 | if (((XFS_IS_UQUOTA_ENFORCED(mp) && type == XFS_DQ_USER) || |
892 | (XFS_IS_GQUOTA_ENFORCED(mp) && dst->d_flags == FS_GROUP_QUOTA) || | 888 | (XFS_IS_GQUOTA_ENFORCED(mp) && type == XFS_DQ_GROUP) || |
893 | (XFS_IS_PQUOTA_ENFORCED(mp) && dst->d_flags == FS_PROJ_QUOTA)) && | 889 | (XFS_IS_PQUOTA_ENFORCED(mp) && type == XFS_DQ_PROJ)) && |
894 | dst->d_id != 0) { | 890 | id != 0) { |
895 | if ((dst->d_bcount > dst->d_blk_softlimit) && | 891 | if ((dst->d_space > dst->d_spc_softlimit) && |
896 | (dst->d_blk_softlimit > 0)) { | 892 | (dst->d_spc_softlimit > 0)) { |
897 | ASSERT(dst->d_btimer != 0); | 893 | ASSERT(dst->d_spc_timer != 0); |
898 | } | 894 | } |
899 | if ((dst->d_icount > dst->d_ino_softlimit) && | 895 | if ((dst->d_ino_count > dst->d_ino_softlimit) && |
900 | (dst->d_ino_softlimit > 0)) { | 896 | (dst->d_ino_softlimit > 0)) { |
901 | ASSERT(dst->d_itimer != 0); | 897 | ASSERT(dst->d_ino_timer != 0); |
902 | } | 898 | } |
903 | } | 899 | } |
904 | #endif | 900 | #endif |
@@ -908,26 +904,6 @@ out_put: | |||
908 | } | 904 | } |
909 | 905 | ||
910 | STATIC uint | 906 | STATIC uint |
911 | xfs_qm_export_qtype_flags( | ||
912 | uint flags) | ||
913 | { | ||
914 | /* | ||
915 | * Can't be more than one, or none. | ||
916 | */ | ||
917 | ASSERT((flags & (FS_PROJ_QUOTA | FS_USER_QUOTA)) != | ||
918 | (FS_PROJ_QUOTA | FS_USER_QUOTA)); | ||
919 | ASSERT((flags & (FS_PROJ_QUOTA | FS_GROUP_QUOTA)) != | ||
920 | (FS_PROJ_QUOTA | FS_GROUP_QUOTA)); | ||
921 | ASSERT((flags & (FS_USER_QUOTA | FS_GROUP_QUOTA)) != | ||
922 | (FS_USER_QUOTA | FS_GROUP_QUOTA)); | ||
923 | ASSERT((flags & (FS_PROJ_QUOTA|FS_USER_QUOTA|FS_GROUP_QUOTA)) != 0); | ||
924 | |||
925 | return (flags & XFS_DQ_USER) ? | ||
926 | FS_USER_QUOTA : (flags & XFS_DQ_PROJ) ? | ||
927 | FS_PROJ_QUOTA : FS_GROUP_QUOTA; | ||
928 | } | ||
929 | |||
930 | STATIC uint | ||
931 | xfs_qm_export_flags( | 907 | xfs_qm_export_flags( |
932 | uint flags) | 908 | uint flags) |
933 | { | 909 | { |
diff --git a/fs/xfs/xfs_quotaops.c b/fs/xfs/xfs_quotaops.c index 7542bbeca6a1..801a84c1cdc3 100644 --- a/fs/xfs/xfs_quotaops.c +++ b/fs/xfs/xfs_quotaops.c | |||
@@ -131,7 +131,7 @@ STATIC int | |||
131 | xfs_fs_get_dqblk( | 131 | xfs_fs_get_dqblk( |
132 | struct super_block *sb, | 132 | struct super_block *sb, |
133 | struct kqid qid, | 133 | struct kqid qid, |
134 | struct fs_disk_quota *fdq) | 134 | struct qc_dqblk *qdq) |
135 | { | 135 | { |
136 | struct xfs_mount *mp = XFS_M(sb); | 136 | struct xfs_mount *mp = XFS_M(sb); |
137 | 137 | ||
@@ -141,14 +141,14 @@ xfs_fs_get_dqblk( | |||
141 | return -ESRCH; | 141 | return -ESRCH; |
142 | 142 | ||
143 | return xfs_qm_scall_getquota(mp, from_kqid(&init_user_ns, qid), | 143 | return xfs_qm_scall_getquota(mp, from_kqid(&init_user_ns, qid), |
144 | xfs_quota_type(qid.type), fdq); | 144 | xfs_quota_type(qid.type), qdq); |
145 | } | 145 | } |
146 | 146 | ||
147 | STATIC int | 147 | STATIC int |
148 | xfs_fs_set_dqblk( | 148 | xfs_fs_set_dqblk( |
149 | struct super_block *sb, | 149 | struct super_block *sb, |
150 | struct kqid qid, | 150 | struct kqid qid, |
151 | struct fs_disk_quota *fdq) | 151 | struct qc_dqblk *qdq) |
152 | { | 152 | { |
153 | struct xfs_mount *mp = XFS_M(sb); | 153 | struct xfs_mount *mp = XFS_M(sb); |
154 | 154 | ||
@@ -160,7 +160,7 @@ xfs_fs_set_dqblk( | |||
160 | return -ESRCH; | 160 | return -ESRCH; |
161 | 161 | ||
162 | return xfs_qm_scall_setqlim(mp, from_kqid(&init_user_ns, qid), | 162 | return xfs_qm_scall_setqlim(mp, from_kqid(&init_user_ns, qid), |
163 | xfs_quota_type(qid.type), fdq); | 163 | xfs_quota_type(qid.type), qdq); |
164 | } | 164 | } |
165 | 165 | ||
166 | const struct quotactl_ops xfs_quotactl_operations = { | 166 | const struct quotactl_ops xfs_quotactl_operations = { |
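With both callbacks converted to struct qc_dqblk, the quotactl_ops table whose first line closes the hunk keeps the same wiring; only the type behind ->get_dqblk/->set_dqblk changes. A minimal sketch of the relevant members (the other initializers of xfs_quotactl_operations are outside this hunk and therefore omitted here):

	const struct quotactl_ops xfs_quotactl_operations = {
		/* ... members not shown in this hunk ... */
		.get_dqblk	= xfs_fs_get_dqblk,	/* fills a struct qc_dqblk */
		.set_dqblk	= xfs_fs_set_dqblk,	/* consumes a struct qc_dqblk */
	};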