author     Tomi Valkeinen <tomi.valkeinen@ti.com>  2012-03-06 06:20:31 -0500
committer  Tomi Valkeinen <tomi.valkeinen@ti.com>  2012-03-06 06:20:31 -0500
commit     0ba86d7edea1f9f2d9497c31200910263e8b4128 (patch)
tree       387f713e1afdd181e92699dcaa5fa3d45b30c75b /fs
parent     973b659cbf2604b0b52829c224f6064d64118818 (diff)
parent     192cfd58774b4d17b2fe8bdc77d89c2ef4e0591d (diff)
Merge commit 'v3.3-rc6'
Merge v3.3-rc6 to get the latest DSS and OMAP arch fixes.
Conflicts:
arch/arm/mach-omap1/board-innovator.c
drivers/video/omap2/dss/apply.c
Diffstat (limited to 'fs')
50 files changed, 574 insertions(+), 331 deletions(-)
diff --git a/fs/autofs4/autofs_i.h b/fs/autofs4/autofs_i.h
index d8d8e7ba6a1e..eb1cc92cd67d 100644
--- a/fs/autofs4/autofs_i.h
+++ b/fs/autofs4/autofs_i.h
@@ -110,6 +110,7 @@ struct autofs_sb_info { | |||
110 | int sub_version; | 110 | int sub_version; |
111 | int min_proto; | 111 | int min_proto; |
112 | int max_proto; | 112 | int max_proto; |
113 | int compat_daemon; | ||
113 | unsigned long exp_timeout; | 114 | unsigned long exp_timeout; |
114 | unsigned int type; | 115 | unsigned int type; |
115 | int reghost_enabled; | 116 | int reghost_enabled; |
diff --git a/fs/autofs4/dev-ioctl.c b/fs/autofs4/dev-ioctl.c
index 76741d8d7786..85f1fcdb30e7 100644
--- a/fs/autofs4/dev-ioctl.c
+++ b/fs/autofs4/dev-ioctl.c
@@ -385,6 +385,7 @@ static int autofs_dev_ioctl_setpipefd(struct file *fp, | |||
385 | sbi->pipefd = pipefd; | 385 | sbi->pipefd = pipefd; |
386 | sbi->pipe = pipe; | 386 | sbi->pipe = pipe; |
387 | sbi->catatonic = 0; | 387 | sbi->catatonic = 0; |
388 | sbi->compat_daemon = is_compat_task(); | ||
388 | } | 389 | } |
389 | out: | 390 | out: |
390 | mutex_unlock(&sbi->wq_mutex); | 391 | mutex_unlock(&sbi->wq_mutex); |
diff --git a/fs/autofs4/expire.c b/fs/autofs4/expire.c
index 450f529a4eae..1feb68ecef95 100644
--- a/fs/autofs4/expire.c
+++ b/fs/autofs4/expire.c
@@ -124,6 +124,7 @@ start: | |||
124 | /* Negative dentry - try next */ | 124 | /* Negative dentry - try next */ |
125 | if (!simple_positive(q)) { | 125 | if (!simple_positive(q)) { |
126 | spin_unlock(&p->d_lock); | 126 | spin_unlock(&p->d_lock); |
127 | lock_set_subclass(&q->d_lock.dep_map, 0, _RET_IP_); | ||
127 | p = q; | 128 | p = q; |
128 | goto again; | 129 | goto again; |
129 | } | 130 | } |
@@ -186,6 +187,7 @@ again: | |||
186 | /* Negative dentry - try next */ | 187 | /* Negative dentry - try next */ |
187 | if (!simple_positive(ret)) { | 188 | if (!simple_positive(ret)) { |
188 | spin_unlock(&p->d_lock); | 189 | spin_unlock(&p->d_lock); |
190 | lock_set_subclass(&ret->d_lock.dep_map, 0, _RET_IP_); | ||
189 | p = ret; | 191 | p = ret; |
190 | goto again; | 192 | goto again; |
191 | } | 193 | } |
diff --git a/fs/autofs4/inode.c b/fs/autofs4/inode.c
index e16980b00b8d..06858d955120 100644
--- a/fs/autofs4/inode.c
+++ b/fs/autofs4/inode.c
@@ -19,6 +19,7 @@ | |||
19 | #include <linux/parser.h> | 19 | #include <linux/parser.h> |
20 | #include <linux/bitops.h> | 20 | #include <linux/bitops.h> |
21 | #include <linux/magic.h> | 21 | #include <linux/magic.h> |
22 | #include <linux/compat.h> | ||
22 | #include "autofs_i.h" | 23 | #include "autofs_i.h" |
23 | #include <linux/module.h> | 24 | #include <linux/module.h> |
24 | 25 | ||
@@ -224,6 +225,7 @@ int autofs4_fill_super(struct super_block *s, void *data, int silent) | |||
224 | set_autofs_type_indirect(&sbi->type); | 225 | set_autofs_type_indirect(&sbi->type); |
225 | sbi->min_proto = 0; | 226 | sbi->min_proto = 0; |
226 | sbi->max_proto = 0; | 227 | sbi->max_proto = 0; |
228 | sbi->compat_daemon = is_compat_task(); | ||
227 | mutex_init(&sbi->wq_mutex); | 229 | mutex_init(&sbi->wq_mutex); |
228 | mutex_init(&sbi->pipe_mutex); | 230 | mutex_init(&sbi->pipe_mutex); |
229 | spin_lock_init(&sbi->fs_lock); | 231 | spin_lock_init(&sbi->fs_lock); |
diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
index da8876d38a7b..9c098db43344 100644
--- a/fs/autofs4/waitq.c
+++ b/fs/autofs4/waitq.c
@@ -91,7 +91,24 @@ static int autofs4_write(struct autofs_sb_info *sbi, | |||
91 | 91 | ||
92 | return (bytes > 0); | 92 | return (bytes > 0); |
93 | } | 93 | } |
94 | 94 | ||
95 | /* | ||
96 | * The autofs_v5 packet was misdesigned. | ||
97 | * | ||
98 | * The packets are identical on x86-32 and x86-64, but have different | ||
99 | * alignment. Which means that 'sizeof()' will give different results. | ||
100 | * Fix it up for the case of running 32-bit user mode on a 64-bit kernel. | ||
101 | */ | ||
102 | static noinline size_t autofs_v5_packet_size(struct autofs_sb_info *sbi) | ||
103 | { | ||
104 | size_t pktsz = sizeof(struct autofs_v5_packet); | ||
105 | #if defined(CONFIG_X86_64) && defined(CONFIG_COMPAT) | ||
106 | if (sbi->compat_daemon > 0) | ||
107 | pktsz -= 4; | ||
108 | #endif | ||
109 | return pktsz; | ||
110 | } | ||
111 | |||
95 | static void autofs4_notify_daemon(struct autofs_sb_info *sbi, | 112 | static void autofs4_notify_daemon(struct autofs_sb_info *sbi, |
96 | struct autofs_wait_queue *wq, | 113 | struct autofs_wait_queue *wq, |
97 | int type) | 114 | int type) |
@@ -155,8 +172,7 @@ static void autofs4_notify_daemon(struct autofs_sb_info *sbi, | |||
155 | { | 172 | { |
156 | struct autofs_v5_packet *packet = &pkt.v5_pkt.v5_packet; | 173 | struct autofs_v5_packet *packet = &pkt.v5_pkt.v5_packet; |
157 | 174 | ||
158 | pktsz = sizeof(*packet); | 175 | pktsz = autofs_v5_packet_size(sbi); |
159 | |||
160 | packet->wait_queue_token = wq->wait_queue_token; | 176 | packet->wait_queue_token = wq->wait_queue_token; |
161 | packet->len = wq->name.len; | 177 | packet->len = wq->name.len; |
162 | memcpy(packet->name, wq->name.name, wq->name.len); | 178 | memcpy(packet->name, wq->name.name, wq->name.len); |
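The comment in autofs_v5_packet_size() above hinges on a struct-alignment detail that is easy to reproduce in userspace. The sketch below is purely illustrative and is not part of the patch; it uses a made-up struct (not the real autofs_v5_packet layout) just to show why the same field sequence can have a different sizeof() on x86-32 and x86-64.

```c
/*
 * Illustrative only -- NOT the real autofs_v5_packet definition.
 * A 64-bit member forces 8-byte struct alignment on x86-64 but only
 * 4-byte alignment on i386, so identical field layouts can differ in size.
 */
#include <stdint.h>
#include <stdio.h>

struct demo {
	uint32_t a;	/* offset 0 on both ABIs */
	uint64_t b;	/* offset 8 on x86-64 (4 bytes of padding), offset 4 on i386 */
	uint32_t c;	/* offset 16 on x86-64, offset 12 on i386 */
};

int main(void)
{
	/* gcc -m64: prints 24 (tail padding up to an 8-byte multiple)
	 * gcc -m32: prints 16 (no padding needed)                      */
	printf("sizeof(struct demo) = %zu\n", sizeof(struct demo));
	return 0;
}
```

A 64-bit kernel serving a 32-bit daemon therefore has to adjust the packet size it writes, which is the correction autofs_v5_packet_size() applies when sbi->compat_daemon is set.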
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index bcb884e2d613..07d096c49920 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -1421,7 +1421,7 @@ static int fill_thread_core_info(struct elf_thread_core_info *t, | |||
1421 | for (i = 1; i < view->n; ++i) { | 1421 | for (i = 1; i < view->n; ++i) { |
1422 | const struct user_regset *regset = &view->regsets[i]; | 1422 | const struct user_regset *regset = &view->regsets[i]; |
1423 | do_thread_regset_writeback(t->task, regset); | 1423 | do_thread_regset_writeback(t->task, regset); |
1424 | if (regset->core_note_type && | 1424 | if (regset->core_note_type && regset->get && |
1425 | (!regset->active || regset->active(t->task, regset))) { | 1425 | (!regset->active || regset->active(t->task, regset))) { |
1426 | int ret; | 1426 | int ret; |
1427 | size_t size = regset->n * regset->size; | 1427 | size_t size = regset->n * regset->size; |
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index 633c701a287d..98f6bf10bbd4 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -892,6 +892,8 @@ static char *iref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path, | |||
892 | if (eb != eb_in) | 892 | if (eb != eb_in) |
893 | free_extent_buffer(eb); | 893 | free_extent_buffer(eb); |
894 | ret = inode_ref_info(parent, 0, fs_root, path, &found_key); | 894 | ret = inode_ref_info(parent, 0, fs_root, path, &found_key); |
895 | if (ret > 0) | ||
896 | ret = -ENOENT; | ||
895 | if (ret) | 897 | if (ret) |
896 | break; | 898 | break; |
897 | next_inum = found_key.offset; | 899 | next_inum = found_key.offset; |
diff --git a/fs/btrfs/check-integrity.c b/fs/btrfs/check-integrity.c
index b669a7d8e499..d986824bb2b4 100644
--- a/fs/btrfs/check-integrity.c
+++ b/fs/btrfs/check-integrity.c
@@ -644,7 +644,7 @@ static struct btrfsic_dev_state *btrfsic_dev_state_hashtable_lookup( | |||
644 | static int btrfsic_process_superblock(struct btrfsic_state *state, | 644 | static int btrfsic_process_superblock(struct btrfsic_state *state, |
645 | struct btrfs_fs_devices *fs_devices) | 645 | struct btrfs_fs_devices *fs_devices) |
646 | { | 646 | { |
647 | int ret; | 647 | int ret = 0; |
648 | struct btrfs_super_block *selected_super; | 648 | struct btrfs_super_block *selected_super; |
649 | struct list_head *dev_head = &fs_devices->devices; | 649 | struct list_head *dev_head = &fs_devices->devices; |
650 | struct btrfs_device *device; | 650 | struct btrfs_device *device; |
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index 14f1c5a0b2d2..d02c27cd14c7 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -588,6 +588,8 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, | |||
588 | page_offset(bio->bi_io_vec->bv_page), | 588 | page_offset(bio->bi_io_vec->bv_page), |
589 | PAGE_CACHE_SIZE); | 589 | PAGE_CACHE_SIZE); |
590 | read_unlock(&em_tree->lock); | 590 | read_unlock(&em_tree->lock); |
591 | if (!em) | ||
592 | return -EIO; | ||
591 | 593 | ||
592 | compressed_len = em->block_len; | 594 | compressed_len = em->block_len; |
593 | cb = kmalloc(compressed_bio_size(root, compressed_len), GFP_NOFS); | 595 | cb = kmalloc(compressed_bio_size(root, compressed_len), GFP_NOFS); |
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 27ebe61d3ccc..80b6486fd5e6 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -886,7 +886,7 @@ struct btrfs_block_rsv { | |||
886 | u64 reserved; | 886 | u64 reserved; |
887 | struct btrfs_space_info *space_info; | 887 | struct btrfs_space_info *space_info; |
888 | spinlock_t lock; | 888 | spinlock_t lock; |
889 | unsigned int full:1; | 889 | unsigned int full; |
890 | }; | 890 | }; |
891 | 891 | ||
892 | /* | 892 | /* |
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 811d9f918b1c..534266fe505f 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -2260,6 +2260,12 @@ int open_ctree(struct super_block *sb, | |||
2260 | goto fail_sb_buffer; | 2260 | goto fail_sb_buffer; |
2261 | } | 2261 | } |
2262 | 2262 | ||
2263 | if (sectorsize < PAGE_SIZE) { | ||
2264 | printk(KERN_WARNING "btrfs: Incompatible sector size " | ||
2265 | "found on %s\n", sb->s_id); | ||
2266 | goto fail_sb_buffer; | ||
2267 | } | ||
2268 | |||
2263 | mutex_lock(&fs_info->chunk_mutex); | 2269 | mutex_lock(&fs_info->chunk_mutex); |
2264 | ret = btrfs_read_sys_array(tree_root); | 2270 | ret = btrfs_read_sys_array(tree_root); |
2265 | mutex_unlock(&fs_info->chunk_mutex); | 2271 | mutex_unlock(&fs_info->chunk_mutex); |
@@ -2301,6 +2307,12 @@ int open_ctree(struct super_block *sb, | |||
2301 | 2307 | ||
2302 | btrfs_close_extra_devices(fs_devices); | 2308 | btrfs_close_extra_devices(fs_devices); |
2303 | 2309 | ||
2310 | if (!fs_devices->latest_bdev) { | ||
2311 | printk(KERN_CRIT "btrfs: failed to read devices on %s\n", | ||
2312 | sb->s_id); | ||
2313 | goto fail_tree_roots; | ||
2314 | } | ||
2315 | |||
2304 | retry_root_backup: | 2316 | retry_root_backup: |
2305 | blocksize = btrfs_level_size(tree_root, | 2317 | blocksize = btrfs_level_size(tree_root, |
2306 | btrfs_super_root_level(disk_super)); | 2318 | btrfs_super_root_level(disk_super)); |
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 283af7a676a3..37e0a800d34e 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -3312,7 +3312,8 @@ commit_trans: | |||
3312 | } | 3312 | } |
3313 | data_sinfo->bytes_may_use += bytes; | 3313 | data_sinfo->bytes_may_use += bytes; |
3314 | trace_btrfs_space_reservation(root->fs_info, "space_info", | 3314 | trace_btrfs_space_reservation(root->fs_info, "space_info", |
3315 | (u64)data_sinfo, bytes, 1); | 3315 | (u64)(unsigned long)data_sinfo, |
3316 | bytes, 1); | ||
3316 | spin_unlock(&data_sinfo->lock); | 3317 | spin_unlock(&data_sinfo->lock); |
3317 | 3318 | ||
3318 | return 0; | 3319 | return 0; |
@@ -3333,7 +3334,8 @@ void btrfs_free_reserved_data_space(struct inode *inode, u64 bytes) | |||
3333 | spin_lock(&data_sinfo->lock); | 3334 | spin_lock(&data_sinfo->lock); |
3334 | data_sinfo->bytes_may_use -= bytes; | 3335 | data_sinfo->bytes_may_use -= bytes; |
3335 | trace_btrfs_space_reservation(root->fs_info, "space_info", | 3336 | trace_btrfs_space_reservation(root->fs_info, "space_info", |
3336 | (u64)data_sinfo, bytes, 0); | 3337 | (u64)(unsigned long)data_sinfo, |
3338 | bytes, 0); | ||
3337 | spin_unlock(&data_sinfo->lock); | 3339 | spin_unlock(&data_sinfo->lock); |
3338 | } | 3340 | } |
3339 | 3341 | ||
@@ -3611,12 +3613,15 @@ static int may_commit_transaction(struct btrfs_root *root, | |||
3611 | if (space_info != delayed_rsv->space_info) | 3613 | if (space_info != delayed_rsv->space_info) |
3612 | return -ENOSPC; | 3614 | return -ENOSPC; |
3613 | 3615 | ||
3616 | spin_lock(&space_info->lock); | ||
3614 | spin_lock(&delayed_rsv->lock); | 3617 | spin_lock(&delayed_rsv->lock); |
3615 | if (delayed_rsv->size < bytes) { | 3618 | if (space_info->bytes_pinned + delayed_rsv->size < bytes) { |
3616 | spin_unlock(&delayed_rsv->lock); | 3619 | spin_unlock(&delayed_rsv->lock); |
3620 | spin_unlock(&space_info->lock); | ||
3617 | return -ENOSPC; | 3621 | return -ENOSPC; |
3618 | } | 3622 | } |
3619 | spin_unlock(&delayed_rsv->lock); | 3623 | spin_unlock(&delayed_rsv->lock); |
3624 | spin_unlock(&space_info->lock); | ||
3620 | 3625 | ||
3621 | commit: | 3626 | commit: |
3622 | trans = btrfs_join_transaction(root); | 3627 | trans = btrfs_join_transaction(root); |
@@ -3695,9 +3700,9 @@ again: | |||
3695 | if (used + orig_bytes <= space_info->total_bytes) { | 3700 | if (used + orig_bytes <= space_info->total_bytes) { |
3696 | space_info->bytes_may_use += orig_bytes; | 3701 | space_info->bytes_may_use += orig_bytes; |
3697 | trace_btrfs_space_reservation(root->fs_info, | 3702 | trace_btrfs_space_reservation(root->fs_info, |
3698 | "space_info", | 3703 | "space_info", |
3699 | (u64)space_info, | 3704 | (u64)(unsigned long)space_info, |
3700 | orig_bytes, 1); | 3705 | orig_bytes, 1); |
3701 | ret = 0; | 3706 | ret = 0; |
3702 | } else { | 3707 | } else { |
3703 | /* | 3708 | /* |
@@ -3766,9 +3771,9 @@ again: | |||
3766 | if (used + num_bytes < space_info->total_bytes + avail) { | 3771 | if (used + num_bytes < space_info->total_bytes + avail) { |
3767 | space_info->bytes_may_use += orig_bytes; | 3772 | space_info->bytes_may_use += orig_bytes; |
3768 | trace_btrfs_space_reservation(root->fs_info, | 3773 | trace_btrfs_space_reservation(root->fs_info, |
3769 | "space_info", | 3774 | "space_info", |
3770 | (u64)space_info, | 3775 | (u64)(unsigned long)space_info, |
3771 | orig_bytes, 1); | 3776 | orig_bytes, 1); |
3772 | ret = 0; | 3777 | ret = 0; |
3773 | } else { | 3778 | } else { |
3774 | wait_ordered = true; | 3779 | wait_ordered = true; |
@@ -3913,8 +3918,8 @@ static void block_rsv_release_bytes(struct btrfs_fs_info *fs_info, | |||
3913 | spin_lock(&space_info->lock); | 3918 | spin_lock(&space_info->lock); |
3914 | space_info->bytes_may_use -= num_bytes; | 3919 | space_info->bytes_may_use -= num_bytes; |
3915 | trace_btrfs_space_reservation(fs_info, "space_info", | 3920 | trace_btrfs_space_reservation(fs_info, "space_info", |
3916 | (u64)space_info, | 3921 | (u64)(unsigned long)space_info, |
3917 | num_bytes, 0); | 3922 | num_bytes, 0); |
3918 | space_info->reservation_progress++; | 3923 | space_info->reservation_progress++; |
3919 | spin_unlock(&space_info->lock); | 3924 | spin_unlock(&space_info->lock); |
3920 | } | 3925 | } |
@@ -4105,7 +4110,7 @@ static u64 calc_global_metadata_size(struct btrfs_fs_info *fs_info) | |||
4105 | num_bytes += div64_u64(data_used + meta_used, 50); | 4110 | num_bytes += div64_u64(data_used + meta_used, 50); |
4106 | 4111 | ||
4107 | if (num_bytes * 3 > meta_used) | 4112 | if (num_bytes * 3 > meta_used) |
4108 | num_bytes = div64_u64(meta_used, 3); | 4113 | num_bytes = div64_u64(meta_used, 3) * 2; |
4109 | 4114 | ||
4110 | return ALIGN(num_bytes, fs_info->extent_root->leafsize << 10); | 4115 | return ALIGN(num_bytes, fs_info->extent_root->leafsize << 10); |
4111 | } | 4116 | } |
@@ -4132,14 +4137,14 @@ static void update_global_block_rsv(struct btrfs_fs_info *fs_info) | |||
4132 | block_rsv->reserved += num_bytes; | 4137 | block_rsv->reserved += num_bytes; |
4133 | sinfo->bytes_may_use += num_bytes; | 4138 | sinfo->bytes_may_use += num_bytes; |
4134 | trace_btrfs_space_reservation(fs_info, "space_info", | 4139 | trace_btrfs_space_reservation(fs_info, "space_info", |
4135 | (u64)sinfo, num_bytes, 1); | 4140 | (u64)(unsigned long)sinfo, num_bytes, 1); |
4136 | } | 4141 | } |
4137 | 4142 | ||
4138 | if (block_rsv->reserved >= block_rsv->size) { | 4143 | if (block_rsv->reserved >= block_rsv->size) { |
4139 | num_bytes = block_rsv->reserved - block_rsv->size; | 4144 | num_bytes = block_rsv->reserved - block_rsv->size; |
4140 | sinfo->bytes_may_use -= num_bytes; | 4145 | sinfo->bytes_may_use -= num_bytes; |
4141 | trace_btrfs_space_reservation(fs_info, "space_info", | 4146 | trace_btrfs_space_reservation(fs_info, "space_info", |
4142 | (u64)sinfo, num_bytes, 0); | 4147 | (u64)(unsigned long)sinfo, num_bytes, 0); |
4143 | sinfo->reservation_progress++; | 4148 | sinfo->reservation_progress++; |
4144 | block_rsv->reserved = block_rsv->size; | 4149 | block_rsv->reserved = block_rsv->size; |
4145 | block_rsv->full = 1; | 4150 | block_rsv->full = 1; |
@@ -4192,7 +4197,8 @@ void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans, | |||
4192 | if (!trans->bytes_reserved) | 4197 | if (!trans->bytes_reserved) |
4193 | return; | 4198 | return; |
4194 | 4199 | ||
4195 | trace_btrfs_space_reservation(root->fs_info, "transaction", (u64)trans, | 4200 | trace_btrfs_space_reservation(root->fs_info, "transaction", |
4201 | (u64)(unsigned long)trans, | ||
4196 | trans->bytes_reserved, 0); | 4202 | trans->bytes_reserved, 0); |
4197 | btrfs_block_rsv_release(root, trans->block_rsv, trans->bytes_reserved); | 4203 | btrfs_block_rsv_release(root, trans->block_rsv, trans->bytes_reserved); |
4198 | trans->bytes_reserved = 0; | 4204 | trans->bytes_reserved = 0; |
@@ -4710,9 +4716,9 @@ static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache, | |||
4710 | space_info->bytes_reserved += num_bytes; | 4716 | space_info->bytes_reserved += num_bytes; |
4711 | if (reserve == RESERVE_ALLOC) { | 4717 | if (reserve == RESERVE_ALLOC) { |
4712 | trace_btrfs_space_reservation(cache->fs_info, | 4718 | trace_btrfs_space_reservation(cache->fs_info, |
4713 | "space_info", | 4719 | "space_info", |
4714 | (u64)space_info, | 4720 | (u64)(unsigned long)space_info, |
4715 | num_bytes, 0); | 4721 | num_bytes, 0); |
4716 | space_info->bytes_may_use -= num_bytes; | 4722 | space_info->bytes_may_use -= num_bytes; |
4717 | } | 4723 | } |
4718 | } | 4724 | } |
@@ -7886,9 +7892,16 @@ int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range) | |||
7886 | u64 start; | 7892 | u64 start; |
7887 | u64 end; | 7893 | u64 end; |
7888 | u64 trimmed = 0; | 7894 | u64 trimmed = 0; |
7895 | u64 total_bytes = btrfs_super_total_bytes(fs_info->super_copy); | ||
7889 | int ret = 0; | 7896 | int ret = 0; |
7890 | 7897 | ||
7891 | cache = btrfs_lookup_block_group(fs_info, range->start); | 7898 | /* |
7899 | * try to trim all FS space, our block group may start from non-zero. | ||
7900 | */ | ||
7901 | if (range->len == total_bytes) | ||
7902 | cache = btrfs_lookup_first_block_group(fs_info, range->start); | ||
7903 | else | ||
7904 | cache = btrfs_lookup_block_group(fs_info, range->start); | ||
7892 | 7905 | ||
7893 | while (cache) { | 7906 | while (cache) { |
7894 | if (cache->key.objectid >= (range->start + range->len)) { | 7907 | if (cache->key.objectid >= (range->start + range->len)) { |
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index fcf77e1ded40..a55fbe6252de 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -513,6 +513,15 @@ hit_next: | |||
513 | WARN_ON(state->end < start); | 513 | WARN_ON(state->end < start); |
514 | last_end = state->end; | 514 | last_end = state->end; |
515 | 515 | ||
516 | if (state->end < end && !need_resched()) | ||
517 | next_node = rb_next(&state->rb_node); | ||
518 | else | ||
519 | next_node = NULL; | ||
520 | |||
521 | /* the state doesn't have the wanted bits, go ahead */ | ||
522 | if (!(state->state & bits)) | ||
523 | goto next; | ||
524 | |||
516 | /* | 525 | /* |
517 | * | ---- desired range ---- | | 526 | * | ---- desired range ---- | |
518 | * | state | or | 527 | * | state | or |
@@ -565,20 +574,15 @@ hit_next: | |||
565 | goto out; | 574 | goto out; |
566 | } | 575 | } |
567 | 576 | ||
568 | if (state->end < end && prealloc && !need_resched()) | ||
569 | next_node = rb_next(&state->rb_node); | ||
570 | else | ||
571 | next_node = NULL; | ||
572 | |||
573 | set |= clear_state_bit(tree, state, &bits, wake); | 577 | set |= clear_state_bit(tree, state, &bits, wake); |
578 | next: | ||
574 | if (last_end == (u64)-1) | 579 | if (last_end == (u64)-1) |
575 | goto out; | 580 | goto out; |
576 | start = last_end + 1; | 581 | start = last_end + 1; |
577 | if (start <= end && next_node) { | 582 | if (start <= end && next_node) { |
578 | state = rb_entry(next_node, struct extent_state, | 583 | state = rb_entry(next_node, struct extent_state, |
579 | rb_node); | 584 | rb_node); |
580 | if (state->start == start) | 585 | goto hit_next; |
581 | goto hit_next; | ||
582 | } | 586 | } |
583 | goto search_again; | 587 | goto search_again; |
584 | 588 | ||
@@ -961,8 +965,6 @@ hit_next: | |||
961 | 965 | ||
962 | set_state_bits(tree, state, &bits); | 966 | set_state_bits(tree, state, &bits); |
963 | clear_state_bit(tree, state, &clear_bits, 0); | 967 | clear_state_bit(tree, state, &clear_bits, 0); |
964 | |||
965 | merge_state(tree, state); | ||
966 | if (last_end == (u64)-1) | 968 | if (last_end == (u64)-1) |
967 | goto out; | 969 | goto out; |
968 | 970 | ||
@@ -1007,7 +1009,6 @@ hit_next: | |||
1007 | if (state->end <= end) { | 1009 | if (state->end <= end) { |
1008 | set_state_bits(tree, state, &bits); | 1010 | set_state_bits(tree, state, &bits); |
1009 | clear_state_bit(tree, state, &clear_bits, 0); | 1011 | clear_state_bit(tree, state, &clear_bits, 0); |
1010 | merge_state(tree, state); | ||
1011 | if (last_end == (u64)-1) | 1012 | if (last_end == (u64)-1) |
1012 | goto out; | 1013 | goto out; |
1013 | start = last_end + 1; | 1014 | start = last_end + 1; |
@@ -1068,8 +1069,6 @@ hit_next: | |||
1068 | 1069 | ||
1069 | set_state_bits(tree, prealloc, &bits); | 1070 | set_state_bits(tree, prealloc, &bits); |
1070 | clear_state_bit(tree, prealloc, &clear_bits, 0); | 1071 | clear_state_bit(tree, prealloc, &clear_bits, 0); |
1071 | |||
1072 | merge_state(tree, prealloc); | ||
1073 | prealloc = NULL; | 1072 | prealloc = NULL; |
1074 | goto out; | 1073 | goto out; |
1075 | } | 1074 | } |
@@ -2154,13 +2153,46 @@ static int bio_readpage_error(struct bio *failed_bio, struct page *page, | |||
2154 | "this_mirror=%d, num_copies=%d, in_validation=%d\n", read_mode, | 2153 | "this_mirror=%d, num_copies=%d, in_validation=%d\n", read_mode, |
2155 | failrec->this_mirror, num_copies, failrec->in_validation); | 2154 | failrec->this_mirror, num_copies, failrec->in_validation); |
2156 | 2155 | ||
2157 | tree->ops->submit_bio_hook(inode, read_mode, bio, failrec->this_mirror, | 2156 | ret = tree->ops->submit_bio_hook(inode, read_mode, bio, |
2158 | failrec->bio_flags, 0); | 2157 | failrec->this_mirror, |
2159 | return 0; | 2158 | failrec->bio_flags, 0); |
2159 | return ret; | ||
2160 | } | 2160 | } |
2161 | 2161 | ||
2162 | /* lots and lots of room for performance fixes in the end_bio funcs */ | 2162 | /* lots and lots of room for performance fixes in the end_bio funcs */ |
2163 | 2163 | ||
2164 | int end_extent_writepage(struct page *page, int err, u64 start, u64 end) | ||
2165 | { | ||
2166 | int uptodate = (err == 0); | ||
2167 | struct extent_io_tree *tree; | ||
2168 | int ret; | ||
2169 | |||
2170 | tree = &BTRFS_I(page->mapping->host)->io_tree; | ||
2171 | |||
2172 | if (tree->ops && tree->ops->writepage_end_io_hook) { | ||
2173 | ret = tree->ops->writepage_end_io_hook(page, start, | ||
2174 | end, NULL, uptodate); | ||
2175 | if (ret) | ||
2176 | uptodate = 0; | ||
2177 | } | ||
2178 | |||
2179 | if (!uptodate && tree->ops && | ||
2180 | tree->ops->writepage_io_failed_hook) { | ||
2181 | ret = tree->ops->writepage_io_failed_hook(NULL, page, | ||
2182 | start, end, NULL); | ||
2183 | /* Writeback already completed */ | ||
2184 | if (ret == 0) | ||
2185 | return 1; | ||
2186 | } | ||
2187 | |||
2188 | if (!uptodate) { | ||
2189 | clear_extent_uptodate(tree, start, end, NULL, GFP_NOFS); | ||
2190 | ClearPageUptodate(page); | ||
2191 | SetPageError(page); | ||
2192 | } | ||
2193 | return 0; | ||
2194 | } | ||
2195 | |||
2164 | /* | 2196 | /* |
2165 | * after a writepage IO is done, we need to: | 2197 | * after a writepage IO is done, we need to: |
2166 | * clear the uptodate bits on error | 2198 | * clear the uptodate bits on error |
@@ -2172,13 +2204,11 @@ static int bio_readpage_error(struct bio *failed_bio, struct page *page, | |||
2172 | */ | 2204 | */ |
2173 | static void end_bio_extent_writepage(struct bio *bio, int err) | 2205 | static void end_bio_extent_writepage(struct bio *bio, int err) |
2174 | { | 2206 | { |
2175 | int uptodate = err == 0; | ||
2176 | struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1; | 2207 | struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1; |
2177 | struct extent_io_tree *tree; | 2208 | struct extent_io_tree *tree; |
2178 | u64 start; | 2209 | u64 start; |
2179 | u64 end; | 2210 | u64 end; |
2180 | int whole_page; | 2211 | int whole_page; |
2181 | int ret; | ||
2182 | 2212 | ||
2183 | do { | 2213 | do { |
2184 | struct page *page = bvec->bv_page; | 2214 | struct page *page = bvec->bv_page; |
@@ -2195,28 +2225,9 @@ static void end_bio_extent_writepage(struct bio *bio, int err) | |||
2195 | 2225 | ||
2196 | if (--bvec >= bio->bi_io_vec) | 2226 | if (--bvec >= bio->bi_io_vec) |
2197 | prefetchw(&bvec->bv_page->flags); | 2227 | prefetchw(&bvec->bv_page->flags); |
2198 | if (tree->ops && tree->ops->writepage_end_io_hook) { | ||
2199 | ret = tree->ops->writepage_end_io_hook(page, start, | ||
2200 | end, NULL, uptodate); | ||
2201 | if (ret) | ||
2202 | uptodate = 0; | ||
2203 | } | ||
2204 | |||
2205 | if (!uptodate && tree->ops && | ||
2206 | tree->ops->writepage_io_failed_hook) { | ||
2207 | ret = tree->ops->writepage_io_failed_hook(bio, page, | ||
2208 | start, end, NULL); | ||
2209 | if (ret == 0) { | ||
2210 | uptodate = (err == 0); | ||
2211 | continue; | ||
2212 | } | ||
2213 | } | ||
2214 | 2228 | ||
2215 | if (!uptodate) { | 2229 | if (end_extent_writepage(page, err, start, end)) |
2216 | clear_extent_uptodate(tree, start, end, NULL, GFP_NOFS); | 2230 | continue; |
2217 | ClearPageUptodate(page); | ||
2218 | SetPageError(page); | ||
2219 | } | ||
2220 | 2231 | ||
2221 | if (whole_page) | 2232 | if (whole_page) |
2222 | end_page_writeback(page); | 2233 | end_page_writeback(page); |
@@ -2779,9 +2790,12 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc, | |||
2779 | delalloc_start = delalloc_end + 1; | 2790 | delalloc_start = delalloc_end + 1; |
2780 | continue; | 2791 | continue; |
2781 | } | 2792 | } |
2782 | tree->ops->fill_delalloc(inode, page, delalloc_start, | 2793 | ret = tree->ops->fill_delalloc(inode, page, |
2783 | delalloc_end, &page_started, | 2794 | delalloc_start, |
2784 | &nr_written); | 2795 | delalloc_end, |
2796 | &page_started, | ||
2797 | &nr_written); | ||
2798 | BUG_ON(ret); | ||
2785 | /* | 2799 | /* |
2786 | * delalloc_end is already one less than the total | 2800 | * delalloc_end is already one less than the total |
2787 | * length, so we don't subtract one from | 2801 | * length, so we don't subtract one from |
@@ -2818,8 +2832,12 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc, | |||
2818 | if (tree->ops && tree->ops->writepage_start_hook) { | 2832 | if (tree->ops && tree->ops->writepage_start_hook) { |
2819 | ret = tree->ops->writepage_start_hook(page, start, | 2833 | ret = tree->ops->writepage_start_hook(page, start, |
2820 | page_end); | 2834 | page_end); |
2821 | if (ret == -EAGAIN) { | 2835 | if (ret) { |
2822 | redirty_page_for_writepage(wbc, page); | 2836 | /* Fixup worker will requeue */ |
2837 | if (ret == -EBUSY) | ||
2838 | wbc->pages_skipped++; | ||
2839 | else | ||
2840 | redirty_page_for_writepage(wbc, page); | ||
2823 | update_nr_written(page, wbc, nr_written); | 2841 | update_nr_written(page, wbc, nr_written); |
2824 | unlock_page(page); | 2842 | unlock_page(page); |
2825 | ret = 0; | 2843 | ret = 0; |
@@ -3289,7 +3307,7 @@ int try_release_extent_mapping(struct extent_map_tree *map, | |||
3289 | len = end - start + 1; | 3307 | len = end - start + 1; |
3290 | write_lock(&map->lock); | 3308 | write_lock(&map->lock); |
3291 | em = lookup_extent_mapping(map, start, len); | 3309 | em = lookup_extent_mapping(map, start, len); |
3292 | if (IS_ERR_OR_NULL(em)) { | 3310 | if (!em) { |
3293 | write_unlock(&map->lock); | 3311 | write_unlock(&map->lock); |
3294 | break; | 3312 | break; |
3295 | } | 3313 | } |
@@ -3853,10 +3871,9 @@ int clear_extent_buffer_uptodate(struct extent_io_tree *tree, | |||
3853 | num_pages = num_extent_pages(eb->start, eb->len); | 3871 | num_pages = num_extent_pages(eb->start, eb->len); |
3854 | clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags); | 3872 | clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags); |
3855 | 3873 | ||
3856 | if (eb_straddles_pages(eb)) { | 3874 | clear_extent_uptodate(tree, eb->start, eb->start + eb->len - 1, |
3857 | clear_extent_uptodate(tree, eb->start, eb->start + eb->len - 1, | 3875 | cached_state, GFP_NOFS); |
3858 | cached_state, GFP_NOFS); | 3876 | |
3859 | } | ||
3860 | for (i = 0; i < num_pages; i++) { | 3877 | for (i = 0; i < num_pages; i++) { |
3861 | page = extent_buffer_page(eb, i); | 3878 | page = extent_buffer_page(eb, i); |
3862 | if (page) | 3879 | if (page) |
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index bc6a042cb6fc..cecc3518c121 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -319,4 +319,5 @@ struct btrfs_mapping_tree; | |||
319 | int repair_io_failure(struct btrfs_mapping_tree *map_tree, u64 start, | 319 | int repair_io_failure(struct btrfs_mapping_tree *map_tree, u64 start, |
320 | u64 length, u64 logical, struct page *page, | 320 | u64 length, u64 logical, struct page *page, |
321 | int mirror_num); | 321 | int mirror_num); |
322 | int end_extent_writepage(struct page *page, int err, u64 start, u64 end); | ||
322 | #endif | 323 | #endif |
diff --git a/fs/btrfs/extent_map.h b/fs/btrfs/extent_map.h
index 33a7890b1f40..1195f09761fe 100644
--- a/fs/btrfs/extent_map.h
+++ b/fs/btrfs/extent_map.h
@@ -26,8 +26,8 @@ struct extent_map { | |||
26 | unsigned long flags; | 26 | unsigned long flags; |
27 | struct block_device *bdev; | 27 | struct block_device *bdev; |
28 | atomic_t refs; | 28 | atomic_t refs; |
29 | unsigned int in_tree:1; | 29 | unsigned int in_tree; |
30 | unsigned int compress_type:4; | 30 | unsigned int compress_type; |
31 | }; | 31 | }; |
32 | 32 | ||
33 | struct extent_map_tree { | 33 | struct extent_map_tree { |
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 859ba2dd8890..e8d06b6b9194 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -1605,6 +1605,14 @@ static long btrfs_fallocate(struct file *file, int mode, | |||
1605 | return -EOPNOTSUPP; | 1605 | return -EOPNOTSUPP; |
1606 | 1606 | ||
1607 | /* | 1607 | /* |
1608 | * Make sure we have enough space before we do the | ||
1609 | * allocation. | ||
1610 | */ | ||
1611 | ret = btrfs_check_data_free_space(inode, len); | ||
1612 | if (ret) | ||
1613 | return ret; | ||
1614 | |||
1615 | /* | ||
1608 | * wait for ordered IO before we have any locks. We'll loop again | 1616 | * wait for ordered IO before we have any locks. We'll loop again |
1609 | * below with the locks held. | 1617 | * below with the locks held. |
1610 | */ | 1618 | */ |
@@ -1667,27 +1675,12 @@ static long btrfs_fallocate(struct file *file, int mode, | |||
1667 | if (em->block_start == EXTENT_MAP_HOLE || | 1675 | if (em->block_start == EXTENT_MAP_HOLE || |
1668 | (cur_offset >= inode->i_size && | 1676 | (cur_offset >= inode->i_size && |
1669 | !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) { | 1677 | !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) { |
1670 | |||
1671 | /* | ||
1672 | * Make sure we have enough space before we do the | ||
1673 | * allocation. | ||
1674 | */ | ||
1675 | ret = btrfs_check_data_free_space(inode, last_byte - | ||
1676 | cur_offset); | ||
1677 | if (ret) { | ||
1678 | free_extent_map(em); | ||
1679 | break; | ||
1680 | } | ||
1681 | |||
1682 | ret = btrfs_prealloc_file_range(inode, mode, cur_offset, | 1678 | ret = btrfs_prealloc_file_range(inode, mode, cur_offset, |
1683 | last_byte - cur_offset, | 1679 | last_byte - cur_offset, |
1684 | 1 << inode->i_blkbits, | 1680 | 1 << inode->i_blkbits, |
1685 | offset + len, | 1681 | offset + len, |
1686 | &alloc_hint); | 1682 | &alloc_hint); |
1687 | 1683 | ||
1688 | /* Let go of our reservation. */ | ||
1689 | btrfs_free_reserved_data_space(inode, last_byte - | ||
1690 | cur_offset); | ||
1691 | if (ret < 0) { | 1684 | if (ret < 0) { |
1692 | free_extent_map(em); | 1685 | free_extent_map(em); |
1693 | break; | 1686 | break; |
@@ -1715,6 +1708,8 @@ static long btrfs_fallocate(struct file *file, int mode, | |||
1715 | &cached_state, GFP_NOFS); | 1708 | &cached_state, GFP_NOFS); |
1716 | out: | 1709 | out: |
1717 | mutex_unlock(&inode->i_mutex); | 1710 | mutex_unlock(&inode->i_mutex); |
1711 | /* Let go of our reservation. */ | ||
1712 | btrfs_free_reserved_data_space(inode, len); | ||
1718 | return ret; | 1713 | return ret; |
1719 | } | 1714 | } |
1720 | 1715 | ||
@@ -1761,7 +1756,7 @@ static int find_desired_extent(struct inode *inode, loff_t *offset, int origin) | |||
1761 | start - root->sectorsize, | 1756 | start - root->sectorsize, |
1762 | root->sectorsize, 0); | 1757 | root->sectorsize, 0); |
1763 | if (IS_ERR(em)) { | 1758 | if (IS_ERR(em)) { |
1764 | ret = -ENXIO; | 1759 | ret = PTR_ERR(em); |
1765 | goto out; | 1760 | goto out; |
1766 | } | 1761 | } |
1767 | last_end = em->start + em->len; | 1762 | last_end = em->start + em->len; |
@@ -1773,7 +1768,7 @@ static int find_desired_extent(struct inode *inode, loff_t *offset, int origin) | |||
1773 | while (1) { | 1768 | while (1) { |
1774 | em = btrfs_get_extent_fiemap(inode, NULL, 0, start, len, 0); | 1769 | em = btrfs_get_extent_fiemap(inode, NULL, 0, start, len, 0); |
1775 | if (IS_ERR(em)) { | 1770 | if (IS_ERR(em)) { |
1776 | ret = -ENXIO; | 1771 | ret = PTR_ERR(em); |
1777 | break; | 1772 | break; |
1778 | } | 1773 | } |
1779 | 1774 | ||
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index c2f20594c9f7..710ea380c7ed 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -777,6 +777,7 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info, | |||
777 | spin_lock(&block_group->lock); | 777 | spin_lock(&block_group->lock); |
778 | if (block_group->disk_cache_state != BTRFS_DC_WRITTEN) { | 778 | if (block_group->disk_cache_state != BTRFS_DC_WRITTEN) { |
779 | spin_unlock(&block_group->lock); | 779 | spin_unlock(&block_group->lock); |
780 | btrfs_free_path(path); | ||
780 | goto out; | 781 | goto out; |
781 | } | 782 | } |
782 | spin_unlock(&block_group->lock); | 783 | spin_unlock(&block_group->lock); |
diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c
index 213ffa86ce1b..ee15d88b33d2 100644
--- a/fs/btrfs/inode-map.c
+++ b/fs/btrfs/inode-map.c
@@ -438,7 +438,8 @@ int btrfs_save_ino_cache(struct btrfs_root *root, | |||
438 | trans->bytes_reserved); | 438 | trans->bytes_reserved); |
439 | if (ret) | 439 | if (ret) |
440 | goto out; | 440 | goto out; |
441 | trace_btrfs_space_reservation(root->fs_info, "ino_cache", (u64)trans, | 441 | trace_btrfs_space_reservation(root->fs_info, "ino_cache", |
442 | (u64)(unsigned long)trans, | ||
442 | trans->bytes_reserved, 1); | 443 | trans->bytes_reserved, 1); |
443 | again: | 444 | again: |
444 | inode = lookup_free_ino_inode(root, path); | 445 | inode = lookup_free_ino_inode(root, path); |
@@ -500,7 +501,8 @@ again: | |||
500 | out_put: | 501 | out_put: |
501 | iput(inode); | 502 | iput(inode); |
502 | out_release: | 503 | out_release: |
503 | trace_btrfs_space_reservation(root->fs_info, "ino_cache", (u64)trans, | 504 | trace_btrfs_space_reservation(root->fs_info, "ino_cache", |
505 | (u64)(unsigned long)trans, | ||
504 | trans->bytes_reserved, 0); | 506 | trans->bytes_reserved, 0); |
505 | btrfs_block_rsv_release(root, trans->block_rsv, trans->bytes_reserved); | 507 | btrfs_block_rsv_release(root, trans->block_rsv, trans->bytes_reserved); |
506 | out: | 508 | out: |
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 32214fe0f7e3..892b34785ccc 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -1555,6 +1555,7 @@ static void btrfs_writepage_fixup_worker(struct btrfs_work *work) | |||
1555 | struct inode *inode; | 1555 | struct inode *inode; |
1556 | u64 page_start; | 1556 | u64 page_start; |
1557 | u64 page_end; | 1557 | u64 page_end; |
1558 | int ret; | ||
1558 | 1559 | ||
1559 | fixup = container_of(work, struct btrfs_writepage_fixup, work); | 1560 | fixup = container_of(work, struct btrfs_writepage_fixup, work); |
1560 | page = fixup->page; | 1561 | page = fixup->page; |
@@ -1582,12 +1583,21 @@ again: | |||
1582 | page_end, &cached_state, GFP_NOFS); | 1583 | page_end, &cached_state, GFP_NOFS); |
1583 | unlock_page(page); | 1584 | unlock_page(page); |
1584 | btrfs_start_ordered_extent(inode, ordered, 1); | 1585 | btrfs_start_ordered_extent(inode, ordered, 1); |
1586 | btrfs_put_ordered_extent(ordered); | ||
1585 | goto again; | 1587 | goto again; |
1586 | } | 1588 | } |
1587 | 1589 | ||
1588 | BUG(); | 1590 | ret = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE); |
1591 | if (ret) { | ||
1592 | mapping_set_error(page->mapping, ret); | ||
1593 | end_extent_writepage(page, ret, page_start, page_end); | ||
1594 | ClearPageChecked(page); | ||
1595 | goto out; | ||
1596 | } | ||
1597 | |||
1589 | btrfs_set_extent_delalloc(inode, page_start, page_end, &cached_state); | 1598 | btrfs_set_extent_delalloc(inode, page_start, page_end, &cached_state); |
1590 | ClearPageChecked(page); | 1599 | ClearPageChecked(page); |
1600 | set_page_dirty(page); | ||
1591 | out: | 1601 | out: |
1592 | unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start, page_end, | 1602 | unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start, page_end, |
1593 | &cached_state, GFP_NOFS); | 1603 | &cached_state, GFP_NOFS); |
@@ -1630,7 +1640,7 @@ static int btrfs_writepage_start_hook(struct page *page, u64 start, u64 end) | |||
1630 | fixup->work.func = btrfs_writepage_fixup_worker; | 1640 | fixup->work.func = btrfs_writepage_fixup_worker; |
1631 | fixup->page = page; | 1641 | fixup->page = page; |
1632 | btrfs_queue_worker(&root->fs_info->fixup_workers, &fixup->work); | 1642 | btrfs_queue_worker(&root->fs_info->fixup_workers, &fixup->work); |
1633 | return -EAGAIN; | 1643 | return -EBUSY; |
1634 | } | 1644 | } |
1635 | 1645 | ||
1636 | static int insert_reserved_file_extent(struct btrfs_trans_handle *trans, | 1646 | static int insert_reserved_file_extent(struct btrfs_trans_handle *trans, |
@@ -4575,7 +4585,8 @@ int btrfs_add_link(struct btrfs_trans_handle *trans, | |||
4575 | ret = btrfs_insert_dir_item(trans, root, name, name_len, | 4585 | ret = btrfs_insert_dir_item(trans, root, name, name_len, |
4576 | parent_inode, &key, | 4586 | parent_inode, &key, |
4577 | btrfs_inode_type(inode), index); | 4587 | btrfs_inode_type(inode), index); |
4578 | BUG_ON(ret); | 4588 | if (ret) |
4589 | goto fail_dir_item; | ||
4579 | 4590 | ||
4580 | btrfs_i_size_write(parent_inode, parent_inode->i_size + | 4591 | btrfs_i_size_write(parent_inode, parent_inode->i_size + |
4581 | name_len * 2); | 4592 | name_len * 2); |
@@ -4583,6 +4594,23 @@ int btrfs_add_link(struct btrfs_trans_handle *trans, | |||
4583 | ret = btrfs_update_inode(trans, root, parent_inode); | 4594 | ret = btrfs_update_inode(trans, root, parent_inode); |
4584 | } | 4595 | } |
4585 | return ret; | 4596 | return ret; |
4597 | |||
4598 | fail_dir_item: | ||
4599 | if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) { | ||
4600 | u64 local_index; | ||
4601 | int err; | ||
4602 | err = btrfs_del_root_ref(trans, root->fs_info->tree_root, | ||
4603 | key.objectid, root->root_key.objectid, | ||
4604 | parent_ino, &local_index, name, name_len); | ||
4605 | |||
4606 | } else if (add_backref) { | ||
4607 | u64 local_index; | ||
4608 | int err; | ||
4609 | |||
4610 | err = btrfs_del_inode_ref(trans, root, name, name_len, | ||
4611 | ino, parent_ino, &local_index); | ||
4612 | } | ||
4613 | return ret; | ||
4586 | } | 4614 | } |
4587 | 4615 | ||
4588 | static int btrfs_add_nondir(struct btrfs_trans_handle *trans, | 4616 | static int btrfs_add_nondir(struct btrfs_trans_handle *trans, |
@@ -6696,8 +6724,10 @@ int btrfs_create_subvol_root(struct btrfs_trans_handle *trans, | |||
6696 | int err; | 6724 | int err; |
6697 | u64 index = 0; | 6725 | u64 index = 0; |
6698 | 6726 | ||
6699 | inode = btrfs_new_inode(trans, new_root, NULL, "..", 2, new_dirid, | 6727 | inode = btrfs_new_inode(trans, new_root, NULL, "..", 2, |
6700 | new_dirid, S_IFDIR | 0700, &index); | 6728 | new_dirid, new_dirid, |
6729 | S_IFDIR | (~current_umask() & S_IRWXUGO), | ||
6730 | &index); | ||
6701 | if (IS_ERR(inode)) | 6731 | if (IS_ERR(inode)) |
6702 | return PTR_ERR(inode); | 6732 | return PTR_ERR(inode); |
6703 | inode->i_op = &btrfs_dir_inode_operations; | 6733 | inode->i_op = &btrfs_dir_inode_operations; |
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 03bb62a9ee24..d8b54715c2de 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -861,6 +861,7 @@ static int cluster_pages_for_defrag(struct inode *inode, | |||
861 | int i_done; | 861 | int i_done; |
862 | struct btrfs_ordered_extent *ordered; | 862 | struct btrfs_ordered_extent *ordered; |
863 | struct extent_state *cached_state = NULL; | 863 | struct extent_state *cached_state = NULL; |
864 | struct extent_io_tree *tree; | ||
864 | gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping); | 865 | gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping); |
865 | 866 | ||
866 | if (isize == 0) | 867 | if (isize == 0) |
@@ -871,18 +872,34 @@ static int cluster_pages_for_defrag(struct inode *inode, | |||
871 | num_pages << PAGE_CACHE_SHIFT); | 872 | num_pages << PAGE_CACHE_SHIFT); |
872 | if (ret) | 873 | if (ret) |
873 | return ret; | 874 | return ret; |
874 | again: | ||
875 | ret = 0; | ||
876 | i_done = 0; | 875 | i_done = 0; |
876 | tree = &BTRFS_I(inode)->io_tree; | ||
877 | 877 | ||
878 | /* step one, lock all the pages */ | 878 | /* step one, lock all the pages */ |
879 | for (i = 0; i < num_pages; i++) { | 879 | for (i = 0; i < num_pages; i++) { |
880 | struct page *page; | 880 | struct page *page; |
881 | again: | ||
881 | page = find_or_create_page(inode->i_mapping, | 882 | page = find_or_create_page(inode->i_mapping, |
882 | start_index + i, mask); | 883 | start_index + i, mask); |
883 | if (!page) | 884 | if (!page) |
884 | break; | 885 | break; |
885 | 886 | ||
887 | page_start = page_offset(page); | ||
888 | page_end = page_start + PAGE_CACHE_SIZE - 1; | ||
889 | while (1) { | ||
890 | lock_extent(tree, page_start, page_end, GFP_NOFS); | ||
891 | ordered = btrfs_lookup_ordered_extent(inode, | ||
892 | page_start); | ||
893 | unlock_extent(tree, page_start, page_end, GFP_NOFS); | ||
894 | if (!ordered) | ||
895 | break; | ||
896 | |||
897 | unlock_page(page); | ||
898 | btrfs_start_ordered_extent(inode, ordered, 1); | ||
899 | btrfs_put_ordered_extent(ordered); | ||
900 | lock_page(page); | ||
901 | } | ||
902 | |||
886 | if (!PageUptodate(page)) { | 903 | if (!PageUptodate(page)) { |
887 | btrfs_readpage(NULL, page); | 904 | btrfs_readpage(NULL, page); |
888 | lock_page(page); | 905 | lock_page(page); |
@@ -893,15 +910,22 @@ again: | |||
893 | break; | 910 | break; |
894 | } | 911 | } |
895 | } | 912 | } |
913 | |||
896 | isize = i_size_read(inode); | 914 | isize = i_size_read(inode); |
897 | file_end = (isize - 1) >> PAGE_CACHE_SHIFT; | 915 | file_end = (isize - 1) >> PAGE_CACHE_SHIFT; |
898 | if (!isize || page->index > file_end || | 916 | if (!isize || page->index > file_end) { |
899 | page->mapping != inode->i_mapping) { | ||
900 | /* whoops, we blew past eof, skip this page */ | 917 | /* whoops, we blew past eof, skip this page */ |
901 | unlock_page(page); | 918 | unlock_page(page); |
902 | page_cache_release(page); | 919 | page_cache_release(page); |
903 | break; | 920 | break; |
904 | } | 921 | } |
922 | |||
923 | if (page->mapping != inode->i_mapping) { | ||
924 | unlock_page(page); | ||
925 | page_cache_release(page); | ||
926 | goto again; | ||
927 | } | ||
928 | |||
905 | pages[i] = page; | 929 | pages[i] = page; |
906 | i_done++; | 930 | i_done++; |
907 | } | 931 | } |
@@ -924,25 +948,6 @@ again: | |||
924 | lock_extent_bits(&BTRFS_I(inode)->io_tree, | 948 | lock_extent_bits(&BTRFS_I(inode)->io_tree, |
925 | page_start, page_end - 1, 0, &cached_state, | 949 | page_start, page_end - 1, 0, &cached_state, |
926 | GFP_NOFS); | 950 | GFP_NOFS); |
927 | ordered = btrfs_lookup_first_ordered_extent(inode, page_end - 1); | ||
928 | if (ordered && | ||
929 | ordered->file_offset + ordered->len > page_start && | ||
930 | ordered->file_offset < page_end) { | ||
931 | btrfs_put_ordered_extent(ordered); | ||
932 | unlock_extent_cached(&BTRFS_I(inode)->io_tree, | ||
933 | page_start, page_end - 1, | ||
934 | &cached_state, GFP_NOFS); | ||
935 | for (i = 0; i < i_done; i++) { | ||
936 | unlock_page(pages[i]); | ||
937 | page_cache_release(pages[i]); | ||
938 | } | ||
939 | btrfs_wait_ordered_range(inode, page_start, | ||
940 | page_end - page_start); | ||
941 | goto again; | ||
942 | } | ||
943 | if (ordered) | ||
944 | btrfs_put_ordered_extent(ordered); | ||
945 | |||
946 | clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, | 951 | clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, |
947 | page_end - 1, EXTENT_DIRTY | EXTENT_DELALLOC | | 952 | page_end - 1, EXTENT_DIRTY | EXTENT_DELALLOC | |
948 | EXTENT_DO_ACCOUNTING, 0, 0, &cached_state, | 953 | EXTENT_DO_ACCOUNTING, 0, 0, &cached_state, |
@@ -1327,6 +1332,12 @@ static noinline int btrfs_ioctl_snap_create_transid(struct file *file, | |||
1327 | goto out; | 1332 | goto out; |
1328 | } | 1333 | } |
1329 | 1334 | ||
1335 | if (name[0] == '.' && | ||
1336 | (namelen == 1 || (name[1] == '.' && namelen == 2))) { | ||
1337 | ret = -EEXIST; | ||
1338 | goto out; | ||
1339 | } | ||
1340 | |||
1330 | if (subvol) { | 1341 | if (subvol) { |
1331 | ret = btrfs_mksubvol(&file->f_path, name, namelen, | 1342 | ret = btrfs_mksubvol(&file->f_path, name, namelen, |
1332 | NULL, transid, readonly); | 1343 | NULL, transid, readonly); |
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 9770cc5bfb76..abc0fbffa510 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -1367,7 +1367,8 @@ out: | |||
1367 | } | 1367 | } |
1368 | 1368 | ||
1369 | static noinline_for_stack int scrub_chunk(struct scrub_dev *sdev, | 1369 | static noinline_for_stack int scrub_chunk(struct scrub_dev *sdev, |
1370 | u64 chunk_tree, u64 chunk_objectid, u64 chunk_offset, u64 length) | 1370 | u64 chunk_tree, u64 chunk_objectid, u64 chunk_offset, u64 length, |
1371 | u64 dev_offset) | ||
1371 | { | 1372 | { |
1372 | struct btrfs_mapping_tree *map_tree = | 1373 | struct btrfs_mapping_tree *map_tree = |
1373 | &sdev->dev->dev_root->fs_info->mapping_tree; | 1374 | &sdev->dev->dev_root->fs_info->mapping_tree; |
@@ -1391,7 +1392,8 @@ static noinline_for_stack int scrub_chunk(struct scrub_dev *sdev, | |||
1391 | goto out; | 1392 | goto out; |
1392 | 1393 | ||
1393 | for (i = 0; i < map->num_stripes; ++i) { | 1394 | for (i = 0; i < map->num_stripes; ++i) { |
1394 | if (map->stripes[i].dev == sdev->dev) { | 1395 | if (map->stripes[i].dev == sdev->dev && |
1396 | map->stripes[i].physical == dev_offset) { | ||
1395 | ret = scrub_stripe(sdev, map, i, chunk_offset, length); | 1397 | ret = scrub_stripe(sdev, map, i, chunk_offset, length); |
1396 | if (ret) | 1398 | if (ret) |
1397 | goto out; | 1399 | goto out; |
@@ -1487,7 +1489,7 @@ int scrub_enumerate_chunks(struct scrub_dev *sdev, u64 start, u64 end) | |||
1487 | break; | 1489 | break; |
1488 | } | 1490 | } |
1489 | ret = scrub_chunk(sdev, chunk_tree, chunk_objectid, | 1491 | ret = scrub_chunk(sdev, chunk_tree, chunk_objectid, |
1490 | chunk_offset, length); | 1492 | chunk_offset, length, found_key.offset); |
1491 | btrfs_put_block_group(cache); | 1493 | btrfs_put_block_group(cache); |
1492 | if (ret) | 1494 | if (ret) |
1493 | break; | 1495 | break; |
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 287a6728b1ad..04b77e3ceb7a 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -327,7 +327,8 @@ again: | |||
327 | 327 | ||
328 | if (num_bytes) { | 328 | if (num_bytes) { |
329 | trace_btrfs_space_reservation(root->fs_info, "transaction", | 329 | trace_btrfs_space_reservation(root->fs_info, "transaction", |
330 | (u64)h, num_bytes, 1); | 330 | (u64)(unsigned long)h, |
331 | num_bytes, 1); | ||
331 | h->block_rsv = &root->fs_info->trans_block_rsv; | 332 | h->block_rsv = &root->fs_info->trans_block_rsv; |
332 | h->bytes_reserved = num_bytes; | 333 | h->bytes_reserved = num_bytes; |
333 | } | 334 | } |
@@ -915,7 +916,11 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans, | |||
915 | dentry->d_name.name, dentry->d_name.len, | 916 | dentry->d_name.name, dentry->d_name.len, |
916 | parent_inode, &key, | 917 | parent_inode, &key, |
917 | BTRFS_FT_DIR, index); | 918 | BTRFS_FT_DIR, index); |
918 | BUG_ON(ret); | 919 | if (ret) { |
920 | pending->error = -EEXIST; | ||
921 | dput(parent); | ||
922 | goto fail; | ||
923 | } | ||
919 | 924 | ||
920 | btrfs_i_size_write(parent_inode, parent_inode->i_size + | 925 | btrfs_i_size_write(parent_inode, parent_inode->i_size + |
921 | dentry->d_name.len * 2); | 926 | dentry->d_name.len * 2); |
@@ -993,12 +998,9 @@ static noinline int create_pending_snapshots(struct btrfs_trans_handle *trans, | |||
993 | { | 998 | { |
994 | struct btrfs_pending_snapshot *pending; | 999 | struct btrfs_pending_snapshot *pending; |
995 | struct list_head *head = &trans->transaction->pending_snapshots; | 1000 | struct list_head *head = &trans->transaction->pending_snapshots; |
996 | int ret; | ||
997 | 1001 | ||
998 | list_for_each_entry(pending, head, list) { | 1002 | list_for_each_entry(pending, head, list) |
999 | ret = create_pending_snapshot(trans, fs_info, pending); | 1003 | create_pending_snapshot(trans, fs_info, pending); |
1000 | BUG_ON(ret); | ||
1001 | } | ||
1002 | return 0; | 1004 | return 0; |
1003 | } | 1005 | } |
1004 | 1006 | ||
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 0b4e2af7954d..ef41f285a475 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -459,12 +459,23 @@ int btrfs_close_extra_devices(struct btrfs_fs_devices *fs_devices) | |||
459 | { | 459 | { |
460 | struct btrfs_device *device, *next; | 460 | struct btrfs_device *device, *next; |
461 | 461 | ||
462 | struct block_device *latest_bdev = NULL; | ||
463 | u64 latest_devid = 0; | ||
464 | u64 latest_transid = 0; | ||
465 | |||
462 | mutex_lock(&uuid_mutex); | 466 | mutex_lock(&uuid_mutex); |
463 | again: | 467 | again: |
464 | /* This is the initialized path, it is safe to release the devices. */ | 468 | /* This is the initialized path, it is safe to release the devices. */ |
465 | list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) { | 469 | list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) { |
466 | if (device->in_fs_metadata) | 470 | if (device->in_fs_metadata) { |
471 | if (!latest_transid || | ||
472 | device->generation > latest_transid) { | ||
473 | latest_devid = device->devid; | ||
474 | latest_transid = device->generation; | ||
475 | latest_bdev = device->bdev; | ||
476 | } | ||
467 | continue; | 477 | continue; |
478 | } | ||
468 | 479 | ||
469 | if (device->bdev) { | 480 | if (device->bdev) { |
470 | blkdev_put(device->bdev, device->mode); | 481 | blkdev_put(device->bdev, device->mode); |
@@ -487,6 +498,10 @@ again: | |||
487 | goto again; | 498 | goto again; |
488 | } | 499 | } |
489 | 500 | ||
501 | fs_devices->latest_bdev = latest_bdev; | ||
502 | fs_devices->latest_devid = latest_devid; | ||
503 | fs_devices->latest_trans = latest_transid; | ||
504 | |||
490 | mutex_unlock(&uuid_mutex); | 505 | mutex_unlock(&uuid_mutex); |
491 | return 0; | 506 | return 0; |
492 | } | 507 | } |
@@ -1953,7 +1968,7 @@ static int btrfs_relocate_chunk(struct btrfs_root *root, | |||
1953 | em = lookup_extent_mapping(em_tree, chunk_offset, 1); | 1968 | em = lookup_extent_mapping(em_tree, chunk_offset, 1); |
1954 | read_unlock(&em_tree->lock); | 1969 | read_unlock(&em_tree->lock); |
1955 | 1970 | ||
1956 | BUG_ON(em->start > chunk_offset || | 1971 | BUG_ON(!em || em->start > chunk_offset || |
1957 | em->start + em->len < chunk_offset); | 1972 | em->start + em->len < chunk_offset); |
1958 | map = (struct map_lookup *)em->bdev; | 1973 | map = (struct map_lookup *)em->bdev; |
1959 | 1974 | ||
@@ -4356,6 +4371,20 @@ int btrfs_read_sys_array(struct btrfs_root *root) | |||
4356 | return -ENOMEM; | 4371 | return -ENOMEM; |
4357 | btrfs_set_buffer_uptodate(sb); | 4372 | btrfs_set_buffer_uptodate(sb); |
4358 | btrfs_set_buffer_lockdep_class(root->root_key.objectid, sb, 0); | 4373 | btrfs_set_buffer_lockdep_class(root->root_key.objectid, sb, 0); |
4374 | /* | ||
4375 | * The sb extent buffer is artifical and just used to read the system array. | ||
4376 | * btrfs_set_buffer_uptodate() call does not properly mark all it's | ||
4377 | * pages up-to-date when the page is larger: extent does not cover the | ||
4378 | * whole page and consequently check_page_uptodate does not find all | ||
4379 | * the page's extents up-to-date (the hole beyond sb), | ||
4380 | * write_extent_buffer then triggers a WARN_ON. | ||
4381 | * | ||
4382 | * Regular short extents go through mark_extent_buffer_dirty/writeback cycle, | ||
4383 | * but sb spans only this function. Add an explicit SetPageUptodate call | ||
4384 | * to silence the warning eg. on PowerPC 64. | ||
4385 | */ | ||
4386 | if (PAGE_CACHE_SIZE > BTRFS_SUPER_INFO_SIZE) | ||
4387 | SetPageUptodate(sb->first_page); | ||
4359 | 4388 | ||
4360 | write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE); | 4389 | write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE); |
4361 | array_size = btrfs_super_sys_array_size(super_copy); | 4390 | array_size = btrfs_super_sys_array_size(super_copy); |
diff --git a/fs/compat.c b/fs/compat.c
index fa9d721ecfee..07880bae28a9 100644
--- a/fs/compat.c
+++ b/fs/compat.c
@@ -131,41 +131,35 @@ asmlinkage long compat_sys_utimes(const char __user *filename, struct compat_tim | |||
131 | 131 | ||
132 | static int cp_compat_stat(struct kstat *stat, struct compat_stat __user *ubuf) | 132 | static int cp_compat_stat(struct kstat *stat, struct compat_stat __user *ubuf) |
133 | { | 133 | { |
134 | compat_ino_t ino = stat->ino; | 134 | struct compat_stat tmp; |
135 | typeof(ubuf->st_uid) uid = 0; | ||
136 | typeof(ubuf->st_gid) gid = 0; | ||
137 | int err; | ||
138 | 135 | ||
139 | SET_UID(uid, stat->uid); | 136 | if (!old_valid_dev(stat->dev) || !old_valid_dev(stat->rdev)) |
140 | SET_GID(gid, stat->gid); | 137 | return -EOVERFLOW; |
141 | 138 | ||
142 | if ((u64) stat->size > MAX_NON_LFS || | 139 | memset(&tmp, 0, sizeof(tmp)); |
143 | !old_valid_dev(stat->dev) || | 140 | tmp.st_dev = old_encode_dev(stat->dev); |
144 | !old_valid_dev(stat->rdev)) | 141 | tmp.st_ino = stat->ino; |
142 | if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino) | ||
145 | return -EOVERFLOW; | 143 | return -EOVERFLOW; |
146 | if (sizeof(ino) < sizeof(stat->ino) && ino != stat->ino) | 144 | tmp.st_mode = stat->mode; |
145 | tmp.st_nlink = stat->nlink; | ||
146 | if (tmp.st_nlink != stat->nlink) | ||
147 | return -EOVERFLOW; | 147 | return -EOVERFLOW; |
148 | 148 | SET_UID(tmp.st_uid, stat->uid); | |
149 | if (clear_user(ubuf, sizeof(*ubuf))) | 149 | SET_GID(tmp.st_gid, stat->gid); |
150 | return -EFAULT; | 150 | tmp.st_rdev = old_encode_dev(stat->rdev); |
151 | 151 | if ((u64) stat->size > MAX_NON_LFS) | |
152 | err = __put_user(old_encode_dev(stat->dev), &ubuf->st_dev); | 152 | return -EOVERFLOW; |
153 | err |= __put_user(ino, &ubuf->st_ino); | 153 | tmp.st_size = stat->size; |
154 | err |= __put_user(stat->mode, &ubuf->st_mode); | 154 | tmp.st_atime = stat->atime.tv_sec; |
155 | err |= __put_user(stat->nlink, &ubuf->st_nlink); | 155 | tmp.st_atime_nsec = stat->atime.tv_nsec; |
156 | err |= __put_user(uid, &ubuf->st_uid); | 156 | tmp.st_mtime = stat->mtime.tv_sec; |
157 | err |= __put_user(gid, &ubuf->st_gid); | 157 | tmp.st_mtime_nsec = stat->mtime.tv_nsec; |
158 | err |= __put_user(old_encode_dev(stat->rdev), &ubuf->st_rdev); | 158 | tmp.st_ctime = stat->ctime.tv_sec; |
159 | err |= __put_user(stat->size, &ubuf->st_size); | 159 | tmp.st_ctime_nsec = stat->ctime.tv_nsec; |
160 | err |= __put_user(stat->atime.tv_sec, &ubuf->st_atime); | 160 | tmp.st_blocks = stat->blocks; |
161 | err |= __put_user(stat->atime.tv_nsec, &ubuf->st_atime_nsec); | 161 | tmp.st_blksize = stat->blksize; |
162 | err |= __put_user(stat->mtime.tv_sec, &ubuf->st_mtime); | 162 | return copy_to_user(ubuf, &tmp, sizeof(tmp)) ? -EFAULT : 0; |
163 | err |= __put_user(stat->mtime.tv_nsec, &ubuf->st_mtime_nsec); | ||
164 | err |= __put_user(stat->ctime.tv_sec, &ubuf->st_ctime); | ||
165 | err |= __put_user(stat->ctime.tv_nsec, &ubuf->st_ctime_nsec); | ||
166 | err |= __put_user(stat->blksize, &ubuf->st_blksize); | ||
167 | err |= __put_user(stat->blocks, &ubuf->st_blocks); | ||
168 | return err; | ||
169 | } | 163 | } |
170 | 164 | ||
171 | asmlinkage long compat_sys_newstat(const char __user * filename, | 165 | asmlinkage long compat_sys_newstat(const char __user * filename, |
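The rewritten cp_compat_stat() drops the chain of __put_user() calls in favour of a zero-filled temporary struct that is range-checked field by field and pushed to userspace with one copy_to_user(), so no uninitialised padding leaks and any field that does not survive the narrowing is reported as -EOVERFLOW. A hedged userspace sketch of that narrowing pattern (the struct layouts and field widths below are invented for illustration):

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <errno.h>

/* Wide "kernel side" stat and a narrower "compat" layout (illustrative only). */
struct wide_stat  { uint64_t ino; uint64_t size; uint32_t nlink; };
struct small_stat { uint32_t ino; uint32_t size; uint16_t nlink; };

static int cp_small_stat(const struct wide_stat *src, struct small_stat *dst)
{
    struct small_stat tmp;

    memset(&tmp, 0, sizeof(tmp));        /* no uninitialised padding escapes */

    tmp.ino = (uint32_t)src->ino;
    if (tmp.ino != src->ino)             /* value did not survive the narrowing */
        return -EOVERFLOW;

    tmp.size = (uint32_t)src->size;
    if (tmp.size != src->size)
        return -EOVERFLOW;

    tmp.nlink = (uint16_t)src->nlink;
    if (tmp.nlink != src->nlink)
        return -EOVERFLOW;

    memcpy(dst, &tmp, sizeof(tmp));      /* single copy, like copy_to_user() */
    return 0;
}

int main(void)
{
    struct wide_stat w = { .ino = 42, .size = 1u << 20, .nlink = 3 };
    struct small_stat s;

    printf("fits: %d\n", cp_small_stat(&w, &s));    /* prints 0 */
    w.size = (uint64_t)1 << 40;
    printf("too big: %d\n", cp_small_stat(&w, &s)); /* prints -EOVERFLOW */
    return 0;
}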
diff --git a/fs/dcache.c b/fs/dcache.c index 16a53cc2cc02..138be96e25b6 100644 --- a/fs/dcache.c +++ b/fs/dcache.c | |||
@@ -104,7 +104,7 @@ static unsigned int d_hash_shift __read_mostly; | |||
104 | 104 | ||
105 | static struct hlist_bl_head *dentry_hashtable __read_mostly; | 105 | static struct hlist_bl_head *dentry_hashtable __read_mostly; |
106 | 106 | ||
107 | static inline struct hlist_bl_head *d_hash(struct dentry *parent, | 107 | static inline struct hlist_bl_head *d_hash(const struct dentry *parent, |
108 | unsigned long hash) | 108 | unsigned long hash) |
109 | { | 109 | { |
110 | hash += ((unsigned long) parent ^ GOLDEN_RATIO_PRIME) / L1_CACHE_BYTES; | 110 | hash += ((unsigned long) parent ^ GOLDEN_RATIO_PRIME) / L1_CACHE_BYTES; |
@@ -1717,8 +1717,9 @@ EXPORT_SYMBOL(d_add_ci); | |||
1717 | * child is looked up. Thus, an interlocking stepping of sequence lock checks | 1717 | * child is looked up. Thus, an interlocking stepping of sequence lock checks |
1718 | * is formed, giving integrity down the path walk. | 1718 | * is formed, giving integrity down the path walk. |
1719 | */ | 1719 | */ |
1720 | struct dentry *__d_lookup_rcu(struct dentry *parent, struct qstr *name, | 1720 | struct dentry *__d_lookup_rcu(const struct dentry *parent, |
1721 | unsigned *seq, struct inode **inode) | 1721 | const struct qstr *name, |
1722 | unsigned *seqp, struct inode **inode) | ||
1722 | { | 1723 | { |
1723 | unsigned int len = name->len; | 1724 | unsigned int len = name->len; |
1724 | unsigned int hash = name->hash; | 1725 | unsigned int hash = name->hash; |
@@ -1748,6 +1749,7 @@ struct dentry *__d_lookup_rcu(struct dentry *parent, struct qstr *name, | |||
1748 | * See Documentation/filesystems/path-lookup.txt for more details. | 1749 | * See Documentation/filesystems/path-lookup.txt for more details. |
1749 | */ | 1750 | */ |
1750 | hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) { | 1751 | hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) { |
1752 | unsigned seq; | ||
1751 | struct inode *i; | 1753 | struct inode *i; |
1752 | const char *tname; | 1754 | const char *tname; |
1753 | int tlen; | 1755 | int tlen; |
@@ -1756,7 +1758,7 @@ struct dentry *__d_lookup_rcu(struct dentry *parent, struct qstr *name, | |||
1756 | continue; | 1758 | continue; |
1757 | 1759 | ||
1758 | seqretry: | 1760 | seqretry: |
1759 | *seq = read_seqcount_begin(&dentry->d_seq); | 1761 | seq = read_seqcount_begin(&dentry->d_seq); |
1760 | if (dentry->d_parent != parent) | 1762 | if (dentry->d_parent != parent) |
1761 | continue; | 1763 | continue; |
1762 | if (d_unhashed(dentry)) | 1764 | if (d_unhashed(dentry)) |
@@ -1771,7 +1773,7 @@ seqretry: | |||
1771 | * edge of memory when walking. If we could load this | 1773 | * edge of memory when walking. If we could load this |
1772 | * atomically some other way, we could drop this check. | 1774 | * atomically some other way, we could drop this check. |
1773 | */ | 1775 | */ |
1774 | if (read_seqcount_retry(&dentry->d_seq, *seq)) | 1776 | if (read_seqcount_retry(&dentry->d_seq, seq)) |
1775 | goto seqretry; | 1777 | goto seqretry; |
1776 | if (unlikely(parent->d_flags & DCACHE_OP_COMPARE)) { | 1778 | if (unlikely(parent->d_flags & DCACHE_OP_COMPARE)) { |
1777 | if (parent->d_op->d_compare(parent, *inode, | 1779 | if (parent->d_op->d_compare(parent, *inode, |
@@ -1788,6 +1790,7 @@ seqretry: | |||
1788 | * order to do anything useful with the returned dentry | 1790 | * order to do anything useful with the returned dentry |
1789 | * anyway. | 1791 | * anyway. |
1790 | */ | 1792 | */ |
1793 | *seqp = seq; | ||
1791 | *inode = i; | 1794 | *inode = i; |
1792 | return dentry; | 1795 | return dentry; |
1793 | } | 1796 | } |
@@ -2968,7 +2971,7 @@ __setup("dhash_entries=", set_dhash_entries); | |||
2968 | 2971 | ||
2969 | static void __init dcache_init_early(void) | 2972 | static void __init dcache_init_early(void) |
2970 | { | 2973 | { |
2971 | int loop; | 2974 | unsigned int loop; |
2972 | 2975 | ||
2973 | /* If hashes are distributed across NUMA nodes, defer | 2976 | /* If hashes are distributed across NUMA nodes, defer |
2974 | * hash allocation until vmalloc space is available. | 2977 | * hash allocation until vmalloc space is available. |
@@ -2986,13 +2989,13 @@ static void __init dcache_init_early(void) | |||
2986 | &d_hash_mask, | 2989 | &d_hash_mask, |
2987 | 0); | 2990 | 0); |
2988 | 2991 | ||
2989 | for (loop = 0; loop < (1 << d_hash_shift); loop++) | 2992 | for (loop = 0; loop < (1U << d_hash_shift); loop++) |
2990 | INIT_HLIST_BL_HEAD(dentry_hashtable + loop); | 2993 | INIT_HLIST_BL_HEAD(dentry_hashtable + loop); |
2991 | } | 2994 | } |
2992 | 2995 | ||
2993 | static void __init dcache_init(void) | 2996 | static void __init dcache_init(void) |
2994 | { | 2997 | { |
2995 | int loop; | 2998 | unsigned int loop; |
2996 | 2999 | ||
2997 | /* | 3000 | /* |
2998 | * A constructor could be added for stable state like the lists, | 3001 | * A constructor could be added for stable state like the lists, |
@@ -3016,7 +3019,7 @@ static void __init dcache_init(void) | |||
3016 | &d_hash_mask, | 3019 | &d_hash_mask, |
3017 | 0); | 3020 | 0); |
3018 | 3021 | ||
3019 | for (loop = 0; loop < (1 << d_hash_shift); loop++) | 3022 | for (loop = 0; loop < (1U << d_hash_shift); loop++) |
3020 | INIT_HLIST_BL_HEAD(dentry_hashtable + loop); | 3023 | INIT_HLIST_BL_HEAD(dentry_hashtable + loop); |
3021 | } | 3024 | } |
3022 | 3025 | ||
diff --git a/fs/direct-io.c b/fs/direct-io.c index 4a588dbd11bf..f4aadd15b613 100644 --- a/fs/direct-io.c +++ b/fs/direct-io.c | |||
@@ -173,7 +173,7 @@ void inode_dio_wait(struct inode *inode) | |||
173 | if (atomic_read(&inode->i_dio_count)) | 173 | if (atomic_read(&inode->i_dio_count)) |
174 | __inode_dio_wait(inode); | 174 | __inode_dio_wait(inode); |
175 | } | 175 | } |
176 | EXPORT_SYMBOL_GPL(inode_dio_wait); | 176 | EXPORT_SYMBOL(inode_dio_wait); |
177 | 177 | ||
178 | /* | 178 | /* |
179 | * inode_dio_done - signal finish of a direct I/O request | 179 | * inode_dio_done - signal finish of a direct I/O request |
@@ -187,7 +187,7 @@ void inode_dio_done(struct inode *inode) | |||
187 | if (atomic_dec_and_test(&inode->i_dio_count)) | 187 | if (atomic_dec_and_test(&inode->i_dio_count)) |
188 | wake_up_bit(&inode->i_state, __I_DIO_WAKEUP); | 188 | wake_up_bit(&inode->i_state, __I_DIO_WAKEUP); |
189 | } | 189 | } |
190 | EXPORT_SYMBOL_GPL(inode_dio_done); | 190 | EXPORT_SYMBOL(inode_dio_done); |
191 | 191 | ||
192 | /* | 192 | /* |
193 | * How many pages are in the queue? | 193 | * How many pages are in the queue? |
diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c index 349209dc6a91..3a06f4043df4 100644 --- a/fs/ecryptfs/miscdev.c +++ b/fs/ecryptfs/miscdev.c | |||
@@ -429,7 +429,7 @@ ecryptfs_miscdev_write(struct file *file, const char __user *buf, | |||
429 | goto memdup; | 429 | goto memdup; |
430 | } else if (count < MIN_MSG_PKT_SIZE || count > MAX_MSG_PKT_SIZE) { | 430 | } else if (count < MIN_MSG_PKT_SIZE || count > MAX_MSG_PKT_SIZE) { |
431 | printk(KERN_WARNING "%s: Acceptable packet size range is " | 431 | printk(KERN_WARNING "%s: Acceptable packet size range is " |
432 | "[%d-%lu], but amount of data written is [%zu].", | 432 | "[%d-%zu], but amount of data written is [%zu].", |
433 | __func__, MIN_MSG_PKT_SIZE, MAX_MSG_PKT_SIZE, count); | 433 | __func__, MIN_MSG_PKT_SIZE, MAX_MSG_PKT_SIZE, count); |
434 | return -EINVAL; | 434 | return -EINVAL; |
435 | } | 435 | } |
diff --git a/fs/eventpoll.c b/fs/eventpoll.c index aabdfc38cf24..ea54cdef04dd 100644 --- a/fs/eventpoll.c +++ b/fs/eventpoll.c | |||
@@ -320,6 +320,11 @@ static inline int ep_is_linked(struct list_head *p) | |||
320 | return !list_empty(p); | 320 | return !list_empty(p); |
321 | } | 321 | } |
322 | 322 | ||
323 | static inline struct eppoll_entry *ep_pwq_from_wait(wait_queue_t *p) | ||
324 | { | ||
325 | return container_of(p, struct eppoll_entry, wait); | ||
326 | } | ||
327 | |||
323 | /* Get the "struct epitem" from a wait queue pointer */ | 328 | /* Get the "struct epitem" from a wait queue pointer */ |
324 | static inline struct epitem *ep_item_from_wait(wait_queue_t *p) | 329 | static inline struct epitem *ep_item_from_wait(wait_queue_t *p) |
325 | { | 330 | { |
@@ -467,6 +472,18 @@ static void ep_poll_safewake(wait_queue_head_t *wq) | |||
467 | put_cpu(); | 472 | put_cpu(); |
468 | } | 473 | } |
469 | 474 | ||
475 | static void ep_remove_wait_queue(struct eppoll_entry *pwq) | ||
476 | { | ||
477 | wait_queue_head_t *whead; | ||
478 | |||
479 | rcu_read_lock(); | ||
480 | /* If it is cleared by POLLFREE, it should be rcu-safe */ | ||
481 | whead = rcu_dereference(pwq->whead); | ||
482 | if (whead) | ||
483 | remove_wait_queue(whead, &pwq->wait); | ||
484 | rcu_read_unlock(); | ||
485 | } | ||
486 | |||
470 | /* | 487 | /* |
471 | * This function unregisters poll callbacks from the associated file | 488 | * This function unregisters poll callbacks from the associated file |
472 | * descriptor. Must be called with "mtx" held (or "epmutex" if called from | 489 | * descriptor. Must be called with "mtx" held (or "epmutex" if called from |
@@ -481,7 +498,7 @@ static void ep_unregister_pollwait(struct eventpoll *ep, struct epitem *epi) | |||
481 | pwq = list_first_entry(lsthead, struct eppoll_entry, llink); | 498 | pwq = list_first_entry(lsthead, struct eppoll_entry, llink); |
482 | 499 | ||
483 | list_del(&pwq->llink); | 500 | list_del(&pwq->llink); |
484 | remove_wait_queue(pwq->whead, &pwq->wait); | 501 | ep_remove_wait_queue(pwq); |
485 | kmem_cache_free(pwq_cache, pwq); | 502 | kmem_cache_free(pwq_cache, pwq); |
486 | } | 503 | } |
487 | } | 504 | } |
@@ -842,6 +859,17 @@ static int ep_poll_callback(wait_queue_t *wait, unsigned mode, int sync, void *k | |||
842 | struct epitem *epi = ep_item_from_wait(wait); | 859 | struct epitem *epi = ep_item_from_wait(wait); |
843 | struct eventpoll *ep = epi->ep; | 860 | struct eventpoll *ep = epi->ep; |
844 | 861 | ||
862 | if ((unsigned long)key & POLLFREE) { | ||
863 | ep_pwq_from_wait(wait)->whead = NULL; | ||
864 | /* | ||
865 | * whead = NULL above can race with ep_remove_wait_queue() | ||
866 | * which can do another remove_wait_queue() after us, so we | ||
867 | * can't use __remove_wait_queue(). whead->lock is held by | ||
868 | * the caller. | ||
869 | */ | ||
870 | list_del_init(&wait->task_list); | ||
871 | } | ||
872 | |||
845 | spin_lock_irqsave(&ep->lock, flags); | 873 | spin_lock_irqsave(&ep->lock, flags); |
846 | 874 | ||
847 | /* | 875 | /* |
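The eventpoll changes add two helpers: ep_pwq_from_wait() recovers the enclosing eppoll_entry from the embedded wait queue entry with container_of(), and ep_remove_wait_queue() re-reads pwq->whead under rcu_read_lock() because the new POLLFREE branch in ep_poll_callback() may have cleared it when the wait queue head was about to disappear. A small userspace sketch of the container_of() idiom the first helper relies on (the struct names below are made up):

#include <stddef.h>
#include <stdio.h>

/* Same idea as the kernel macro: member pointer -> enclosing struct pointer. */
#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct wait_entry { int dummy; };

struct poll_entry {
    const char       *owner;
    struct wait_entry wait;   /* embedded member handed to the wait queue */
};

static struct poll_entry *pwq_from_wait(struct wait_entry *w)
{
    return container_of(w, struct poll_entry, wait);
}

int main(void)
{
    struct poll_entry pe = { .owner = "epoll item" };
    struct wait_entry *w = &pe.wait;    /* callbacks only ever see this pointer */

    printf("owner = %s\n", pwq_from_wait(w)->owner);
    return 0;
}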
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c index 376816fcd040..351a3e797789 100644 --- a/fs/gfs2/glock.c +++ b/fs/gfs2/glock.c | |||
@@ -167,14 +167,19 @@ void gfs2_glock_add_to_lru(struct gfs2_glock *gl) | |||
167 | spin_unlock(&lru_lock); | 167 | spin_unlock(&lru_lock); |
168 | } | 168 | } |
169 | 169 | ||
170 | static void gfs2_glock_remove_from_lru(struct gfs2_glock *gl) | 170 | static void __gfs2_glock_remove_from_lru(struct gfs2_glock *gl) |
171 | { | 171 | { |
172 | spin_lock(&lru_lock); | ||
173 | if (!list_empty(&gl->gl_lru)) { | 172 | if (!list_empty(&gl->gl_lru)) { |
174 | list_del_init(&gl->gl_lru); | 173 | list_del_init(&gl->gl_lru); |
175 | atomic_dec(&lru_count); | 174 | atomic_dec(&lru_count); |
176 | clear_bit(GLF_LRU, &gl->gl_flags); | 175 | clear_bit(GLF_LRU, &gl->gl_flags); |
177 | } | 176 | } |
177 | } | ||
178 | |||
179 | static void gfs2_glock_remove_from_lru(struct gfs2_glock *gl) | ||
180 | { | ||
181 | spin_lock(&lru_lock); | ||
182 | __gfs2_glock_remove_from_lru(gl); | ||
178 | spin_unlock(&lru_lock); | 183 | spin_unlock(&lru_lock); |
179 | } | 184 | } |
180 | 185 | ||
@@ -217,11 +222,12 @@ void gfs2_glock_put(struct gfs2_glock *gl) | |||
217 | struct gfs2_sbd *sdp = gl->gl_sbd; | 222 | struct gfs2_sbd *sdp = gl->gl_sbd; |
218 | struct address_space *mapping = gfs2_glock2aspace(gl); | 223 | struct address_space *mapping = gfs2_glock2aspace(gl); |
219 | 224 | ||
220 | if (atomic_dec_and_test(&gl->gl_ref)) { | 225 | if (atomic_dec_and_lock(&gl->gl_ref, &lru_lock)) { |
226 | __gfs2_glock_remove_from_lru(gl); | ||
227 | spin_unlock(&lru_lock); | ||
221 | spin_lock_bucket(gl->gl_hash); | 228 | spin_lock_bucket(gl->gl_hash); |
222 | hlist_bl_del_rcu(&gl->gl_list); | 229 | hlist_bl_del_rcu(&gl->gl_list); |
223 | spin_unlock_bucket(gl->gl_hash); | 230 | spin_unlock_bucket(gl->gl_hash); |
224 | gfs2_glock_remove_from_lru(gl); | ||
225 | GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders)); | 231 | GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders)); |
226 | GLOCK_BUG_ON(gl, mapping && mapping->nrpages); | 232 | GLOCK_BUG_ON(gl, mapping && mapping->nrpages); |
227 | trace_gfs2_glock_put(gl); | 233 | trace_gfs2_glock_put(gl); |
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c index a7d611b93f0f..56987460cdae 100644 --- a/fs/gfs2/inode.c +++ b/fs/gfs2/inode.c | |||
@@ -391,10 +391,6 @@ static int alloc_dinode(struct gfs2_inode *dip, u64 *no_addr, u64 *generation) | |||
391 | int error; | 391 | int error; |
392 | int dblocks = 1; | 392 | int dblocks = 1; |
393 | 393 | ||
394 | error = gfs2_rindex_update(sdp); | ||
395 | if (error) | ||
396 | fs_warn(sdp, "rindex update returns %d\n", error); | ||
397 | |||
398 | error = gfs2_inplace_reserve(dip, RES_DINODE); | 394 | error = gfs2_inplace_reserve(dip, RES_DINODE); |
399 | if (error) | 395 | if (error) |
400 | goto out; | 396 | goto out; |
@@ -1043,6 +1039,7 @@ static int gfs2_unlink(struct inode *dir, struct dentry *dentry) | |||
1043 | rgd = gfs2_blk2rgrpd(sdp, ip->i_no_addr); | 1039 | rgd = gfs2_blk2rgrpd(sdp, ip->i_no_addr); |
1044 | if (!rgd) | 1040 | if (!rgd) |
1045 | goto out_inodes; | 1041 | goto out_inodes; |
1042 | |||
1046 | gfs2_holder_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, ghs + 2); | 1043 | gfs2_holder_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, ghs + 2); |
1047 | 1044 | ||
1048 | 1045 | ||
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c index 6aacf3f230a2..24f609c9ef91 100644 --- a/fs/gfs2/ops_fstype.c +++ b/fs/gfs2/ops_fstype.c | |||
@@ -800,6 +800,11 @@ static int init_inodes(struct gfs2_sbd *sdp, int undo) | |||
800 | fs_err(sdp, "can't get quota file inode: %d\n", error); | 800 | fs_err(sdp, "can't get quota file inode: %d\n", error); |
801 | goto fail_rindex; | 801 | goto fail_rindex; |
802 | } | 802 | } |
803 | |||
804 | error = gfs2_rindex_update(sdp); | ||
805 | if (error) | ||
806 | goto fail_qinode; | ||
807 | |||
803 | return 0; | 808 | return 0; |
804 | 809 | ||
805 | fail_qinode: | 810 | fail_qinode: |
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c index 981bfa32121a..49ada95209d0 100644 --- a/fs/gfs2/rgrp.c +++ b/fs/gfs2/rgrp.c | |||
@@ -683,16 +683,21 @@ int gfs2_rindex_update(struct gfs2_sbd *sdp) | |||
683 | struct gfs2_glock *gl = ip->i_gl; | 683 | struct gfs2_glock *gl = ip->i_gl; |
684 | struct gfs2_holder ri_gh; | 684 | struct gfs2_holder ri_gh; |
685 | int error = 0; | 685 | int error = 0; |
686 | int unlock_required = 0; | ||
686 | 687 | ||
687 | /* Read new copy from disk if we don't have the latest */ | 688 | /* Read new copy from disk if we don't have the latest */ |
688 | if (!sdp->sd_rindex_uptodate) { | 689 | if (!sdp->sd_rindex_uptodate) { |
689 | mutex_lock(&sdp->sd_rindex_mutex); | 690 | mutex_lock(&sdp->sd_rindex_mutex); |
690 | error = gfs2_glock_nq_init(gl, LM_ST_SHARED, 0, &ri_gh); | 691 | if (!gfs2_glock_is_locked_by_me(gl)) { |
691 | if (error) | 692 | error = gfs2_glock_nq_init(gl, LM_ST_SHARED, 0, &ri_gh); |
692 | return error; | 693 | if (error) |
694 | return error; | ||
695 | unlock_required = 1; | ||
696 | } | ||
693 | if (!sdp->sd_rindex_uptodate) | 697 | if (!sdp->sd_rindex_uptodate) |
694 | error = gfs2_ri_update(ip); | 698 | error = gfs2_ri_update(ip); |
695 | gfs2_glock_dq_uninit(&ri_gh); | 699 | if (unlock_required) |
700 | gfs2_glock_dq_uninit(&ri_gh); | ||
696 | mutex_unlock(&sdp->sd_rindex_mutex); | 701 | mutex_unlock(&sdp->sd_rindex_mutex); |
697 | } | 702 | } |
698 | 703 | ||
diff --git a/fs/inode.c b/fs/inode.c index fb10d86ffad7..d3ebdbe723d0 100644 --- a/fs/inode.c +++ b/fs/inode.c | |||
@@ -1651,7 +1651,7 @@ __setup("ihash_entries=", set_ihash_entries); | |||
1651 | */ | 1651 | */ |
1652 | void __init inode_init_early(void) | 1652 | void __init inode_init_early(void) |
1653 | { | 1653 | { |
1654 | int loop; | 1654 | unsigned int loop; |
1655 | 1655 | ||
1656 | /* If hashes are distributed across NUMA nodes, defer | 1656 | /* If hashes are distributed across NUMA nodes, defer |
1657 | * hash allocation until vmalloc space is available. | 1657 | * hash allocation until vmalloc space is available. |
@@ -1669,13 +1669,13 @@ void __init inode_init_early(void) | |||
1669 | &i_hash_mask, | 1669 | &i_hash_mask, |
1670 | 0); | 1670 | 0); |
1671 | 1671 | ||
1672 | for (loop = 0; loop < (1 << i_hash_shift); loop++) | 1672 | for (loop = 0; loop < (1U << i_hash_shift); loop++) |
1673 | INIT_HLIST_HEAD(&inode_hashtable[loop]); | 1673 | INIT_HLIST_HEAD(&inode_hashtable[loop]); |
1674 | } | 1674 | } |
1675 | 1675 | ||
1676 | void __init inode_init(void) | 1676 | void __init inode_init(void) |
1677 | { | 1677 | { |
1678 | int loop; | 1678 | unsigned int loop; |
1679 | 1679 | ||
1680 | /* inode slab cache */ | 1680 | /* inode slab cache */ |
1681 | inode_cachep = kmem_cache_create("inode_cache", | 1681 | inode_cachep = kmem_cache_create("inode_cache", |
@@ -1699,7 +1699,7 @@ void __init inode_init(void) | |||
1699 | &i_hash_mask, | 1699 | &i_hash_mask, |
1700 | 0); | 1700 | 0); |
1701 | 1701 | ||
1702 | for (loop = 0; loop < (1 << i_hash_shift); loop++) | 1702 | for (loop = 0; loop < (1U << i_hash_shift); loop++) |
1703 | INIT_HLIST_HEAD(&inode_hashtable[loop]); | 1703 | INIT_HLIST_HEAD(&inode_hashtable[loop]); |
1704 | } | 1704 | } |
1705 | 1705 | ||
diff --git a/fs/namei.c b/fs/namei.c index 208c6aa4a989..e2ba62820a0f 100644 --- a/fs/namei.c +++ b/fs/namei.c | |||
@@ -1095,8 +1095,10 @@ static struct dentry *d_inode_lookup(struct dentry *parent, struct dentry *dentr | |||
1095 | struct dentry *old; | 1095 | struct dentry *old; |
1096 | 1096 | ||
1097 | /* Don't create child dentry for a dead directory. */ | 1097 | /* Don't create child dentry for a dead directory. */ |
1098 | if (unlikely(IS_DEADDIR(inode))) | 1098 | if (unlikely(IS_DEADDIR(inode))) { |
1099 | dput(dentry); | ||
1099 | return ERR_PTR(-ENOENT); | 1100 | return ERR_PTR(-ENOENT); |
1101 | } | ||
1100 | 1102 | ||
1101 | old = inode->i_op->lookup(inode, dentry, nd); | 1103 | old = inode->i_op->lookup(inode, dentry, nd); |
1102 | if (unlikely(old)) { | 1104 | if (unlikely(old)) { |
@@ -1372,6 +1374,34 @@ static inline int can_lookup(struct inode *inode) | |||
1372 | return 1; | 1374 | return 1; |
1373 | } | 1375 | } |
1374 | 1376 | ||
1377 | unsigned int full_name_hash(const unsigned char *name, unsigned int len) | ||
1378 | { | ||
1379 | unsigned long hash = init_name_hash(); | ||
1380 | while (len--) | ||
1381 | hash = partial_name_hash(*name++, hash); | ||
1382 | return end_name_hash(hash); | ||
1383 | } | ||
1384 | EXPORT_SYMBOL(full_name_hash); | ||
1385 | |||
1386 | /* | ||
1387 | * We know there's a real path component here of at least | ||
1388 | * one character. | ||
1389 | */ | ||
1390 | static inline unsigned long hash_name(const char *name, unsigned int *hashp) | ||
1391 | { | ||
1392 | unsigned long hash = init_name_hash(); | ||
1393 | unsigned long len = 0, c; | ||
1394 | |||
1395 | c = (unsigned char)*name; | ||
1396 | do { | ||
1397 | len++; | ||
1398 | hash = partial_name_hash(c, hash); | ||
1399 | c = (unsigned char)name[len]; | ||
1400 | } while (c && c != '/'); | ||
1401 | *hashp = end_name_hash(hash); | ||
1402 | return len; | ||
1403 | } | ||
1404 | |||
1375 | /* | 1405 | /* |
1376 | * Name resolution. | 1406 | * Name resolution. |
1377 | * This is the basic name resolution function, turning a pathname into | 1407 | * This is the basic name resolution function, turning a pathname into |
@@ -1392,31 +1422,22 @@ static int link_path_walk(const char *name, struct nameidata *nd) | |||
1392 | 1422 | ||
1393 | /* At this point we know we have a real path component. */ | 1423 | /* At this point we know we have a real path component. */ |
1394 | for(;;) { | 1424 | for(;;) { |
1395 | unsigned long hash; | ||
1396 | struct qstr this; | 1425 | struct qstr this; |
1397 | unsigned int c; | 1426 | long len; |
1398 | int type; | 1427 | int type; |
1399 | 1428 | ||
1400 | err = may_lookup(nd); | 1429 | err = may_lookup(nd); |
1401 | if (err) | 1430 | if (err) |
1402 | break; | 1431 | break; |
1403 | 1432 | ||
1433 | len = hash_name(name, &this.hash); | ||
1404 | this.name = name; | 1434 | this.name = name; |
1405 | c = *(const unsigned char *)name; | 1435 | this.len = len; |
1406 | |||
1407 | hash = init_name_hash(); | ||
1408 | do { | ||
1409 | name++; | ||
1410 | hash = partial_name_hash(c, hash); | ||
1411 | c = *(const unsigned char *)name; | ||
1412 | } while (c && (c != '/')); | ||
1413 | this.len = name - (const char *) this.name; | ||
1414 | this.hash = end_name_hash(hash); | ||
1415 | 1436 | ||
1416 | type = LAST_NORM; | 1437 | type = LAST_NORM; |
1417 | if (this.name[0] == '.') switch (this.len) { | 1438 | if (name[0] == '.') switch (len) { |
1418 | case 2: | 1439 | case 2: |
1419 | if (this.name[1] == '.') { | 1440 | if (name[1] == '.') { |
1420 | type = LAST_DOTDOT; | 1441 | type = LAST_DOTDOT; |
1421 | nd->flags |= LOOKUP_JUMPED; | 1442 | nd->flags |= LOOKUP_JUMPED; |
1422 | } | 1443 | } |
@@ -1435,12 +1456,18 @@ static int link_path_walk(const char *name, struct nameidata *nd) | |||
1435 | } | 1456 | } |
1436 | } | 1457 | } |
1437 | 1458 | ||
1438 | /* remove trailing slashes? */ | 1459 | if (!name[len]) |
1439 | if (!c) | ||
1440 | goto last_component; | 1460 | goto last_component; |
1441 | while (*++name == '/'); | 1461 | /* |
1442 | if (!*name) | 1462 | * If it wasn't NUL, we know it was '/'. Skip that |
1463 | * slash, and continue until no more slashes. | ||
1464 | */ | ||
1465 | do { | ||
1466 | len++; | ||
1467 | } while (unlikely(name[len] == '/')); | ||
1468 | if (!name[len]) | ||
1443 | goto last_component; | 1469 | goto last_component; |
1470 | name += len; | ||
1444 | 1471 | ||
1445 | err = walk_component(nd, &next, &this, type, LOOKUP_FOLLOW); | 1472 | err = walk_component(nd, &next, &this, type, LOOKUP_FOLLOW); |
1446 | if (err < 0) | 1473 | if (err < 0) |
@@ -1773,24 +1800,21 @@ static struct dentry *lookup_hash(struct nameidata *nd) | |||
1773 | struct dentry *lookup_one_len(const char *name, struct dentry *base, int len) | 1800 | struct dentry *lookup_one_len(const char *name, struct dentry *base, int len) |
1774 | { | 1801 | { |
1775 | struct qstr this; | 1802 | struct qstr this; |
1776 | unsigned long hash; | ||
1777 | unsigned int c; | 1803 | unsigned int c; |
1778 | 1804 | ||
1779 | WARN_ON_ONCE(!mutex_is_locked(&base->d_inode->i_mutex)); | 1805 | WARN_ON_ONCE(!mutex_is_locked(&base->d_inode->i_mutex)); |
1780 | 1806 | ||
1781 | this.name = name; | 1807 | this.name = name; |
1782 | this.len = len; | 1808 | this.len = len; |
1809 | this.hash = full_name_hash(name, len); | ||
1783 | if (!len) | 1810 | if (!len) |
1784 | return ERR_PTR(-EACCES); | 1811 | return ERR_PTR(-EACCES); |
1785 | 1812 | ||
1786 | hash = init_name_hash(); | ||
1787 | while (len--) { | 1813 | while (len--) { |
1788 | c = *(const unsigned char *)name++; | 1814 | c = *(const unsigned char *)name++; |
1789 | if (c == '/' || c == '\0') | 1815 | if (c == '/' || c == '\0') |
1790 | return ERR_PTR(-EACCES); | 1816 | return ERR_PTR(-EACCES); |
1791 | hash = partial_name_hash(c, hash); | ||
1792 | } | 1817 | } |
1793 | this.hash = end_name_hash(hash); | ||
1794 | /* | 1818 | /* |
1795 | * See if the low-level filesystem might want | 1819 | * See if the low-level filesystem might want |
1796 | * to use its own hash.. | 1820 | * to use its own hash.. |
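The namei.c rework pulls the per-character hashing out of link_path_walk() into full_name_hash() and a new hash_name() helper that walks a single component, stops at '/' or NUL, and returns both the component length and its hash in one pass, which is what the simplified trailing-slash handling further up relies on. A standalone sketch of that loop, with a simple mixing function standing in for the kernel's init/partial/end_name_hash helpers (the mixing step is an assumption, not the kernel's):

#include <stdio.h>

/* Stand-ins for init_name_hash()/partial_name_hash()/end_name_hash(). */
static unsigned long init_hash(void) { return 0; }
static unsigned long step_hash(unsigned char c, unsigned long h)
{
    return (h + (c << 4) + (c >> 4)) * 11;   /* illustrative mixing step */
}
static unsigned int end_hash(unsigned long h) { return (unsigned int)h; }

/* Hash one path component; returns its length, stores the hash via *hashp. */
static unsigned long hash_name(const char *name, unsigned int *hashp)
{
    unsigned long hash = init_hash();
    unsigned long len = 0;
    unsigned char c = (unsigned char)*name;

    while (c && c != '/') {
        len++;
        hash = step_hash(c, hash);
        c = (unsigned char)name[len];
    }
    *hashp = end_hash(hash);
    return len;
}

int main(void)
{
    unsigned int hash;
    const char *path = "usr/share/doc";
    unsigned long len = hash_name(path, &hash);

    printf("component '%.*s' len=%lu hash=%#x\n", (int)len, path, len, hash);
    return 0;
}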
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index f0c849c98fe4..ec9f6ef6c5dd 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c | |||
@@ -3575,8 +3575,8 @@ static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t bu | |||
3575 | } | 3575 | } |
3576 | if (npages > 1) { | 3576 | if (npages > 1) { |
3577 | /* for decoding across pages */ | 3577 | /* for decoding across pages */ |
3578 | args.acl_scratch = alloc_page(GFP_KERNEL); | 3578 | res.acl_scratch = alloc_page(GFP_KERNEL); |
3579 | if (!args.acl_scratch) | 3579 | if (!res.acl_scratch) |
3580 | goto out_free; | 3580 | goto out_free; |
3581 | } | 3581 | } |
3582 | args.acl_len = npages * PAGE_SIZE; | 3582 | args.acl_len = npages * PAGE_SIZE; |
@@ -3612,8 +3612,8 @@ out_free: | |||
3612 | for (i = 0; i < npages; i++) | 3612 | for (i = 0; i < npages; i++) |
3613 | if (pages[i]) | 3613 | if (pages[i]) |
3614 | __free_page(pages[i]); | 3614 | __free_page(pages[i]); |
3615 | if (args.acl_scratch) | 3615 | if (res.acl_scratch) |
3616 | __free_page(args.acl_scratch); | 3616 | __free_page(res.acl_scratch); |
3617 | return ret; | 3617 | return ret; |
3618 | } | 3618 | } |
3619 | 3619 | ||
@@ -4883,8 +4883,10 @@ int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred) | |||
4883 | clp->cl_rpcclient->cl_auth->au_flavor); | 4883 | clp->cl_rpcclient->cl_auth->au_flavor); |
4884 | 4884 | ||
4885 | res.server_scope = kzalloc(sizeof(struct server_scope), GFP_KERNEL); | 4885 | res.server_scope = kzalloc(sizeof(struct server_scope), GFP_KERNEL); |
4886 | if (unlikely(!res.server_scope)) | 4886 | if (unlikely(!res.server_scope)) { |
4887 | return -ENOMEM; | 4887 | status = -ENOMEM; |
4888 | goto out; | ||
4889 | } | ||
4888 | 4890 | ||
4889 | status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); | 4891 | status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); |
4890 | if (!status) | 4892 | if (!status) |
@@ -4901,12 +4903,13 @@ int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred) | |||
4901 | clp->server_scope = NULL; | 4903 | clp->server_scope = NULL; |
4902 | } | 4904 | } |
4903 | 4905 | ||
4904 | if (!clp->server_scope) | 4906 | if (!clp->server_scope) { |
4905 | clp->server_scope = res.server_scope; | 4907 | clp->server_scope = res.server_scope; |
4906 | else | 4908 | goto out; |
4907 | kfree(res.server_scope); | 4909 | } |
4908 | } | 4910 | } |
4909 | 4911 | kfree(res.server_scope); | |
4912 | out: | ||
4910 | dprintk("<-- %s status= %d\n", __func__, status); | 4913 | dprintk("<-- %s status= %d\n", __func__, status); |
4911 | return status; | 4914 | return status; |
4912 | } | 4915 | } |
@@ -5008,37 +5011,53 @@ int nfs4_proc_get_lease_time(struct nfs_client *clp, struct nfs_fsinfo *fsinfo) | |||
5008 | return status; | 5011 | return status; |
5009 | } | 5012 | } |
5010 | 5013 | ||
5014 | static struct nfs4_slot *nfs4_alloc_slots(u32 max_slots, gfp_t gfp_flags) | ||
5015 | { | ||
5016 | return kcalloc(max_slots, sizeof(struct nfs4_slot), gfp_flags); | ||
5017 | } | ||
5018 | |||
5019 | static void nfs4_add_and_init_slots(struct nfs4_slot_table *tbl, | ||
5020 | struct nfs4_slot *new, | ||
5021 | u32 max_slots, | ||
5022 | u32 ivalue) | ||
5023 | { | ||
5024 | struct nfs4_slot *old = NULL; | ||
5025 | u32 i; | ||
5026 | |||
5027 | spin_lock(&tbl->slot_tbl_lock); | ||
5028 | if (new) { | ||
5029 | old = tbl->slots; | ||
5030 | tbl->slots = new; | ||
5031 | tbl->max_slots = max_slots; | ||
5032 | } | ||
5033 | tbl->highest_used_slotid = -1; /* no slot is currently used */ | ||
5034 | for (i = 0; i < tbl->max_slots; i++) | ||
5035 | tbl->slots[i].seq_nr = ivalue; | ||
5036 | spin_unlock(&tbl->slot_tbl_lock); | ||
5037 | kfree(old); | ||
5038 | } | ||
5039 | |||
5011 | /* | 5040 | /* |
5012 | * Reset a slot table | 5041 | * (re)Initialise a slot table |
5013 | */ | 5042 | */ |
5014 | static int nfs4_reset_slot_table(struct nfs4_slot_table *tbl, u32 max_reqs, | 5043 | static int nfs4_realloc_slot_table(struct nfs4_slot_table *tbl, u32 max_reqs, |
5015 | int ivalue) | 5044 | u32 ivalue) |
5016 | { | 5045 | { |
5017 | struct nfs4_slot *new = NULL; | 5046 | struct nfs4_slot *new = NULL; |
5018 | int i; | 5047 | int ret = -ENOMEM; |
5019 | int ret = 0; | ||
5020 | 5048 | ||
5021 | dprintk("--> %s: max_reqs=%u, tbl->max_slots %d\n", __func__, | 5049 | dprintk("--> %s: max_reqs=%u, tbl->max_slots %d\n", __func__, |
5022 | max_reqs, tbl->max_slots); | 5050 | max_reqs, tbl->max_slots); |
5023 | 5051 | ||
5024 | /* Does the newly negotiated max_reqs match the existing slot table? */ | 5052 | /* Does the newly negotiated max_reqs match the existing slot table? */ |
5025 | if (max_reqs != tbl->max_slots) { | 5053 | if (max_reqs != tbl->max_slots) { |
5026 | ret = -ENOMEM; | 5054 | new = nfs4_alloc_slots(max_reqs, GFP_NOFS); |
5027 | new = kmalloc(max_reqs * sizeof(struct nfs4_slot), | ||
5028 | GFP_NOFS); | ||
5029 | if (!new) | 5055 | if (!new) |
5030 | goto out; | 5056 | goto out; |
5031 | ret = 0; | ||
5032 | kfree(tbl->slots); | ||
5033 | } | 5057 | } |
5034 | spin_lock(&tbl->slot_tbl_lock); | 5058 | ret = 0; |
5035 | if (new) { | 5059 | |
5036 | tbl->slots = new; | 5060 | nfs4_add_and_init_slots(tbl, new, max_reqs, ivalue); |
5037 | tbl->max_slots = max_reqs; | ||
5038 | } | ||
5039 | for (i = 0; i < tbl->max_slots; ++i) | ||
5040 | tbl->slots[i].seq_nr = ivalue; | ||
5041 | spin_unlock(&tbl->slot_tbl_lock); | ||
5042 | dprintk("%s: tbl=%p slots=%p max_slots=%d\n", __func__, | 5061 | dprintk("%s: tbl=%p slots=%p max_slots=%d\n", __func__, |
5043 | tbl, tbl->slots, tbl->max_slots); | 5062 | tbl, tbl->slots, tbl->max_slots); |
5044 | out: | 5063 | out: |
@@ -5061,36 +5080,6 @@ static void nfs4_destroy_slot_tables(struct nfs4_session *session) | |||
5061 | } | 5080 | } |
5062 | 5081 | ||
5063 | /* | 5082 | /* |
5064 | * Initialize slot table | ||
5065 | */ | ||
5066 | static int nfs4_init_slot_table(struct nfs4_slot_table *tbl, | ||
5067 | int max_slots, int ivalue) | ||
5068 | { | ||
5069 | struct nfs4_slot *slot; | ||
5070 | int ret = -ENOMEM; | ||
5071 | |||
5072 | BUG_ON(max_slots > NFS4_MAX_SLOT_TABLE); | ||
5073 | |||
5074 | dprintk("--> %s: max_reqs=%u\n", __func__, max_slots); | ||
5075 | |||
5076 | slot = kcalloc(max_slots, sizeof(struct nfs4_slot), GFP_NOFS); | ||
5077 | if (!slot) | ||
5078 | goto out; | ||
5079 | ret = 0; | ||
5080 | |||
5081 | spin_lock(&tbl->slot_tbl_lock); | ||
5082 | tbl->max_slots = max_slots; | ||
5083 | tbl->slots = slot; | ||
5084 | tbl->highest_used_slotid = -1; /* no slot is currently used */ | ||
5085 | spin_unlock(&tbl->slot_tbl_lock); | ||
5086 | dprintk("%s: tbl=%p slots=%p max_slots=%d\n", __func__, | ||
5087 | tbl, tbl->slots, tbl->max_slots); | ||
5088 | out: | ||
5089 | dprintk("<-- %s: return %d\n", __func__, ret); | ||
5090 | return ret; | ||
5091 | } | ||
5092 | |||
5093 | /* | ||
5094 | * Initialize or reset the forechannel and backchannel tables | 5083 | * Initialize or reset the forechannel and backchannel tables |
5095 | */ | 5084 | */ |
5096 | static int nfs4_setup_session_slot_tables(struct nfs4_session *ses) | 5085 | static int nfs4_setup_session_slot_tables(struct nfs4_session *ses) |
@@ -5101,25 +5090,16 @@ static int nfs4_setup_session_slot_tables(struct nfs4_session *ses) | |||
5101 | dprintk("--> %s\n", __func__); | 5090 | dprintk("--> %s\n", __func__); |
5102 | /* Fore channel */ | 5091 | /* Fore channel */ |
5103 | tbl = &ses->fc_slot_table; | 5092 | tbl = &ses->fc_slot_table; |
5104 | if (tbl->slots == NULL) { | 5093 | status = nfs4_realloc_slot_table(tbl, ses->fc_attrs.max_reqs, 1); |
5105 | status = nfs4_init_slot_table(tbl, ses->fc_attrs.max_reqs, 1); | 5094 | if (status) /* -ENOMEM */ |
5106 | if (status) /* -ENOMEM */ | 5095 | return status; |
5107 | return status; | ||
5108 | } else { | ||
5109 | status = nfs4_reset_slot_table(tbl, ses->fc_attrs.max_reqs, 1); | ||
5110 | if (status) | ||
5111 | return status; | ||
5112 | } | ||
5113 | /* Back channel */ | 5096 | /* Back channel */ |
5114 | tbl = &ses->bc_slot_table; | 5097 | tbl = &ses->bc_slot_table; |
5115 | if (tbl->slots == NULL) { | 5098 | status = nfs4_realloc_slot_table(tbl, ses->bc_attrs.max_reqs, 0); |
5116 | status = nfs4_init_slot_table(tbl, ses->bc_attrs.max_reqs, 0); | 5099 | if (status && tbl->slots == NULL) |
5117 | if (status) | 5100 | /* Fore and back channel share a connection so get |
5118 | /* Fore and back channel share a connection so get | 5101 | * both slot tables or neither */ |
5119 | * both slot tables or neither */ | 5102 | nfs4_destroy_slot_tables(ses); |
5120 | nfs4_destroy_slot_tables(ses); | ||
5121 | } else | ||
5122 | status = nfs4_reset_slot_table(tbl, ses->bc_attrs.max_reqs, 0); | ||
5123 | return status; | 5103 | return status; |
5124 | } | 5104 | } |
5125 | 5105 | ||
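nfs4_realloc_slot_table() now serves both first-time setup and renegotiation: the new slot array is allocated outside slot_tbl_lock, swapped in and reinitialised under the lock by nfs4_add_and_init_slots(), and the old array is freed after the lock is dropped. A hedged pthread analogue of that allocate-outside, swap-under-lock, free-outside pattern (types and locking are simplified):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct slot { unsigned int seq_nr; };

struct slot_table {
    pthread_mutex_t lock;
    struct slot    *slots;
    unsigned int    max_slots;
};

/* Grow (or create) the table; returns 0 on success, -1 on allocation failure. */
static int realloc_slot_table(struct slot_table *tbl, unsigned int max_reqs,
                              unsigned int ivalue)
{
    struct slot *new = NULL, *old = NULL;
    unsigned int i;

    if (max_reqs != tbl->max_slots) {
        new = calloc(max_reqs, sizeof(*new));   /* allocate outside the lock */
        if (!new)
            return -1;
    }

    pthread_mutex_lock(&tbl->lock);
    if (new) {
        old = tbl->slots;                       /* swap in the new array */
        tbl->slots = new;
        tbl->max_slots = max_reqs;
    }
    for (i = 0; i < tbl->max_slots; i++)        /* reset every sequence number */
        tbl->slots[i].seq_nr = ivalue;
    pthread_mutex_unlock(&tbl->lock);

    free(old);                                  /* old array freed outside the lock */
    return 0;
}

int main(void)
{
    struct slot_table tbl = { PTHREAD_MUTEX_INITIALIZER, NULL, 0 };

    if (realloc_slot_table(&tbl, 4, 1) == 0)
        printf("table has %u slots, slot 0 seq=%u\n",
               tbl.max_slots, tbl.slots[0].seq_nr);
    free(tbl.slots);
    return 0;
}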
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index a53f33b4ac3a..45392032e7bd 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c | |||
@@ -1132,6 +1132,8 @@ void nfs4_schedule_stateid_recovery(const struct nfs_server *server, struct nfs4 | |||
1132 | { | 1132 | { |
1133 | struct nfs_client *clp = server->nfs_client; | 1133 | struct nfs_client *clp = server->nfs_client; |
1134 | 1134 | ||
1135 | if (test_and_clear_bit(NFS_DELEGATED_STATE, &state->flags)) | ||
1136 | nfs_async_inode_return_delegation(state->inode, &state->stateid); | ||
1135 | nfs4_state_mark_reclaim_nograce(clp, state); | 1137 | nfs4_state_mark_reclaim_nograce(clp, state); |
1136 | nfs4_schedule_state_manager(clp); | 1138 | nfs4_schedule_state_manager(clp); |
1137 | } | 1139 | } |
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index 95e92e438407..33bd8d0f745d 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c | |||
@@ -2522,7 +2522,6 @@ static void nfs4_xdr_enc_getacl(struct rpc_rqst *req, struct xdr_stream *xdr, | |||
2522 | 2522 | ||
2523 | xdr_inline_pages(&req->rq_rcv_buf, replen << 2, | 2523 | xdr_inline_pages(&req->rq_rcv_buf, replen << 2, |
2524 | args->acl_pages, args->acl_pgbase, args->acl_len); | 2524 | args->acl_pages, args->acl_pgbase, args->acl_len); |
2525 | xdr_set_scratch_buffer(xdr, page_address(args->acl_scratch), PAGE_SIZE); | ||
2526 | 2525 | ||
2527 | encode_nops(&hdr); | 2526 | encode_nops(&hdr); |
2528 | } | 2527 | } |
@@ -6032,6 +6031,10 @@ nfs4_xdr_dec_getacl(struct rpc_rqst *rqstp, struct xdr_stream *xdr, | |||
6032 | struct compound_hdr hdr; | 6031 | struct compound_hdr hdr; |
6033 | int status; | 6032 | int status; |
6034 | 6033 | ||
6034 | if (res->acl_scratch != NULL) { | ||
6035 | void *p = page_address(res->acl_scratch); | ||
6036 | xdr_set_scratch_buffer(xdr, p, PAGE_SIZE); | ||
6037 | } | ||
6035 | status = decode_compound_hdr(xdr, &hdr); | 6038 | status = decode_compound_hdr(xdr, &hdr); |
6036 | if (status) | 6039 | if (status) |
6037 | goto out; | 6040 | goto out; |
diff --git a/fs/ntfs/attrib.c b/fs/ntfs/attrib.c index f14fde2b03d6..e0281992ddc3 100644 --- a/fs/ntfs/attrib.c +++ b/fs/ntfs/attrib.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /** | 1 | /** |
2 | * attrib.c - NTFS attribute operations. Part of the Linux-NTFS project. | 2 | * attrib.c - NTFS attribute operations. Part of the Linux-NTFS project. |
3 | * | 3 | * |
4 | * Copyright (c) 2001-2007 Anton Altaparmakov | 4 | * Copyright (c) 2001-2012 Anton Altaparmakov and Tuxera Inc. |
5 | * Copyright (c) 2002 Richard Russon | 5 | * Copyright (c) 2002 Richard Russon |
6 | * | 6 | * |
7 | * This program/include file is free software; you can redistribute it and/or | 7 | * This program/include file is free software; you can redistribute it and/or |
@@ -345,10 +345,10 @@ LCN ntfs_attr_vcn_to_lcn_nolock(ntfs_inode *ni, const VCN vcn, | |||
345 | unsigned long flags; | 345 | unsigned long flags; |
346 | bool is_retry = false; | 346 | bool is_retry = false; |
347 | 347 | ||
348 | BUG_ON(!ni); | ||
348 | ntfs_debug("Entering for i_ino 0x%lx, vcn 0x%llx, %s_locked.", | 349 | ntfs_debug("Entering for i_ino 0x%lx, vcn 0x%llx, %s_locked.", |
349 | ni->mft_no, (unsigned long long)vcn, | 350 | ni->mft_no, (unsigned long long)vcn, |
350 | write_locked ? "write" : "read"); | 351 | write_locked ? "write" : "read"); |
351 | BUG_ON(!ni); | ||
352 | BUG_ON(!NInoNonResident(ni)); | 352 | BUG_ON(!NInoNonResident(ni)); |
353 | BUG_ON(vcn < 0); | 353 | BUG_ON(vcn < 0); |
354 | if (!ni->runlist.rl) { | 354 | if (!ni->runlist.rl) { |
@@ -469,9 +469,9 @@ runlist_element *ntfs_attr_find_vcn_nolock(ntfs_inode *ni, const VCN vcn, | |||
469 | int err = 0; | 469 | int err = 0; |
470 | bool is_retry = false; | 470 | bool is_retry = false; |
471 | 471 | ||
472 | BUG_ON(!ni); | ||
472 | ntfs_debug("Entering for i_ino 0x%lx, vcn 0x%llx, with%s ctx.", | 473 | ntfs_debug("Entering for i_ino 0x%lx, vcn 0x%llx, with%s ctx.", |
473 | ni->mft_no, (unsigned long long)vcn, ctx ? "" : "out"); | 474 | ni->mft_no, (unsigned long long)vcn, ctx ? "" : "out"); |
474 | BUG_ON(!ni); | ||
475 | BUG_ON(!NInoNonResident(ni)); | 475 | BUG_ON(!NInoNonResident(ni)); |
476 | BUG_ON(vcn < 0); | 476 | BUG_ON(vcn < 0); |
477 | if (!ni->runlist.rl) { | 477 | if (!ni->runlist.rl) { |
diff --git a/fs/ntfs/mft.c b/fs/ntfs/mft.c index 382857f9c7db..3014a36a255b 100644 --- a/fs/ntfs/mft.c +++ b/fs/ntfs/mft.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /** | 1 | /** |
2 | * mft.c - NTFS kernel mft record operations. Part of the Linux-NTFS project. | 2 | * mft.c - NTFS kernel mft record operations. Part of the Linux-NTFS project. |
3 | * | 3 | * |
4 | * Copyright (c) 2001-2011 Anton Altaparmakov and Tuxera Inc. | 4 | * Copyright (c) 2001-2012 Anton Altaparmakov and Tuxera Inc. |
5 | * Copyright (c) 2002 Richard Russon | 5 | * Copyright (c) 2002 Richard Russon |
6 | * | 6 | * |
7 | * This program/include file is free software; you can redistribute it and/or | 7 | * This program/include file is free software; you can redistribute it and/or |
@@ -1367,7 +1367,7 @@ static int ntfs_mft_bitmap_extend_allocation_nolock(ntfs_volume *vol) | |||
1367 | ntfs_error(vol->sb, "Failed to merge runlists for mft " | 1367 | ntfs_error(vol->sb, "Failed to merge runlists for mft " |
1368 | "bitmap."); | 1368 | "bitmap."); |
1369 | if (ntfs_cluster_free_from_rl(vol, rl2)) { | 1369 | if (ntfs_cluster_free_from_rl(vol, rl2)) { |
1370 | ntfs_error(vol->sb, "Failed to dealocate " | 1370 | ntfs_error(vol->sb, "Failed to deallocate " |
1371 | "allocated cluster.%s", es); | 1371 | "allocated cluster.%s", es); |
1372 | NVolSetErrors(vol); | 1372 | NVolSetErrors(vol); |
1373 | } | 1373 | } |
@@ -1805,7 +1805,7 @@ static int ntfs_mft_data_extend_allocation_nolock(ntfs_volume *vol) | |||
1805 | ntfs_error(vol->sb, "Failed to merge runlists for mft data " | 1805 | ntfs_error(vol->sb, "Failed to merge runlists for mft data " |
1806 | "attribute."); | 1806 | "attribute."); |
1807 | if (ntfs_cluster_free_from_rl(vol, rl2)) { | 1807 | if (ntfs_cluster_free_from_rl(vol, rl2)) { |
1808 | ntfs_error(vol->sb, "Failed to dealocate clusters " | 1808 | ntfs_error(vol->sb, "Failed to deallocate clusters " |
1809 | "from the mft data attribute.%s", es); | 1809 | "from the mft data attribute.%s", es); |
1810 | NVolSetErrors(vol); | 1810 | NVolSetErrors(vol); |
1811 | } | 1811 | } |
diff --git a/fs/ntfs/super.c b/fs/ntfs/super.c index 5a4a8af5c406..f907611cca73 100644 --- a/fs/ntfs/super.c +++ b/fs/ntfs/super.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * super.c - NTFS kernel super block handling. Part of the Linux-NTFS project. | 2 | * super.c - NTFS kernel super block handling. Part of the Linux-NTFS project. |
3 | * | 3 | * |
4 | * Copyright (c) 2001-2011 Anton Altaparmakov and Tuxera Inc. | 4 | * Copyright (c) 2001-2012 Anton Altaparmakov and Tuxera Inc. |
5 | * Copyright (c) 2001,2002 Richard Russon | 5 | * Copyright (c) 2001,2002 Richard Russon |
6 | * | 6 | * |
7 | * This program/include file is free software; you can redistribute it and/or | 7 | * This program/include file is free software; you can redistribute it and/or |
@@ -1239,7 +1239,6 @@ static int check_windows_hibernation_status(ntfs_volume *vol) | |||
1239 | { | 1239 | { |
1240 | MFT_REF mref; | 1240 | MFT_REF mref; |
1241 | struct inode *vi; | 1241 | struct inode *vi; |
1242 | ntfs_inode *ni; | ||
1243 | struct page *page; | 1242 | struct page *page; |
1244 | u32 *kaddr, *kend; | 1243 | u32 *kaddr, *kend; |
1245 | ntfs_name *name = NULL; | 1244 | ntfs_name *name = NULL; |
@@ -1290,7 +1289,6 @@ static int check_windows_hibernation_status(ntfs_volume *vol) | |||
1290 | "is not the system volume.", i_size_read(vi)); | 1289 | "is not the system volume.", i_size_read(vi)); |
1291 | goto iput_out; | 1290 | goto iput_out; |
1292 | } | 1291 | } |
1293 | ni = NTFS_I(vi); | ||
1294 | page = ntfs_map_page(vi->i_mapping, 0); | 1292 | page = ntfs_map_page(vi->i_mapping, 0); |
1295 | if (IS_ERR(page)) { | 1293 | if (IS_ERR(page)) { |
1296 | ntfs_error(vol->sb, "Failed to read from hiberfil.sys."); | 1294 | ntfs_error(vol->sb, "Failed to read from hiberfil.sys."); |
diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c index be244692550d..a9856e3eaaf0 100644 --- a/fs/ocfs2/namei.c +++ b/fs/ocfs2/namei.c | |||
@@ -1053,7 +1053,7 @@ static int ocfs2_rename(struct inode *old_dir, | |||
1053 | handle_t *handle = NULL; | 1053 | handle_t *handle = NULL; |
1054 | struct buffer_head *old_dir_bh = NULL; | 1054 | struct buffer_head *old_dir_bh = NULL; |
1055 | struct buffer_head *new_dir_bh = NULL; | 1055 | struct buffer_head *new_dir_bh = NULL; |
1056 | nlink_t old_dir_nlink = old_dir->i_nlink; | 1056 | u32 old_dir_nlink = old_dir->i_nlink; |
1057 | struct ocfs2_dinode *old_di; | 1057 | struct ocfs2_dinode *old_di; |
1058 | struct ocfs2_dir_lookup_result old_inode_dot_dot_res = { NULL, }; | 1058 | struct ocfs2_dir_lookup_result old_inode_dot_dot_res = { NULL, }; |
1059 | struct ocfs2_dir_lookup_result target_lookup_res = { NULL, }; | 1059 | struct ocfs2_dir_lookup_result target_lookup_res = { NULL, }; |
diff --git a/fs/quota/quota.c b/fs/quota/quota.c index 7898cd688a00..fc2c4388d126 100644 --- a/fs/quota/quota.c +++ b/fs/quota/quota.c | |||
@@ -292,11 +292,26 @@ static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id, | |||
292 | } | 292 | } |
293 | } | 293 | } |
294 | 294 | ||
295 | /* Return 1 if 'cmd' will block on frozen filesystem */ | ||
296 | static int quotactl_cmd_write(int cmd) | ||
297 | { | ||
298 | switch (cmd) { | ||
299 | case Q_GETFMT: | ||
300 | case Q_GETINFO: | ||
301 | case Q_SYNC: | ||
302 | case Q_XGETQSTAT: | ||
303 | case Q_XGETQUOTA: | ||
304 | case Q_XQUOTASYNC: | ||
305 | return 0; | ||
306 | } | ||
307 | return 1; | ||
308 | } | ||
309 | |||
295 | /* | 310 | /* |
296 | * look up a superblock on which quota ops will be performed | 311 | * look up a superblock on which quota ops will be performed |
297 | * - use the name of a block device to find the superblock thereon | 312 | * - use the name of a block device to find the superblock thereon |
298 | */ | 313 | */ |
299 | static struct super_block *quotactl_block(const char __user *special) | 314 | static struct super_block *quotactl_block(const char __user *special, int cmd) |
300 | { | 315 | { |
301 | #ifdef CONFIG_BLOCK | 316 | #ifdef CONFIG_BLOCK |
302 | struct block_device *bdev; | 317 | struct block_device *bdev; |
@@ -309,7 +324,10 @@ static struct super_block *quotactl_block(const char __user *special) | |||
309 | putname(tmp); | 324 | putname(tmp); |
310 | if (IS_ERR(bdev)) | 325 | if (IS_ERR(bdev)) |
311 | return ERR_CAST(bdev); | 326 | return ERR_CAST(bdev); |
312 | sb = get_super(bdev); | 327 | if (quotactl_cmd_write(cmd)) |
328 | sb = get_super_thawed(bdev); | ||
329 | else | ||
330 | sb = get_super(bdev); | ||
313 | bdput(bdev); | 331 | bdput(bdev); |
314 | if (!sb) | 332 | if (!sb) |
315 | return ERR_PTR(-ENODEV); | 333 | return ERR_PTR(-ENODEV); |
@@ -361,7 +379,7 @@ SYSCALL_DEFINE4(quotactl, unsigned int, cmd, const char __user *, special, | |||
361 | pathp = &path; | 379 | pathp = &path; |
362 | } | 380 | } |
363 | 381 | ||
364 | sb = quotactl_block(special); | 382 | sb = quotactl_block(special, cmds); |
365 | if (IS_ERR(sb)) { | 383 | if (IS_ERR(sb)) { |
366 | ret = PTR_ERR(sb); | 384 | ret = PTR_ERR(sb); |
367 | goto out; | 385 | goto out; |
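quotactl_cmd_write() classifies the quota commands: the listed query commands never write, so they may run against a frozen filesystem, while everything else is routed through get_super_thawed() and waits for the thaw. A compact sketch of that dispatch (the command values and the lookup helper below are stand-ins for the real ones):

#include <stdio.h>

enum quota_cmd { Q_GETFMT, Q_GETINFO, Q_SYNC, Q_SETQUOTA, Q_XQUOTAON };

/* Returns 1 if 'cmd' would write and therefore block on a frozen filesystem. */
static int quotactl_cmd_write(enum quota_cmd cmd)
{
    switch (cmd) {
    case Q_GETFMT:
    case Q_GETINFO:
    case Q_SYNC:
        return 0;       /* read-only: safe against a frozen fs */
    default:
        return 1;       /* may write: must wait for the fs to thaw */
    }
}

/* Illustrative stand-in for choosing get_super() vs get_super_thawed(). */
static const char *lookup_super(int need_thawed)
{
    return need_thawed ? "get_super_thawed(bdev)" : "get_super(bdev)";
}

int main(void)
{
    enum quota_cmd cmds[] = { Q_GETINFO, Q_SETQUOTA };

    for (unsigned i = 0; i < sizeof(cmds) / sizeof(cmds[0]); i++)
        printf("cmd %u -> %s\n", (unsigned)cmds[i],
               lookup_super(quotactl_cmd_write(cmds[i])));
    return 0;
}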
diff --git a/fs/select.c b/fs/select.c index d33418fdc858..e782258d0de3 100644 --- a/fs/select.c +++ b/fs/select.c | |||
@@ -912,7 +912,7 @@ static long do_restart_poll(struct restart_block *restart_block) | |||
912 | } | 912 | } |
913 | 913 | ||
914 | SYSCALL_DEFINE3(poll, struct pollfd __user *, ufds, unsigned int, nfds, | 914 | SYSCALL_DEFINE3(poll, struct pollfd __user *, ufds, unsigned int, nfds, |
915 | long, timeout_msecs) | 915 | int, timeout_msecs) |
916 | { | 916 | { |
917 | struct timespec end_time, *to = NULL; | 917 | struct timespec end_time, *to = NULL; |
918 | int ret; | 918 | int ret; |
diff --git a/fs/signalfd.c b/fs/signalfd.c index 492465b451dd..7ae2a574cb25 100644 --- a/fs/signalfd.c +++ b/fs/signalfd.c | |||
@@ -30,6 +30,21 @@ | |||
30 | #include <linux/signalfd.h> | 30 | #include <linux/signalfd.h> |
31 | #include <linux/syscalls.h> | 31 | #include <linux/syscalls.h> |
32 | 32 | ||
33 | void signalfd_cleanup(struct sighand_struct *sighand) | ||
34 | { | ||
35 | wait_queue_head_t *wqh = &sighand->signalfd_wqh; | ||
36 | /* | ||
37 | * The lockless check can race with remove_wait_queue() in progress, | ||
38 | * but in this case its caller should run under rcu_read_lock() and | ||
39 | * sighand_cachep is SLAB_DESTROY_BY_RCU, we can safely return. | ||
40 | */ | ||
41 | if (likely(!waitqueue_active(wqh))) | ||
42 | return; | ||
43 | |||
44 | /* wait_queue_t->func(POLLFREE) should do remove_wait_queue() */ | ||
45 | wake_up_poll(wqh, POLLHUP | POLLFREE); | ||
46 | } | ||
47 | |||
33 | struct signalfd_ctx { | 48 | struct signalfd_ctx { |
34 | sigset_t sigmask; | 49 | sigset_t sigmask; |
35 | }; | 50 | }; |
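signalfd_cleanup() starts with a lockless waitqueue_active() test so the common case, a sighand with no signalfd attached, costs nothing, and only when waiters exist does it issue the POLLHUP | POLLFREE wakeup that makes epoll drop its registration (the eventpoll hunk above handles that key). A hedged pthread sketch of the check-a-counter-before-taking-the-lock fast path (the counter and the hangup flag are simplifications of the kernel's wait queue state):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
static atomic_int      nwaiters;       /* analogue of waitqueue_active() */
static int             hung_up;

/* Cheap fast path: skip the lock entirely when nobody is waiting. */
static void cleanup_notify(void)
{
    if (atomic_load(&nwaiters) == 0)
        return;                         /* common case: no waiter registered */

    pthread_mutex_lock(&lock);          /* slow path: wake everyone up */
    hung_up = 1;
    pthread_cond_broadcast(&cond);
    pthread_mutex_unlock(&lock);
}

static void *waiter(void *arg)
{
    (void)arg;
    pthread_mutex_lock(&lock);
    atomic_fetch_add(&nwaiters, 1);     /* register before sleeping */
    while (!hung_up)
        pthread_cond_wait(&cond, &lock);
    atomic_fetch_sub(&nwaiters, 1);
    pthread_mutex_unlock(&lock);
    puts("waiter saw hangup");
    return NULL;
}

int main(void)
{
    pthread_t t;

    pthread_create(&t, NULL, waiter, NULL);
    while (atomic_load(&nwaiters) == 0)
        ;                               /* spin until the waiter has registered */
    cleanup_notify();
    pthread_join(t, NULL);
    return 0;
}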
diff --git a/fs/super.c b/fs/super.c index 6015c02296b7..6277ec6cb60a 100644 --- a/fs/super.c +++ b/fs/super.c | |||
@@ -634,6 +634,28 @@ rescan: | |||
634 | EXPORT_SYMBOL(get_super); | 634 | EXPORT_SYMBOL(get_super); |
635 | 635 | ||
636 | /** | 636 | /** |
637 | * get_super_thawed - get thawed superblock of a device | ||
638 | * @bdev: device to get the superblock for | ||
639 | * | ||
640 | * Scans the superblock list and finds the superblock of the file system | ||
641 | * mounted on the device. The superblock is returned once it is thawed | ||
642 | * (or immediately if it was not frozen). %NULL is returned if no match | ||
643 | * is found. | ||
644 | */ | ||
645 | struct super_block *get_super_thawed(struct block_device *bdev) | ||
646 | { | ||
647 | while (1) { | ||
648 | struct super_block *s = get_super(bdev); | ||
649 | if (!s || s->s_frozen == SB_UNFROZEN) | ||
650 | return s; | ||
651 | up_read(&s->s_umount); | ||
652 | vfs_check_frozen(s, SB_FREEZE_WRITE); | ||
653 | put_super(s); | ||
654 | } | ||
655 | } | ||
656 | EXPORT_SYMBOL(get_super_thawed); | ||
657 | |||
658 | /** | ||
637 | * get_active_super - get an active reference to the superblock of a device | 659 | * get_active_super - get an active reference to the superblock of a device |
638 | * @bdev: device to get the superblock for | 660 | * @bdev: device to get the superblock for |
639 | * | 661 | * |
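get_super_thawed() is a simple retry loop, as its kernel-doc above says: take the superblock, and if the filesystem is frozen, drop it, wait for the thaw, and look it up again, so callers never operate inside a freeze window. A userspace analogue of that wait-until-thawed loop built on a condition variable (the superblock and its freeze state are mocked up here):

#include <pthread.h>
#include <stdio.h>

struct super {
    pthread_mutex_t lock;
    pthread_cond_t  thawed;
    int             frozen;
};

/* Block until the "superblock" is no longer frozen, then return it. */
static struct super *get_super_thawed(struct super *sb)
{
    pthread_mutex_lock(&sb->lock);
    while (sb->frozen)                  /* retry until the freeze is lifted */
        pthread_cond_wait(&sb->thawed, &sb->lock);
    pthread_mutex_unlock(&sb->lock);
    return sb;
}

static void *thaw_later(void *arg)
{
    struct super *sb = arg;

    pthread_mutex_lock(&sb->lock);
    sb->frozen = 0;                     /* analogue of the fs being thawed */
    pthread_cond_broadcast(&sb->thawed);
    pthread_mutex_unlock(&sb->lock);
    return NULL;
}

int main(void)
{
    struct super sb = { PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 1 };
    pthread_t t;

    pthread_create(&t, NULL, thaw_later, &sb);
    get_super_thawed(&sb);
    puts("superblock obtained after thaw");
    pthread_join(t, NULL);
    return 0;
}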
diff --git a/fs/xfs/xfs_dquot.c b/fs/xfs/xfs_dquot.c index cbcb7bea38e2..53db20ee3e77 100644 --- a/fs/xfs/xfs_dquot.c +++ b/fs/xfs/xfs_dquot.c | |||
@@ -139,10 +139,10 @@ xfs_qm_adjust_dqtimers( | |||
139 | 139 | ||
140 | if (!d->d_btimer) { | 140 | if (!d->d_btimer) { |
141 | if ((d->d_blk_softlimit && | 141 | if ((d->d_blk_softlimit && |
142 | (be64_to_cpu(d->d_bcount) >= | 142 | (be64_to_cpu(d->d_bcount) > |
143 | be64_to_cpu(d->d_blk_softlimit))) || | 143 | be64_to_cpu(d->d_blk_softlimit))) || |
144 | (d->d_blk_hardlimit && | 144 | (d->d_blk_hardlimit && |
145 | (be64_to_cpu(d->d_bcount) >= | 145 | (be64_to_cpu(d->d_bcount) > |
146 | be64_to_cpu(d->d_blk_hardlimit)))) { | 146 | be64_to_cpu(d->d_blk_hardlimit)))) { |
147 | d->d_btimer = cpu_to_be32(get_seconds() + | 147 | d->d_btimer = cpu_to_be32(get_seconds() + |
148 | mp->m_quotainfo->qi_btimelimit); | 148 | mp->m_quotainfo->qi_btimelimit); |
@@ -151,10 +151,10 @@ xfs_qm_adjust_dqtimers( | |||
151 | } | 151 | } |
152 | } else { | 152 | } else { |
153 | if ((!d->d_blk_softlimit || | 153 | if ((!d->d_blk_softlimit || |
154 | (be64_to_cpu(d->d_bcount) < | 154 | (be64_to_cpu(d->d_bcount) <= |
155 | be64_to_cpu(d->d_blk_softlimit))) && | 155 | be64_to_cpu(d->d_blk_softlimit))) && |
156 | (!d->d_blk_hardlimit || | 156 | (!d->d_blk_hardlimit || |
157 | (be64_to_cpu(d->d_bcount) < | 157 | (be64_to_cpu(d->d_bcount) <= |
158 | be64_to_cpu(d->d_blk_hardlimit)))) { | 158 | be64_to_cpu(d->d_blk_hardlimit)))) { |
159 | d->d_btimer = 0; | 159 | d->d_btimer = 0; |
160 | } | 160 | } |
@@ -162,10 +162,10 @@ xfs_qm_adjust_dqtimers( | |||
162 | 162 | ||
163 | if (!d->d_itimer) { | 163 | if (!d->d_itimer) { |
164 | if ((d->d_ino_softlimit && | 164 | if ((d->d_ino_softlimit && |
165 | (be64_to_cpu(d->d_icount) >= | 165 | (be64_to_cpu(d->d_icount) > |
166 | be64_to_cpu(d->d_ino_softlimit))) || | 166 | be64_to_cpu(d->d_ino_softlimit))) || |
167 | (d->d_ino_hardlimit && | 167 | (d->d_ino_hardlimit && |
168 | (be64_to_cpu(d->d_icount) >= | 168 | (be64_to_cpu(d->d_icount) > |
169 | be64_to_cpu(d->d_ino_hardlimit)))) { | 169 | be64_to_cpu(d->d_ino_hardlimit)))) { |
170 | d->d_itimer = cpu_to_be32(get_seconds() + | 170 | d->d_itimer = cpu_to_be32(get_seconds() + |
171 | mp->m_quotainfo->qi_itimelimit); | 171 | mp->m_quotainfo->qi_itimelimit); |
@@ -174,10 +174,10 @@ xfs_qm_adjust_dqtimers( | |||
174 | } | 174 | } |
175 | } else { | 175 | } else { |
176 | if ((!d->d_ino_softlimit || | 176 | if ((!d->d_ino_softlimit || |
177 | (be64_to_cpu(d->d_icount) < | 177 | (be64_to_cpu(d->d_icount) <= |
178 | be64_to_cpu(d->d_ino_softlimit))) && | 178 | be64_to_cpu(d->d_ino_softlimit))) && |
179 | (!d->d_ino_hardlimit || | 179 | (!d->d_ino_hardlimit || |
180 | (be64_to_cpu(d->d_icount) < | 180 | (be64_to_cpu(d->d_icount) <= |
181 | be64_to_cpu(d->d_ino_hardlimit)))) { | 181 | be64_to_cpu(d->d_ino_hardlimit)))) { |
182 | d->d_itimer = 0; | 182 | d->d_itimer = 0; |
183 | } | 183 | } |
@@ -185,10 +185,10 @@ xfs_qm_adjust_dqtimers( | |||
185 | 185 | ||
186 | if (!d->d_rtbtimer) { | 186 | if (!d->d_rtbtimer) { |
187 | if ((d->d_rtb_softlimit && | 187 | if ((d->d_rtb_softlimit && |
188 | (be64_to_cpu(d->d_rtbcount) >= | 188 | (be64_to_cpu(d->d_rtbcount) > |
189 | be64_to_cpu(d->d_rtb_softlimit))) || | 189 | be64_to_cpu(d->d_rtb_softlimit))) || |
190 | (d->d_rtb_hardlimit && | 190 | (d->d_rtb_hardlimit && |
191 | (be64_to_cpu(d->d_rtbcount) >= | 191 | (be64_to_cpu(d->d_rtbcount) > |
192 | be64_to_cpu(d->d_rtb_hardlimit)))) { | 192 | be64_to_cpu(d->d_rtb_hardlimit)))) { |
193 | d->d_rtbtimer = cpu_to_be32(get_seconds() + | 193 | d->d_rtbtimer = cpu_to_be32(get_seconds() + |
194 | mp->m_quotainfo->qi_rtbtimelimit); | 194 | mp->m_quotainfo->qi_rtbtimelimit); |
@@ -197,10 +197,10 @@ xfs_qm_adjust_dqtimers( | |||
197 | } | 197 | } |
198 | } else { | 198 | } else { |
199 | if ((!d->d_rtb_softlimit || | 199 | if ((!d->d_rtb_softlimit || |
200 | (be64_to_cpu(d->d_rtbcount) < | 200 | (be64_to_cpu(d->d_rtbcount) <= |
201 | be64_to_cpu(d->d_rtb_softlimit))) && | 201 | be64_to_cpu(d->d_rtb_softlimit))) && |
202 | (!d->d_rtb_hardlimit || | 202 | (!d->d_rtb_hardlimit || |
203 | (be64_to_cpu(d->d_rtbcount) < | 203 | (be64_to_cpu(d->d_rtbcount) <= |
204 | be64_to_cpu(d->d_rtb_hardlimit)))) { | 204 | be64_to_cpu(d->d_rtb_hardlimit)))) { |
205 | d->d_rtbtimer = 0; | 205 | d->d_rtbtimer = 0; |
206 | } | 206 | } |
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c index 15ff5392fb65..0ed9ee77937c 100644 --- a/fs/xfs/xfs_log_recover.c +++ b/fs/xfs/xfs_log_recover.c | |||
@@ -1981,7 +1981,7 @@ xfs_qm_dqcheck( | |||
1981 | 1981 | ||
1982 | if (!errs && ddq->d_id) { | 1982 | if (!errs && ddq->d_id) { |
1983 | if (ddq->d_blk_softlimit && | 1983 | if (ddq->d_blk_softlimit && |
1984 | be64_to_cpu(ddq->d_bcount) >= | 1984 | be64_to_cpu(ddq->d_bcount) > |
1985 | be64_to_cpu(ddq->d_blk_softlimit)) { | 1985 | be64_to_cpu(ddq->d_blk_softlimit)) { |
1986 | if (!ddq->d_btimer) { | 1986 | if (!ddq->d_btimer) { |
1987 | if (flags & XFS_QMOPT_DOWARN) | 1987 | if (flags & XFS_QMOPT_DOWARN) |
@@ -1992,7 +1992,7 @@ xfs_qm_dqcheck( | |||
1992 | } | 1992 | } |
1993 | } | 1993 | } |
1994 | if (ddq->d_ino_softlimit && | 1994 | if (ddq->d_ino_softlimit && |
1995 | be64_to_cpu(ddq->d_icount) >= | 1995 | be64_to_cpu(ddq->d_icount) > |
1996 | be64_to_cpu(ddq->d_ino_softlimit)) { | 1996 | be64_to_cpu(ddq->d_ino_softlimit)) { |
1997 | if (!ddq->d_itimer) { | 1997 | if (!ddq->d_itimer) { |
1998 | if (flags & XFS_QMOPT_DOWARN) | 1998 | if (flags & XFS_QMOPT_DOWARN) |
@@ -2003,7 +2003,7 @@ xfs_qm_dqcheck( | |||
2003 | } | 2003 | } |
2004 | } | 2004 | } |
2005 | if (ddq->d_rtb_softlimit && | 2005 | if (ddq->d_rtb_softlimit && |
2006 | be64_to_cpu(ddq->d_rtbcount) >= | 2006 | be64_to_cpu(ddq->d_rtbcount) > |
2007 | be64_to_cpu(ddq->d_rtb_softlimit)) { | 2007 | be64_to_cpu(ddq->d_rtb_softlimit)) { |
2008 | if (!ddq->d_rtbtimer) { | 2008 | if (!ddq->d_rtbtimer) { |
2009 | if (flags & XFS_QMOPT_DOWARN) | 2009 | if (flags & XFS_QMOPT_DOWARN) |
diff --git a/fs/xfs/xfs_qm_syscalls.c b/fs/xfs/xfs_qm_syscalls.c index eafbcff81f3a..711a86e39ff0 100644 --- a/fs/xfs/xfs_qm_syscalls.c +++ b/fs/xfs/xfs_qm_syscalls.c | |||
@@ -813,11 +813,11 @@ xfs_qm_export_dquot( | |||
813 | (XFS_IS_OQUOTA_ENFORCED(mp) && | 813 | (XFS_IS_OQUOTA_ENFORCED(mp) && |
814 | (dst->d_flags & (FS_PROJ_QUOTA | FS_GROUP_QUOTA)))) && | 814 | (dst->d_flags & (FS_PROJ_QUOTA | FS_GROUP_QUOTA)))) && |
815 | dst->d_id != 0) { | 815 | dst->d_id != 0) { |
816 | if (((int) dst->d_bcount >= (int) dst->d_blk_softlimit) && | 816 | if (((int) dst->d_bcount > (int) dst->d_blk_softlimit) && |
817 | (dst->d_blk_softlimit > 0)) { | 817 | (dst->d_blk_softlimit > 0)) { |
818 | ASSERT(dst->d_btimer != 0); | 818 | ASSERT(dst->d_btimer != 0); |
819 | } | 819 | } |
820 | if (((int) dst->d_icount >= (int) dst->d_ino_softlimit) && | 820 | if (((int) dst->d_icount > (int) dst->d_ino_softlimit) && |
821 | (dst->d_ino_softlimit > 0)) { | 821 | (dst->d_ino_softlimit > 0)) { |
822 | ASSERT(dst->d_itimer != 0); | 822 | ASSERT(dst->d_itimer != 0); |
823 | } | 823 | } |
diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c index 329b06aba1c2..7adcdf15ae0c 100644 --- a/fs/xfs/xfs_trans.c +++ b/fs/xfs/xfs_trans.c | |||
@@ -1151,8 +1151,8 @@ xfs_trans_add_item( | |||
1151 | { | 1151 | { |
1152 | struct xfs_log_item_desc *lidp; | 1152 | struct xfs_log_item_desc *lidp; |
1153 | 1153 | ||
1154 | ASSERT(lip->li_mountp = tp->t_mountp); | 1154 | ASSERT(lip->li_mountp == tp->t_mountp); |
1155 | ASSERT(lip->li_ailp = tp->t_mountp->m_ail); | 1155 | ASSERT(lip->li_ailp == tp->t_mountp->m_ail); |
1156 | 1156 | ||
1157 | lidp = kmem_zone_zalloc(xfs_log_item_desc_zone, KM_SLEEP | KM_NOFS); | 1157 | lidp = kmem_zone_zalloc(xfs_log_item_desc_zone, KM_SLEEP | KM_NOFS); |
1158 | 1158 | ||
diff --git a/fs/xfs/xfs_trans_dquot.c b/fs/xfs/xfs_trans_dquot.c index 4d00ee67792d..c4ba366d24e6 100644 --- a/fs/xfs/xfs_trans_dquot.c +++ b/fs/xfs/xfs_trans_dquot.c | |||
@@ -649,12 +649,12 @@ xfs_trans_dqresv( | |||
649 | * nblks. | 649 | * nblks. |
650 | */ | 650 | */ |
651 | if (hardlimit > 0ULL && | 651 | if (hardlimit > 0ULL && |
652 | hardlimit <= nblks + *resbcountp) { | 652 | hardlimit < nblks + *resbcountp) { |
653 | xfs_quota_warn(mp, dqp, QUOTA_NL_BHARDWARN); | 653 | xfs_quota_warn(mp, dqp, QUOTA_NL_BHARDWARN); |
654 | goto error_return; | 654 | goto error_return; |
655 | } | 655 | } |
656 | if (softlimit > 0ULL && | 656 | if (softlimit > 0ULL && |
657 | softlimit <= nblks + *resbcountp) { | 657 | softlimit < nblks + *resbcountp) { |
658 | if ((timer != 0 && get_seconds() > timer) || | 658 | if ((timer != 0 && get_seconds() > timer) || |
659 | (warns != 0 && warns >= warnlimit)) { | 659 | (warns != 0 && warns >= warnlimit)) { |
660 | xfs_quota_warn(mp, dqp, | 660 | xfs_quota_warn(mp, dqp, |
@@ -677,11 +677,13 @@ xfs_trans_dqresv( | |||
677 | if (!softlimit) | 677 | if (!softlimit) |
678 | softlimit = q->qi_isoftlimit; | 678 | softlimit = q->qi_isoftlimit; |
679 | 679 | ||
680 | if (hardlimit > 0ULL && count >= hardlimit) { | 680 | if (hardlimit > 0ULL && |
681 | hardlimit < ninos + count) { | ||
681 | xfs_quota_warn(mp, dqp, QUOTA_NL_IHARDWARN); | 682 | xfs_quota_warn(mp, dqp, QUOTA_NL_IHARDWARN); |
682 | goto error_return; | 683 | goto error_return; |
683 | } | 684 | } |
684 | if (softlimit > 0ULL && count >= softlimit) { | 685 | if (softlimit > 0ULL && |
686 | softlimit < ninos + count) { | ||
685 | if ((timer != 0 && get_seconds() > timer) || | 687 | if ((timer != 0 && get_seconds() > timer) || |
686 | (warns != 0 && warns >= warnlimit)) { | 688 | (warns != 0 && warns >= warnlimit)) { |
687 | xfs_quota_warn(mp, dqp, | 689 | xfs_quota_warn(mp, dqp, |