author	Mauro Carvalho Chehab <mchehab@redhat.com>	2012-03-19 12:41:24 -0400
committer	Mauro Carvalho Chehab <mchehab@redhat.com>	2012-03-19 12:41:24 -0400
commit	9ce28d827f74d0acdd058bded8bab5309b0f5c8f (patch)
tree	634f22e8df9c7fd3966b3639e3e997436751ca50 /fs
parent	f074ff92b5b26f3a559fab1203c36e140ea8d067 (diff)
parent	c16fa4f2ad19908a47c63d8fa436a1178438c7e7 (diff)
Merge tag 'v3.3' into staging/for_v3.4
* tag 'v3.3': (1646 commits)
Linux 3.3
Don't limit non-nested epoll paths
netfilter: ctnetlink: fix race between delete and timeout expiration
ipv6: Don't dev_hold(dev) in ip6_mc_find_dev_rcu.
nilfs2: fix NULL pointer dereference in nilfs_load_super_block()
nilfs2: clamp ns_r_segments_percentage to [1, 99]
afs: Remote abort can cause BUG in rxrpc code
afs: Read of file returns EBADMSG
C6X: remove dead code from entry.S
wimax/i2400m: fix erroneous NETDEV_TX_BUSY use
net/hyperv: fix erroneous NETDEV_TX_BUSY use
net/usbnet: reserve headroom on rx skbs
bnx2x: fix memory leak in bnx2x_init_firmware()
bnx2x: fix a crash on corrupt firmware file
sch_sfq: revert dont put new flow at the end of flows
ipv6: fix icmp6_dst_alloc()
MAINTAINERS: Add Serge as maintainer of capabilities
drivers/video/backlight/s6e63m0.c: fix corruption storing gamma mode
MAINTAINERS: add entry for exynos mipi display drivers
MAINTAINERS: fix link to Gustavo Padovans tree
...
Diffstat (limited to 'fs')
119 files changed, 1880 insertions, 1337 deletions
diff --git a/fs/afs/internal.h b/fs/afs/internal.h
index d2b0888126d4..a306bb6d88d9 100644
--- a/fs/afs/internal.h
+++ b/fs/afs/internal.h
@@ -109,7 +109,7 @@ struct afs_call {
 unsigned reply_size; /* current size of reply */
 unsigned first_offset; /* offset into mapping[first] */
 unsigned last_to; /* amount of mapping[last] */
-unsigned short offset; /* offset into received data store */
+unsigned offset; /* offset into received data store */
 unsigned char unmarshall; /* unmarshalling phase */
 bool incoming; /* T if incoming call */
 bool send_pages; /* T if data from mapping should be sent */
diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c
index e45a323aebb4..8ad8c2a0703a 100644
--- a/fs/afs/rxrpc.c
+++ b/fs/afs/rxrpc.c
@@ -314,6 +314,7 @@ int afs_make_call(struct in_addr *addr, struct afs_call *call, gfp_t gfp,
 struct msghdr msg;
 struct kvec iov[1];
 int ret;
+struct sk_buff *skb;

 _enter("%x,{%d},", addr->s_addr, ntohs(call->port));

@@ -380,6 +381,8 @@ int afs_make_call(struct in_addr *addr, struct afs_call *call, gfp_t gfp,

 error_do_abort:
 rxrpc_kernel_abort_call(rxcall, RX_USER_ABORT);
+while ((skb = skb_dequeue(&call->rx_queue)))
+afs_free_skb(skb);
 rxrpc_kernel_end_call(rxcall);
 call->rxcall = NULL;
 error_kill_call:
diff --git a/fs/aio.c b/fs/aio.c
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -228,12 +228,6 @@ static void __put_ioctx(struct kioctx *ctx)
 call_rcu(&ctx->rcu_head, ctx_rcu_free);
 }

-static inline void get_ioctx(struct kioctx *kioctx)
-{
-BUG_ON(atomic_read(&kioctx->users) <= 0);
-atomic_inc(&kioctx->users);
-}
-
 static inline int try_get_ioctx(struct kioctx *kioctx)
 {
 return atomic_inc_not_zero(&kioctx->users);
@@ -273,7 +267,7 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
 mm = ctx->mm = current->mm;
 atomic_inc(&mm->mm_count);

-atomic_set(&ctx->users, 1);
+atomic_set(&ctx->users, 2);
 spin_lock_init(&ctx->ctx_lock);
 spin_lock_init(&ctx->ring_info.ring_lock);
 init_waitqueue_head(&ctx->wait);
@@ -490,6 +484,8 @@ static void kiocb_batch_free(struct kioctx *ctx, struct kiocb_batch *batch)
 kmem_cache_free(kiocb_cachep, req);
 ctx->reqs_active--;
 }
+if (unlikely(!ctx->reqs_active && ctx->dead))
+wake_up_all(&ctx->wait);
 spin_unlock_irq(&ctx->ctx_lock);
 }

@@ -607,11 +603,16 @@ static void aio_fput_routine(struct work_struct *data)
 fput(req->ki_filp);

 /* Link the iocb into the context's free list */
+rcu_read_lock();
 spin_lock_irq(&ctx->ctx_lock);
 really_put_req(ctx, req);
+/*
+ * at that point ctx might've been killed, but actual
+ * freeing is RCU'd
+ */
 spin_unlock_irq(&ctx->ctx_lock);
+rcu_read_unlock();

-put_ioctx(ctx);
 spin_lock_irq(&fput_lock);
 }
 spin_unlock_irq(&fput_lock);
@@ -642,7 +643,6 @@ static int __aio_put_req(struct kioctx *ctx, struct kiocb *req)
 * this function will be executed w/out any aio kthread wakeup.
 */
 if (unlikely(!fput_atomic(req->ki_filp))) {
-get_ioctx(ctx);
 spin_lock(&fput_lock);
 list_add(&req->ki_list, &fput_head);
 spin_unlock(&fput_lock);
@@ -1336,10 +1336,10 @@ SYSCALL_DEFINE2(io_setup, unsigned, nr_events, aio_context_t __user *, ctxp)
 ret = PTR_ERR(ioctx);
 if (!IS_ERR(ioctx)) {
 ret = put_user(ioctx->user_id, ctxp);
-if (!ret)
+if (!ret) {
+put_ioctx(ioctx);
 return 0;
-
-get_ioctx(ioctx); /* io_destroy() expects us to hold a ref */
+}
 io_destroy(ioctx);
 }

diff --git a/fs/autofs4/autofs_i.h b/fs/autofs4/autofs_i.h
index d8d8e7ba6a1e..eb1cc92cd67d 100644
--- a/fs/autofs4/autofs_i.h
+++ b/fs/autofs4/autofs_i.h
@@ -110,6 +110,7 @@ struct autofs_sb_info {
 int sub_version;
 int min_proto;
 int max_proto;
+int compat_daemon;
 unsigned long exp_timeout;
 unsigned int type;
 int reghost_enabled;
diff --git a/fs/autofs4/dev-ioctl.c b/fs/autofs4/dev-ioctl.c
index 76741d8d7786..85f1fcdb30e7 100644
--- a/fs/autofs4/dev-ioctl.c
+++ b/fs/autofs4/dev-ioctl.c
@@ -385,6 +385,7 @@ static int autofs_dev_ioctl_setpipefd(struct file *fp,
 sbi->pipefd = pipefd;
 sbi->pipe = pipe;
 sbi->catatonic = 0;
+sbi->compat_daemon = is_compat_task();
 }
 out:
 mutex_unlock(&sbi->wq_mutex);
diff --git a/fs/autofs4/expire.c b/fs/autofs4/expire.c
index 450f529a4eae..1feb68ecef95 100644
--- a/fs/autofs4/expire.c
+++ b/fs/autofs4/expire.c
@@ -124,6 +124,7 @@ start:
 /* Negative dentry - try next */
 if (!simple_positive(q)) {
 spin_unlock(&p->d_lock);
+lock_set_subclass(&q->d_lock.dep_map, 0, _RET_IP_);
 p = q;
 goto again;
 }
@@ -186,6 +187,7 @@ again:
 /* Negative dentry - try next */
 if (!simple_positive(ret)) {
 spin_unlock(&p->d_lock);
+lock_set_subclass(&ret->d_lock.dep_map, 0, _RET_IP_);
 p = ret;
 goto again;
 }
diff --git a/fs/autofs4/inode.c b/fs/autofs4/inode.c
index e16980b00b8d..06858d955120 100644
--- a/fs/autofs4/inode.c
+++ b/fs/autofs4/inode.c
@@ -19,6 +19,7 @@
 #include <linux/parser.h>
 #include <linux/bitops.h>
 #include <linux/magic.h>
+#include <linux/compat.h>
 #include "autofs_i.h"
 #include <linux/module.h>

@@ -224,6 +225,7 @@ int autofs4_fill_super(struct super_block *s, void *data, int silent)
 set_autofs_type_indirect(&sbi->type);
 sbi->min_proto = 0;
 sbi->max_proto = 0;
+sbi->compat_daemon = is_compat_task();
 mutex_init(&sbi->wq_mutex);
 mutex_init(&sbi->pipe_mutex);
 spin_lock_init(&sbi->fs_lock);
diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
index da8876d38a7b..9c098db43344 100644
--- a/fs/autofs4/waitq.c
+++ b/fs/autofs4/waitq.c
@@ -91,7 +91,24 @@ static int autofs4_write(struct autofs_sb_info *sbi,

 return (bytes > 0);
 }

+/*
+ * The autofs_v5 packet was misdesigned.
+ *
+ * The packets are identical on x86-32 and x86-64, but have different
+ * alignment. Which means that 'sizeof()' will give different results.
+ * Fix it up for the case of running 32-bit user mode on a 64-bit kernel.
+ */
+static noinline size_t autofs_v5_packet_size(struct autofs_sb_info *sbi)
+{
+size_t pktsz = sizeof(struct autofs_v5_packet);
+#if defined(CONFIG_X86_64) && defined(CONFIG_COMPAT)
+if (sbi->compat_daemon > 0)
+pktsz -= 4;
+#endif
+return pktsz;
+}
+
 static void autofs4_notify_daemon(struct autofs_sb_info *sbi,
 struct autofs_wait_queue *wq,
 int type)
@@ -155,8 +172,7 @@ static void autofs4_notify_daemon(struct autofs_sb_info *sbi,
 {
 struct autofs_v5_packet *packet = &pkt.v5_pkt.v5_packet;

-pktsz = sizeof(*packet);
-
+pktsz = autofs_v5_packet_size(sbi);
 packet->wait_queue_token = wq->wait_queue_token;
 packet->len = wq->name.len;
 memcpy(packet->name, wq->name.name, wq->name.len);
diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
index a6395bdb26ae..1ff94054d35a 100644
--- a/fs/binfmt_aout.c
+++ b/fs/binfmt_aout.c
@@ -259,6 +259,13 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
 current->mm->free_area_cache = current->mm->mmap_base;
 current->mm->cached_hole_size = 0;

+retval = setup_arg_pages(bprm, STACK_TOP, EXSTACK_DEFAULT);
+if (retval < 0) {
+/* Someone check-me: is this error path enough? */
+send_sig(SIGKILL, current, 0);
+return retval;
+}
+
 install_exec_creds(bprm);
 current->flags &= ~PF_FORKNOEXEC;

@@ -352,13 +359,6 @@ beyond_if:
 return retval;
 }

-retval = setup_arg_pages(bprm, STACK_TOP, EXSTACK_DEFAULT);
-if (retval < 0) {
-/* Someone check-me: is this error path enough? */
-send_sig(SIGKILL, current, 0);
-return retval;
-}
-
 current->mm->start_stack =
 (unsigned long) create_aout_tables((char __user *) bprm->p, bprm);
 #ifdef __alpha__
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index bcb884e2d613..07d096c49920 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -1421,7 +1421,7 @@ static int fill_thread_core_info(struct elf_thread_core_info *t,
 for (i = 1; i < view->n; ++i) {
 const struct user_regset *regset = &view->regsets[i];
 do_thread_regset_writeback(t->task, regset);
-if (regset->core_note_type &&
+if (regset->core_note_type && regset->get &&
 (!regset->active || regset->active(t->task, regset))) {
 int ret;
 size_t size = regset->n * regset->size;
diff --git a/fs/bio.c b/fs/bio.c
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -505,13 +505,9 @@ EXPORT_SYMBOL(bio_clone);
 int bio_get_nr_vecs(struct block_device *bdev)
 {
 struct request_queue *q = bdev_get_queue(bdev);
-int nr_pages;
-
-nr_pages = ((queue_max_sectors(q) << 9) + PAGE_SIZE - 1) >> PAGE_SHIFT;
-if (nr_pages > queue_max_segments(q))
-nr_pages = queue_max_segments(q);
-
-return nr_pages;
+return min_t(unsigned,
+queue_max_segments(q),
+queue_max_sectors(q) / (PAGE_SIZE >> 9) + 1);
 }
 EXPORT_SYMBOL(bio_get_nr_vecs);

diff --git a/fs/block_dev.c b/fs/block_dev.c
index 0e575d1304b4..5e9f198f7712 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -1183,8 +1183,12 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
 * The latter is necessary to prevent ghost
 * partitions on a removed medium.
 */
-if (bdev->bd_invalidated && (!ret || ret == -ENOMEDIUM))
-rescan_partitions(disk, bdev);
+if (bdev->bd_invalidated) {
+if (!ret)
+rescan_partitions(disk, bdev);
+else if (ret == -ENOMEDIUM)
+invalidate_partitions(disk, bdev);
+}
 if (ret)
 goto out_clear;
 } else {
@@ -1214,8 +1218,12 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
 if (bdev->bd_disk->fops->open)
 ret = bdev->bd_disk->fops->open(bdev, mode);
 /* the same as first opener case, read comment there */
-if (bdev->bd_invalidated && (!ret || ret == -ENOMEDIUM))
-rescan_partitions(bdev->bd_disk, bdev);
+if (bdev->bd_invalidated) {
+if (!ret)
+rescan_partitions(bdev->bd_disk, bdev);
+else if (ret == -ENOMEDIUM)
+invalidate_partitions(bdev->bd_disk, bdev);
+}
 if (ret)
 goto out_unlock_bdev;
 }
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index b9a843226de8..0436c12da8c2 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -297,7 +297,7 @@ static int __add_delayed_refs(struct btrfs_delayed_ref_head *head, u64 seq,
 struct btrfs_delayed_extent_op *extent_op = head->extent_op;
 struct rb_node *n = &head->node.rb_node;
 int sgn;
-int ret;
+int ret = 0;

 if (extent_op && extent_op->update_key)
 btrfs_disk_key_to_cpu(info_key, &extent_op->key);
@@ -392,7 +392,7 @@ static int __add_inline_refs(struct btrfs_fs_info *fs_info,
 struct btrfs_key *info_key, int *info_level,
 struct list_head *prefs)
 {
-int ret;
+int ret = 0;
 int slot;
 struct extent_buffer *leaf;
 struct btrfs_key key;
@@ -583,7 +583,7 @@ static int find_parent_nodes(struct btrfs_trans_handle *trans,
 struct btrfs_path *path;
 struct btrfs_key info_key = { 0 };
 struct btrfs_delayed_ref_root *delayed_refs = NULL;
-struct btrfs_delayed_ref_head *head = NULL;
+struct btrfs_delayed_ref_head *head;
 int info_level = 0;
 int ret;
 struct list_head prefs_delayed;
@@ -607,6 +607,8 @@ static int find_parent_nodes(struct btrfs_trans_handle *trans,
 * at a specified point in time
 */
 again:
+head = NULL;
+
 ret = btrfs_search_slot(trans, fs_info->extent_root, &key, path, 0, 0);
 if (ret < 0)
 goto out;
@@ -635,8 +637,10 @@ again:
 goto again;
 }
 ret = __add_delayed_refs(head, seq, &info_key, &prefs_delayed);
-if (ret)
+if (ret) {
+spin_unlock(&delayed_refs->lock);
 goto out;
+}
 }
 spin_unlock(&delayed_refs->lock);

@@ -892,6 +896,8 @@ static char *iref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
 if (eb != eb_in)
 free_extent_buffer(eb);
 ret = inode_ref_info(parent, 0, fs_root, path, &found_key);
+if (ret > 0)
+ret = -ENOENT;
 if (ret)
 break;
 next_inum = found_key.offset;
diff --git a/fs/btrfs/check-integrity.c b/fs/btrfs/check-integrity.c
index ad0b3ba735b7..d986824bb2b4 100644
--- a/fs/btrfs/check-integrity.c
+++ b/fs/btrfs/check-integrity.c
@@ -644,7 +644,7 @@ static struct btrfsic_dev_state *btrfsic_dev_state_hashtable_lookup(
 static int btrfsic_process_superblock(struct btrfsic_state *state,
 struct btrfs_fs_devices *fs_devices)
 {
-int ret;
+int ret = 0;
 struct btrfs_super_block *selected_super;
 struct list_head *dev_head = &fs_devices->devices;
 struct btrfs_device *device;
@@ -1662,7 +1662,7 @@ static void btrfsic_process_written_block(struct btrfsic_dev_state *dev_state,
 block = btrfsic_block_hashtable_lookup(bdev, dev_bytenr,
 &state->block_hashtable);
 if (NULL != block) {
-u64 bytenr;
+u64 bytenr = 0;
 struct list_head *elem_ref_to;
 struct list_head *tmp_ref_to;

@@ -2777,9 +2777,10 @@ int btrfsic_submit_bh(int rw, struct buffer_head *bh)
 printk(KERN_INFO
 "submit_bh(rw=0x%x, blocknr=%lu (bytenr %llu),"
 " size=%lu, data=%p, bdev=%p)\n",
-rw, bh->b_blocknr,
-(unsigned long long)dev_bytenr, bh->b_size,
-bh->b_data, bh->b_bdev);
+rw, (unsigned long)bh->b_blocknr,
+(unsigned long long)dev_bytenr,
+(unsigned long)bh->b_size, bh->b_data,
+bh->b_bdev);
 btrfsic_process_written_block(dev_state, dev_bytenr,
 bh->b_data, bh->b_size, NULL,
 NULL, bh, rw);
@@ -2844,7 +2845,7 @@ void btrfsic_submit_bio(int rw, struct bio *bio)
 printk(KERN_INFO
 "submit_bio(rw=0x%x, bi_vcnt=%u,"
 " bi_sector=%lu (bytenr %llu), bi_bdev=%p)\n",
-rw, bio->bi_vcnt, bio->bi_sector,
+rw, bio->bi_vcnt, (unsigned long)bio->bi_sector,
 (unsigned long long)dev_bytenr,
 bio->bi_bdev);

diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index 14f1c5a0b2d2..d02c27cd14c7 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -588,6 +588,8 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
 page_offset(bio->bi_io_vec->bv_page),
 PAGE_CACHE_SIZE);
 read_unlock(&em_tree->lock);
+if (!em)
+return -EIO;

 compressed_len = em->block_len;
 cb = kmalloc(compressed_bio_size(root, compressed_len), GFP_NOFS);
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 27ebe61d3ccc..80b6486fd5e6 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -886,7 +886,7 @@ struct btrfs_block_rsv {
 u64 reserved;
 struct btrfs_space_info *space_info;
 spinlock_t lock;
-unsigned int full:1;
+unsigned int full;
 };

 /*
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 7aa9cd36bf1b..534266fe505f 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -962,6 +962,13 @@ static int btree_releasepage(struct page *page, gfp_t gfp_flags)
 tree = &BTRFS_I(page->mapping->host)->io_tree;
 map = &BTRFS_I(page->mapping->host)->extent_tree;

+/*
+ * We need to mask out eg. __GFP_HIGHMEM and __GFP_DMA32 as we're doing
+ * slab allocation from alloc_extent_state down the callchain where
+ * it'd hit a BUG_ON as those flags are not allowed.
+ */
+gfp_flags &= ~GFP_SLAB_BUG_MASK;
+
 ret = try_release_extent_state(map, tree, page, gfp_flags);
 if (!ret)
 return 0;
@@ -2253,6 +2260,12 @@ int open_ctree(struct super_block *sb,
 goto fail_sb_buffer;
 }

+if (sectorsize < PAGE_SIZE) {
+printk(KERN_WARNING "btrfs: Incompatible sector size "
+"found on %s\n", sb->s_id);
+goto fail_sb_buffer;
+}
+
 mutex_lock(&fs_info->chunk_mutex);
 ret = btrfs_read_sys_array(tree_root);
 mutex_unlock(&fs_info->chunk_mutex);
@@ -2294,6 +2307,12 @@ int open_ctree(struct super_block *sb,

 btrfs_close_extra_devices(fs_devices);

+if (!fs_devices->latest_bdev) {
+printk(KERN_CRIT "btrfs: failed to read devices on %s\n",
+sb->s_id);
+goto fail_tree_roots;
+}
+
 retry_root_backup:
 blocksize = btrfs_level_size(tree_root,
 btrfs_super_root_level(disk_super));
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 700879ed64cf..37e0a800d34e 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -34,23 +34,24 @@
 #include "locking.h"
 #include "free-space-cache.h"

-/* control flags for do_chunk_alloc's force field
+/*
+ * control flags for do_chunk_alloc's force field
 * CHUNK_ALLOC_NO_FORCE means to only allocate a chunk
 * if we really need one.
 *
- * CHUNK_ALLOC_FORCE means it must try to allocate one
- *
 * CHUNK_ALLOC_LIMITED means to only try and allocate one
 * if we have very few chunks already allocated. This is
 * used as part of the clustering code to help make sure
 * we have a good pool of storage to cluster in, without
 * filling the FS with empty chunks
 *
+ * CHUNK_ALLOC_FORCE means it must try to allocate one
+ *
 */
 enum {
 CHUNK_ALLOC_NO_FORCE = 0,
-CHUNK_ALLOC_FORCE = 1,
-CHUNK_ALLOC_LIMITED = 2,
+CHUNK_ALLOC_LIMITED = 1,
+CHUNK_ALLOC_FORCE = 2,
 };

 /*
@@ -3311,7 +3312,8 @@ commit_trans:
 }
 data_sinfo->bytes_may_use += bytes;
 trace_btrfs_space_reservation(root->fs_info, "space_info",
-(u64)data_sinfo, bytes, 1);
+(u64)(unsigned long)data_sinfo,
+bytes, 1);
 spin_unlock(&data_sinfo->lock);

 return 0;
@@ -3332,7 +3334,8 @@ void btrfs_free_reserved_data_space(struct inode *inode, u64 bytes)
 spin_lock(&data_sinfo->lock);
 data_sinfo->bytes_may_use -= bytes;
 trace_btrfs_space_reservation(root->fs_info, "space_info",
-(u64)data_sinfo, bytes, 0);
+(u64)(unsigned long)data_sinfo,
+bytes, 0);
 spin_unlock(&data_sinfo->lock);
 }

@@ -3414,7 +3417,7 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans,

 again:
 spin_lock(&space_info->lock);
-if (space_info->force_alloc)
+if (force < space_info->force_alloc)
 force = space_info->force_alloc;
 if (space_info->full) {
 spin_unlock(&space_info->lock);
@@ -3610,12 +3613,15 @@ static int may_commit_transaction(struct btrfs_root *root,
 if (space_info != delayed_rsv->space_info)
 return -ENOSPC;

+spin_lock(&space_info->lock);
 spin_lock(&delayed_rsv->lock);
-if (delayed_rsv->size < bytes) {
+if (space_info->bytes_pinned + delayed_rsv->size < bytes) {
 spin_unlock(&delayed_rsv->lock);
+spin_unlock(&space_info->lock);
 return -ENOSPC;
 }
 spin_unlock(&delayed_rsv->lock);
+spin_unlock(&space_info->lock);

 commit:
 trans = btrfs_join_transaction(root);
@@ -3694,9 +3700,9 @@ again:
 if (used + orig_bytes <= space_info->total_bytes) {
 space_info->bytes_may_use += orig_bytes;
 trace_btrfs_space_reservation(root->fs_info,
 "space_info",
-(u64)space_info,
+(u64)(unsigned long)space_info,
 orig_bytes, 1);
 ret = 0;
 } else {
 /*
@@ -3765,9 +3771,9 @@ again:
 if (used + num_bytes < space_info->total_bytes + avail) {
 space_info->bytes_may_use += orig_bytes;
 trace_btrfs_space_reservation(root->fs_info,
 "space_info",
-(u64)space_info,
+(u64)(unsigned long)space_info,
 orig_bytes, 1);
 ret = 0;
 } else {
 wait_ordered = true;
@@ -3912,8 +3918,8 @@ static void block_rsv_release_bytes(struct btrfs_fs_info *fs_info,
 spin_lock(&space_info->lock);
 space_info->bytes_may_use -= num_bytes;
 trace_btrfs_space_reservation(fs_info, "space_info",
-(u64)space_info,
+(u64)(unsigned long)space_info,
 num_bytes, 0);
 space_info->reservation_progress++;
 spin_unlock(&space_info->lock);
 }
@@ -4104,7 +4110,7 @@ static u64 calc_global_metadata_size(struct btrfs_fs_info *fs_info)
 num_bytes += div64_u64(data_used + meta_used, 50);

 if (num_bytes * 3 > meta_used)
-num_bytes = div64_u64(meta_used, 3);
+num_bytes = div64_u64(meta_used, 3) * 2;

 return ALIGN(num_bytes, fs_info->extent_root->leafsize << 10);
 }
@@ -4131,14 +4137,14 @@ static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
 block_rsv->reserved += num_bytes;
 sinfo->bytes_may_use += num_bytes;
 trace_btrfs_space_reservation(fs_info, "space_info",
-(u64)sinfo, num_bytes, 1);
+(u64)(unsigned long)sinfo, num_bytes, 1);
 }

 if (block_rsv->reserved >= block_rsv->size) {
 num_bytes = block_rsv->reserved - block_rsv->size;
 sinfo->bytes_may_use -= num_bytes;
 trace_btrfs_space_reservation(fs_info, "space_info",
-(u64)sinfo, num_bytes, 0);
+(u64)(unsigned long)sinfo, num_bytes, 0);
 sinfo->reservation_progress++;
 block_rsv->reserved = block_rsv->size;
 block_rsv->full = 1;
@@ -4191,7 +4197,8 @@ void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
 if (!trans->bytes_reserved)
 return;

-trace_btrfs_space_reservation(root->fs_info, "transaction", (u64)trans,
+trace_btrfs_space_reservation(root->fs_info, "transaction",
+(u64)(unsigned long)trans,
 trans->bytes_reserved, 0);
 btrfs_block_rsv_release(root, trans->block_rsv, trans->bytes_reserved);
 trans->bytes_reserved = 0;
@@ -4709,9 +4716,9 @@ static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
 space_info->bytes_reserved += num_bytes;
 if (reserve == RESERVE_ALLOC) {
 trace_btrfs_space_reservation(cache->fs_info,
 "space_info",
-(u64)space_info,
+(u64)(unsigned long)space_info,
 num_bytes, 0);
 space_info->bytes_may_use -= num_bytes;
 }
 }
@@ -5794,6 +5801,7 @@ int btrfs_reserve_extent(struct btrfs_trans_handle *trans,
 u64 search_end, struct btrfs_key *ins,
 u64 data)
 {
+bool final_tried = false;
 int ret;
 u64 search_start = 0;

@@ -5813,22 +5821,25 @@ again:
 search_start, search_end, hint_byte,
 ins, data);

-if (ret == -ENOSPC && num_bytes > min_alloc_size) {
-num_bytes = num_bytes >> 1;
-num_bytes = num_bytes & ~(root->sectorsize - 1);
-num_bytes = max(num_bytes, min_alloc_size);
-do_chunk_alloc(trans, root->fs_info->extent_root,
-num_bytes, data, CHUNK_ALLOC_FORCE);
-goto again;
-}
-if (ret == -ENOSPC && btrfs_test_opt(root, ENOSPC_DEBUG)) {
-struct btrfs_space_info *sinfo;
-
-sinfo = __find_space_info(root->fs_info, data);
-printk(KERN_ERR "btrfs allocation failed flags %llu, "
-"wanted %llu\n", (unsigned long long)data,
-(unsigned long long)num_bytes);
-dump_space_info(sinfo, num_bytes, 1);
+if (ret == -ENOSPC) {
+if (!final_tried) {
+num_bytes = num_bytes >> 1;
+num_bytes = num_bytes & ~(root->sectorsize - 1);
+num_bytes = max(num_bytes, min_alloc_size);
+do_chunk_alloc(trans, root->fs_info->extent_root,
+num_bytes, data, CHUNK_ALLOC_FORCE);
+if (num_bytes == min_alloc_size)
+final_tried = true;
+goto again;
+} else if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
+struct btrfs_space_info *sinfo;
+
+sinfo = __find_space_info(root->fs_info, data);
+printk(KERN_ERR "btrfs allocation failed flags %llu, "
+"wanted %llu\n", (unsigned long long)data,
+(unsigned long long)num_bytes);
+dump_space_info(sinfo, num_bytes, 1);
+}
 }

 trace_btrfs_reserved_extent_alloc(root, ins->objectid, ins->offset);
@@ -7881,9 +7892,16 @@ int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range)
 u64 start;
 u64 end;
 u64 trimmed = 0;
+u64 total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
 int ret = 0;

-cache = btrfs_lookup_block_group(fs_info, range->start);
+/*
+ * try to trim all FS space, our block group may start from non-zero.
+ */
+if (range->len == total_bytes)
+cache = btrfs_lookup_first_block_group(fs_info, range->start);
+else
+cache = btrfs_lookup_block_group(fs_info, range->start);

 while (cache) {
 if (cache->key.objectid >= (range->start + range->len)) {
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 9d09a4f81875..a55fbe6252de 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -513,6 +513,15 @@ hit_next:
 WARN_ON(state->end < start);
 last_end = state->end;

+if (state->end < end && !need_resched())
+next_node = rb_next(&state->rb_node);
+else
+next_node = NULL;
+
+/* the state doesn't have the wanted bits, go ahead */
+if (!(state->state & bits))
+goto next;
+
 /*
 * | ---- desired range ---- |
 * | state | or
@@ -565,20 +574,15 @@ hit_next:
 goto out;
 }

-if (state->end < end && prealloc && !need_resched())
-next_node = rb_next(&state->rb_node);
-else
-next_node = NULL;
-
 set |= clear_state_bit(tree, state, &bits, wake);
+next:
 if (last_end == (u64)-1)
 goto out;
 start = last_end + 1;
 if (start <= end && next_node) {
 state = rb_entry(next_node, struct extent_state,
 rb_node);
-if (state->start == start)
-goto hit_next;
+goto hit_next;
 }
 goto search_again;

@@ -961,8 +965,6 @@ hit_next:

 set_state_bits(tree, state, &bits);
 clear_state_bit(tree, state, &clear_bits, 0);
-
-merge_state(tree, state);
 if (last_end == (u64)-1)
 goto out;

@@ -1007,7 +1009,6 @@ hit_next:
 if (state->end <= end) {
 set_state_bits(tree, state, &bits);
 clear_state_bit(tree, state, &clear_bits, 0);
-merge_state(tree, state);
 if (last_end == (u64)-1)
 goto out;
 start = last_end + 1;
@@ -1068,8 +1069,6 @@ hit_next:

 set_state_bits(tree, prealloc, &bits);
 clear_state_bit(tree, prealloc, &clear_bits, 0);
-
-merge_state(tree, prealloc);
 prealloc = NULL;
 goto out;
 }
@@ -2154,13 +2153,46 @@ static int bio_readpage_error(struct bio *failed_bio, struct page *page,
 "this_mirror=%d, num_copies=%d, in_validation=%d\n", read_mode,
 failrec->this_mirror, num_copies, failrec->in_validation);

-tree->ops->submit_bio_hook(inode, read_mode, bio, failrec->this_mirror,
-failrec->bio_flags, 0);
-return 0;
+ret = tree->ops->submit_bio_hook(inode, read_mode, bio,
+failrec->this_mirror,
+failrec->bio_flags, 0);
+return ret;
 }

 /* lots and lots of room for performance fixes in the end_bio funcs */

+int end_extent_writepage(struct page *page, int err, u64 start, u64 end)
+{
+int uptodate = (err == 0);
+struct extent_io_tree *tree;
+int ret;
+
+tree = &BTRFS_I(page->mapping->host)->io_tree;
+
+if (tree->ops && tree->ops->writepage_end_io_hook) {
+ret = tree->ops->writepage_end_io_hook(page, start,
+end, NULL, uptodate);
+if (ret)
+uptodate = 0;
+}
+
+if (!uptodate && tree->ops &&
+tree->ops->writepage_io_failed_hook) {
+ret = tree->ops->writepage_io_failed_hook(NULL, page,
+start, end, NULL);
+/* Writeback already completed */
+if (ret == 0)
+return 1;
+}
+
+if (!uptodate) {
+clear_extent_uptodate(tree, start, end, NULL, GFP_NOFS);
+ClearPageUptodate(page);
+SetPageError(page);
+}
+return 0;
+}
+
 /*
 * after a writepage IO is done, we need to:
 * clear the uptodate bits on error
@@ -2172,13 +2204,11 @@ static int bio_readpage_error(struct bio *failed_bio, struct page *page,
 */
 static void end_bio_extent_writepage(struct bio *bio, int err)
 {
-int uptodate = err == 0;
 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
 struct extent_io_tree *tree;
 u64 start;
 u64 end;
 int whole_page;
-int ret;

 do {
 struct page *page = bvec->bv_page;
@@ -2195,28 +2225,9 @@ static void end_bio_extent_writepage(struct bio *bio, int err)

 if (--bvec >= bio->bi_io_vec)
 prefetchw(&bvec->bv_page->flags);
-if (tree->ops && tree->ops->writepage_end_io_hook) {
-ret = tree->ops->writepage_end_io_hook(page, start,
-end, NULL, uptodate);
-if (ret)
-uptodate = 0;
-}
-
-if (!uptodate && tree->ops &&
-tree->ops->writepage_io_failed_hook) {
-ret = tree->ops->writepage_io_failed_hook(bio, page,
-start, end, NULL);
-if (ret == 0) {
-uptodate = (err == 0);
-continue;
-}
-}

-if (!uptodate) {
-clear_extent_uptodate(tree, start, end, NULL, GFP_NOFS);
-ClearPageUptodate(page);
-SetPageError(page);
-}
+if (end_extent_writepage(page, err, start, end))
+continue;

 if (whole_page)
 end_page_writeback(page);
@@ -2779,9 +2790,12 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
 delalloc_start = delalloc_end + 1;
 continue;
 }
-tree->ops->fill_delalloc(inode, page, delalloc_start,
-delalloc_end, &page_started,
-&nr_written);
+ret = tree->ops->fill_delalloc(inode, page,
+delalloc_start,
+delalloc_end,
+&page_started,
+&nr_written);
+BUG_ON(ret);
 /*
 * delalloc_end is already one less than the total
 * length, so we don't subtract one from
@@ -2818,8 +2832,12 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
 if (tree->ops && tree->ops->writepage_start_hook) {
 ret = tree->ops->writepage_start_hook(page, start,
 page_end);
-if (ret == -EAGAIN) {
-redirty_page_for_writepage(wbc, page);
+if (ret) {
+/* Fixup worker will requeue */
+if (ret == -EBUSY)
+wbc->pages_skipped++;
+else
+redirty_page_for_writepage(wbc, page);
 update_nr_written(page, wbc, nr_written);
 unlock_page(page);
 ret = 0;
@@ -3289,7 +3307,7 @@ int try_release_extent_mapping(struct extent_map_tree *map,
 len = end - start + 1;
 write_lock(&map->lock);
 em = lookup_extent_mapping(map, start, len);
-if (IS_ERR_OR_NULL(em)) {
+if (!em) {
 write_unlock(&map->lock);
 break;
 }
@@ -3853,10 +3871,9 @@ int clear_extent_buffer_uptodate(struct extent_io_tree *tree,
 num_pages = num_extent_pages(eb->start, eb->len);
 clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);

-if (eb_straddles_pages(eb)) {
-clear_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
-cached_state, GFP_NOFS);
-}
+clear_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
+cached_state, GFP_NOFS);
+
 for (i = 0; i < num_pages; i++) {
 page = extent_buffer_page(eb, i);
 if (page)
@@ -3909,6 +3926,8 @@ int extent_range_uptodate(struct extent_io_tree *tree,
 while (start <= end) {
 index = start >> PAGE_CACHE_SHIFT;
 page = find_get_page(tree->mapping, index);
+if (!page)
+return 1;
 uptodate = PageUptodate(page);
 page_cache_release(page);
 if (!uptodate) {
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index bc6a042cb6fc..cecc3518c121 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -319,4 +319,5 @@ struct btrfs_mapping_tree;
 int repair_io_failure(struct btrfs_mapping_tree *map_tree, u64 start,
 u64 length, u64 logical, struct page *page,
 int mirror_num);
+int end_extent_writepage(struct page *page, int err, u64 start, u64 end);
 #endif
diff --git a/fs/btrfs/extent_map.h b/fs/btrfs/extent_map.h
index 33a7890b1f40..1195f09761fe 100644
--- a/fs/btrfs/extent_map.h
+++ b/fs/btrfs/extent_map.h
@@ -26,8 +26,8 @@ struct extent_map {
 unsigned long flags;
 struct block_device *bdev;
 atomic_t refs;
-unsigned int in_tree:1;
-unsigned int compress_type:4;
+unsigned int in_tree;
+unsigned int compress_type;
 };

 struct extent_map_tree {
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 859ba2dd8890..e8d06b6b9194 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -1605,6 +1605,14 @@ static long btrfs_fallocate(struct file *file, int mode,
 return -EOPNOTSUPP;

 /*
+ * Make sure we have enough space before we do the
+ * allocation.
+ */
+ret = btrfs_check_data_free_space(inode, len);
+if (ret)
+return ret;
+
+/*
 * wait for ordered IO before we have any locks. We'll loop again
 * below with the locks held.
 */
@@ -1667,27 +1675,12 @@ static long btrfs_fallocate(struct file *file, int mode,
 if (em->block_start == EXTENT_MAP_HOLE ||
 (cur_offset >= inode->i_size &&
 !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
-
-/*
- * Make sure we have enough space before we do the
- * allocation.
- */
-ret = btrfs_check_data_free_space(inode, last_byte -
-cur_offset);
-if (ret) {
-free_extent_map(em);
-break;
-}
-
 ret = btrfs_prealloc_file_range(inode, mode, cur_offset,
 last_byte - cur_offset,
 1 << inode->i_blkbits,
 offset + len,
 &alloc_hint);

-/* Let go of our reservation. */
-btrfs_free_reserved_data_space(inode, last_byte -
-cur_offset);
 if (ret < 0) {
 free_extent_map(em);
 break;
@@ -1715,6 +1708,8 @@ static long btrfs_fallocate(struct file *file, int mode,
 &cached_state, GFP_NOFS);
 out:
 mutex_unlock(&inode->i_mutex);
+/* Let go of our reservation. */
+btrfs_free_reserved_data_space(inode, len);
 return ret;
 }

@@ -1761,7 +1756,7 @@ static int find_desired_extent(struct inode *inode, loff_t *offset, int origin)
 start - root->sectorsize,
 root->sectorsize, 0);
 if (IS_ERR(em)) {
-ret = -ENXIO;
+ret = PTR_ERR(em);
 goto out;
 }
 last_end = em->start + em->len;
@@ -1773,7 +1768,7 @@ static int find_desired_extent(struct inode *inode, loff_t *offset, int origin)
 while (1) {
 em = btrfs_get_extent_fiemap(inode, NULL, 0, start, len, 0);
 if (IS_ERR(em)) {
-ret = -ENXIO;
+ret = PTR_ERR(em);
 break;
 }

diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index d20ff87ca603..710ea380c7ed 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c | |||
@@ -777,6 +777,7 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info, | |||
777 | spin_lock(&block_group->lock); | 777 | spin_lock(&block_group->lock); |
778 | if (block_group->disk_cache_state != BTRFS_DC_WRITTEN) { | 778 | if (block_group->disk_cache_state != BTRFS_DC_WRITTEN) { |
779 | spin_unlock(&block_group->lock); | 779 | spin_unlock(&block_group->lock); |
780 | btrfs_free_path(path); | ||
780 | goto out; | 781 | goto out; |
781 | } | 782 | } |
782 | spin_unlock(&block_group->lock); | 783 | spin_unlock(&block_group->lock); |
@@ -2242,7 +2243,7 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group, | |||
2242 | if (entry->bitmap) { | 2243 | if (entry->bitmap) { |
2243 | ret = btrfs_alloc_from_bitmap(block_group, | 2244 | ret = btrfs_alloc_from_bitmap(block_group, |
2244 | cluster, entry, bytes, | 2245 | cluster, entry, bytes, |
2245 | min_start); | 2246 | cluster->window_start); |
2246 | if (ret == 0) { | 2247 | if (ret == 0) { |
2247 | node = rb_next(&entry->offset_index); | 2248 | node = rb_next(&entry->offset_index); |
2248 | if (!node) | 2249 | if (!node) |
@@ -2251,6 +2252,7 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group, | |||
2251 | offset_index); | 2252 | offset_index); |
2252 | continue; | 2253 | continue; |
2253 | } | 2254 | } |
2255 | cluster->window_start += bytes; | ||
2254 | } else { | 2256 | } else { |
2255 | ret = entry->offset; | 2257 | ret = entry->offset; |
2256 | 2258 | ||
@@ -2475,7 +2477,7 @@ setup_cluster_bitmap(struct btrfs_block_group_cache *block_group, | |||
2475 | } | 2477 | } |
2476 | 2478 | ||
2477 | list_for_each_entry(entry, bitmaps, list) { | 2479 | list_for_each_entry(entry, bitmaps, list) { |
2478 | if (entry->bytes < min_bytes) | 2480 | if (entry->bytes < bytes) |
2479 | continue; | 2481 | continue; |
2480 | ret = btrfs_bitmap_cluster(block_group, entry, cluster, offset, | 2482 | ret = btrfs_bitmap_cluster(block_group, entry, cluster, offset, |
2481 | bytes, cont1_bytes, min_bytes); | 2483 | bytes, cont1_bytes, min_bytes); |
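
The first free-space-cache hunk plugs a leak: the early "cache not written yet" exit now frees the search path before jumping to out. One way to make such exits hard to get wrong is a single cleanup label that every path funnels through, as in the sketch below; the struct and helper names are invented for the illustration and do not come from btrfs.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical resource standing in for a btrfs_path. */
struct search_path { char *buf; };

static struct search_path *alloc_path(void)
{
	struct search_path *p = calloc(1, sizeof(*p));

	if (p)
		p->buf = malloc(64);
	return p;
}

static void free_path(struct search_path *p)
{
	if (!p)
		return;
	free(p->buf);
	free(p);
}

/* Every exit, early or not, runs through the same cleanup label,
 * so no early return can forget to release the path. */
static int load_cache(int cache_written)
{
	int ret = 0;
	struct search_path *path = alloc_path();

	if (!path || !path->buf) {
		free_path(path);
		return -1;
	}

	if (!cache_written) {			/* the "nothing to load yet" early exit */
		ret = 0;
		goto out;
	}

	strcpy(path->buf, "search result");	/* stand-in for the real lookup */
	ret = 1;
out:
	free_path(path);
	return ret;
}

int main(void)
{
	printf("%d %d\n", load_cache(0), load_cache(1));
	return 0;
}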
diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c index 213ffa86ce1b..ee15d88b33d2 100644 --- a/fs/btrfs/inode-map.c +++ b/fs/btrfs/inode-map.c | |||
@@ -438,7 +438,8 @@ int btrfs_save_ino_cache(struct btrfs_root *root, | |||
438 | trans->bytes_reserved); | 438 | trans->bytes_reserved); |
439 | if (ret) | 439 | if (ret) |
440 | goto out; | 440 | goto out; |
441 | trace_btrfs_space_reservation(root->fs_info, "ino_cache", (u64)trans, | 441 | trace_btrfs_space_reservation(root->fs_info, "ino_cache", |
442 | (u64)(unsigned long)trans, | ||
442 | trans->bytes_reserved, 1); | 443 | trans->bytes_reserved, 1); |
443 | again: | 444 | again: |
444 | inode = lookup_free_ino_inode(root, path); | 445 | inode = lookup_free_ino_inode(root, path); |
@@ -500,7 +501,8 @@ again: | |||
500 | out_put: | 501 | out_put: |
501 | iput(inode); | 502 | iput(inode); |
502 | out_release: | 503 | out_release: |
503 | trace_btrfs_space_reservation(root->fs_info, "ino_cache", (u64)trans, | 504 | trace_btrfs_space_reservation(root->fs_info, "ino_cache", |
505 | (u64)(unsigned long)trans, | ||
504 | trans->bytes_reserved, 0); | 506 | trans->bytes_reserved, 0); |
505 | btrfs_block_rsv_release(root, trans->block_rsv, trans->bytes_reserved); | 507 | btrfs_block_rsv_release(root, trans->block_rsv, trans->bytes_reserved); |
506 | out: | 508 | out: |
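
The trace_btrfs_space_reservation change here (and the matching one in transaction.c further down) casts the transaction pointer through unsigned long before widening it to u64, which keeps 32-bit builds from warning about a pointer-to-integer size mismatch. The portable user-space spelling of the same idea is to go through uintptr_t, as in this sketch; trace_reservation() is a made-up stand-in for the tracepoint.

#include <stdint.h>
#include <stdio.h>

/* Pretend trace hook that wants a 64-bit id for any object. */
static void trace_reservation(uint64_t id, long bytes)
{
	printf("reserve id=%llu bytes=%ld\n", (unsigned long long)id, bytes);
}

int main(void)
{
	int object;		/* stands in for a transaction handle */

	/* Casting the pointer straight to a wider integer can warn on
	 * 32-bit targets; widening from uintptr_t first is always clean. */
	trace_reservation((uint64_t)(uintptr_t)&object, 4096);
	return 0;
}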
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 0da19a0ea00d..892b34785ccc 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c | |||
@@ -1555,6 +1555,7 @@ static void btrfs_writepage_fixup_worker(struct btrfs_work *work) | |||
1555 | struct inode *inode; | 1555 | struct inode *inode; |
1556 | u64 page_start; | 1556 | u64 page_start; |
1557 | u64 page_end; | 1557 | u64 page_end; |
1558 | int ret; | ||
1558 | 1559 | ||
1559 | fixup = container_of(work, struct btrfs_writepage_fixup, work); | 1560 | fixup = container_of(work, struct btrfs_writepage_fixup, work); |
1560 | page = fixup->page; | 1561 | page = fixup->page; |
@@ -1582,12 +1583,21 @@ again: | |||
1582 | page_end, &cached_state, GFP_NOFS); | 1583 | page_end, &cached_state, GFP_NOFS); |
1583 | unlock_page(page); | 1584 | unlock_page(page); |
1584 | btrfs_start_ordered_extent(inode, ordered, 1); | 1585 | btrfs_start_ordered_extent(inode, ordered, 1); |
1586 | btrfs_put_ordered_extent(ordered); | ||
1585 | goto again; | 1587 | goto again; |
1586 | } | 1588 | } |
1587 | 1589 | ||
1588 | BUG(); | 1590 | ret = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE); |
1591 | if (ret) { | ||
1592 | mapping_set_error(page->mapping, ret); | ||
1593 | end_extent_writepage(page, ret, page_start, page_end); | ||
1594 | ClearPageChecked(page); | ||
1595 | goto out; | ||
1596 | } | ||
1597 | |||
1589 | btrfs_set_extent_delalloc(inode, page_start, page_end, &cached_state); | 1598 | btrfs_set_extent_delalloc(inode, page_start, page_end, &cached_state); |
1590 | ClearPageChecked(page); | 1599 | ClearPageChecked(page); |
1600 | set_page_dirty(page); | ||
1591 | out: | 1601 | out: |
1592 | unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start, page_end, | 1602 | unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start, page_end, |
1593 | &cached_state, GFP_NOFS); | 1603 | &cached_state, GFP_NOFS); |
@@ -1630,7 +1640,7 @@ static int btrfs_writepage_start_hook(struct page *page, u64 start, u64 end) | |||
1630 | fixup->work.func = btrfs_writepage_fixup_worker; | 1640 | fixup->work.func = btrfs_writepage_fixup_worker; |
1631 | fixup->page = page; | 1641 | fixup->page = page; |
1632 | btrfs_queue_worker(&root->fs_info->fixup_workers, &fixup->work); | 1642 | btrfs_queue_worker(&root->fs_info->fixup_workers, &fixup->work); |
1633 | return -EAGAIN; | 1643 | return -EBUSY; |
1634 | } | 1644 | } |
1635 | 1645 | ||
1636 | static int insert_reserved_file_extent(struct btrfs_trans_handle *trans, | 1646 | static int insert_reserved_file_extent(struct btrfs_trans_handle *trans, |
@@ -4575,7 +4585,8 @@ int btrfs_add_link(struct btrfs_trans_handle *trans, | |||
4575 | ret = btrfs_insert_dir_item(trans, root, name, name_len, | 4585 | ret = btrfs_insert_dir_item(trans, root, name, name_len, |
4576 | parent_inode, &key, | 4586 | parent_inode, &key, |
4577 | btrfs_inode_type(inode), index); | 4587 | btrfs_inode_type(inode), index); |
4578 | BUG_ON(ret); | 4588 | if (ret) |
4589 | goto fail_dir_item; | ||
4579 | 4590 | ||
4580 | btrfs_i_size_write(parent_inode, parent_inode->i_size + | 4591 | btrfs_i_size_write(parent_inode, parent_inode->i_size + |
4581 | name_len * 2); | 4592 | name_len * 2); |
@@ -4583,6 +4594,23 @@ int btrfs_add_link(struct btrfs_trans_handle *trans, | |||
4583 | ret = btrfs_update_inode(trans, root, parent_inode); | 4594 | ret = btrfs_update_inode(trans, root, parent_inode); |
4584 | } | 4595 | } |
4585 | return ret; | 4596 | return ret; |
4597 | |||
4598 | fail_dir_item: | ||
4599 | if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) { | ||
4600 | u64 local_index; | ||
4601 | int err; | ||
4602 | err = btrfs_del_root_ref(trans, root->fs_info->tree_root, | ||
4603 | key.objectid, root->root_key.objectid, | ||
4604 | parent_ino, &local_index, name, name_len); | ||
4605 | |||
4606 | } else if (add_backref) { | ||
4607 | u64 local_index; | ||
4608 | int err; | ||
4609 | |||
4610 | err = btrfs_del_inode_ref(trans, root, name, name_len, | ||
4611 | ino, parent_ino, &local_index); | ||
4612 | } | ||
4613 | return ret; | ||
4586 | } | 4614 | } |
4587 | 4615 | ||
4588 | static int btrfs_add_nondir(struct btrfs_trans_handle *trans, | 4616 | static int btrfs_add_nondir(struct btrfs_trans_handle *trans, |
@@ -6401,18 +6429,23 @@ int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) | |||
6401 | unsigned long zero_start; | 6429 | unsigned long zero_start; |
6402 | loff_t size; | 6430 | loff_t size; |
6403 | int ret; | 6431 | int ret; |
6432 | int reserved = 0; | ||
6404 | u64 page_start; | 6433 | u64 page_start; |
6405 | u64 page_end; | 6434 | u64 page_end; |
6406 | 6435 | ||
6407 | ret = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE); | 6436 | ret = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE); |
6408 | if (!ret) | 6437 | if (!ret) { |
6409 | ret = btrfs_update_time(vma->vm_file); | 6438 | ret = btrfs_update_time(vma->vm_file); |
6439 | reserved = 1; | ||
6440 | } | ||
6410 | if (ret) { | 6441 | if (ret) { |
6411 | if (ret == -ENOMEM) | 6442 | if (ret == -ENOMEM) |
6412 | ret = VM_FAULT_OOM; | 6443 | ret = VM_FAULT_OOM; |
6413 | else /* -ENOSPC, -EIO, etc */ | 6444 | else /* -ENOSPC, -EIO, etc */ |
6414 | ret = VM_FAULT_SIGBUS; | 6445 | ret = VM_FAULT_SIGBUS; |
6415 | goto out; | 6446 | if (reserved) |
6447 | goto out; | ||
6448 | goto out_noreserve; | ||
6416 | } | 6449 | } |
6417 | 6450 | ||
6418 | ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */ | 6451 | ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */ |
@@ -6495,6 +6528,7 @@ out_unlock: | |||
6495 | unlock_page(page); | 6528 | unlock_page(page); |
6496 | out: | 6529 | out: |
6497 | btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE); | 6530 | btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE); |
6531 | out_noreserve: | ||
6498 | return ret; | 6532 | return ret; |
6499 | } | 6533 | } |
6500 | 6534 | ||
@@ -6690,8 +6724,10 @@ int btrfs_create_subvol_root(struct btrfs_trans_handle *trans, | |||
6690 | int err; | 6724 | int err; |
6691 | u64 index = 0; | 6725 | u64 index = 0; |
6692 | 6726 | ||
6693 | inode = btrfs_new_inode(trans, new_root, NULL, "..", 2, new_dirid, | 6727 | inode = btrfs_new_inode(trans, new_root, NULL, "..", 2, |
6694 | new_dirid, S_IFDIR | 0700, &index); | 6728 | new_dirid, new_dirid, |
6729 | S_IFDIR | (~current_umask() & S_IRWXUGO), | ||
6730 | &index); | ||
6695 | if (IS_ERR(inode)) | 6731 | if (IS_ERR(inode)) |
6696 | return PTR_ERR(inode); | 6732 | return PTR_ERR(inode); |
6697 | inode->i_op = &btrfs_dir_inode_operations; | 6733 | inode->i_op = &btrfs_dir_inode_operations; |
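
The btrfs_add_link hunk replaces a BUG_ON() on a failed directory-item insert with a fail_dir_item label that removes the root ref or inode ref added earlier in the function, so a failure unwinds instead of crashing the machine. The shape of that undo-on-failure path, reduced to two invented helpers, looks like this:

#include <stdio.h>
#include <errno.h>

static int backref_added;

/* Invented two-step operation: step one records a back reference,
 * step two inserts the directory item and may fail. */
static int  add_backref(void)           { backref_added = 1; return 0; }
static void del_backref(void)           { backref_added = 0; }
static int  insert_dir_item(int fail)   { return fail ? -EEXIST : 0; }

static int add_link(int make_insert_fail)
{
	int ret = add_backref();
	if (ret)
		return ret;

	ret = insert_dir_item(make_insert_fail);
	if (ret)
		goto fail_dir_item;	/* don't crash; unwind what we did */

	return 0;

fail_dir_item:
	del_backref();			/* best-effort rollback of step one */
	return ret;			/* report the original error */
}

int main(void)
{
	printf("ok path: %d, backref=%d\n", add_link(0), backref_added);
	printf("fail path: %d, backref=%d\n", add_link(1), backref_added);
	return 0;
}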
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index ab620014bcc3..d8b54715c2de 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c | |||
@@ -861,6 +861,7 @@ static int cluster_pages_for_defrag(struct inode *inode, | |||
861 | int i_done; | 861 | int i_done; |
862 | struct btrfs_ordered_extent *ordered; | 862 | struct btrfs_ordered_extent *ordered; |
863 | struct extent_state *cached_state = NULL; | 863 | struct extent_state *cached_state = NULL; |
864 | struct extent_io_tree *tree; | ||
864 | gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping); | 865 | gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping); |
865 | 866 | ||
866 | if (isize == 0) | 867 | if (isize == 0) |
@@ -871,18 +872,34 @@ static int cluster_pages_for_defrag(struct inode *inode, | |||
871 | num_pages << PAGE_CACHE_SHIFT); | 872 | num_pages << PAGE_CACHE_SHIFT); |
872 | if (ret) | 873 | if (ret) |
873 | return ret; | 874 | return ret; |
874 | again: | ||
875 | ret = 0; | ||
876 | i_done = 0; | 875 | i_done = 0; |
876 | tree = &BTRFS_I(inode)->io_tree; | ||
877 | 877 | ||
878 | /* step one, lock all the pages */ | 878 | /* step one, lock all the pages */ |
879 | for (i = 0; i < num_pages; i++) { | 879 | for (i = 0; i < num_pages; i++) { |
880 | struct page *page; | 880 | struct page *page; |
881 | again: | ||
881 | page = find_or_create_page(inode->i_mapping, | 882 | page = find_or_create_page(inode->i_mapping, |
882 | start_index + i, mask); | 883 | start_index + i, mask); |
883 | if (!page) | 884 | if (!page) |
884 | break; | 885 | break; |
885 | 886 | ||
887 | page_start = page_offset(page); | ||
888 | page_end = page_start + PAGE_CACHE_SIZE - 1; | ||
889 | while (1) { | ||
890 | lock_extent(tree, page_start, page_end, GFP_NOFS); | ||
891 | ordered = btrfs_lookup_ordered_extent(inode, | ||
892 | page_start); | ||
893 | unlock_extent(tree, page_start, page_end, GFP_NOFS); | ||
894 | if (!ordered) | ||
895 | break; | ||
896 | |||
897 | unlock_page(page); | ||
898 | btrfs_start_ordered_extent(inode, ordered, 1); | ||
899 | btrfs_put_ordered_extent(ordered); | ||
900 | lock_page(page); | ||
901 | } | ||
902 | |||
886 | if (!PageUptodate(page)) { | 903 | if (!PageUptodate(page)) { |
887 | btrfs_readpage(NULL, page); | 904 | btrfs_readpage(NULL, page); |
888 | lock_page(page); | 905 | lock_page(page); |
@@ -893,15 +910,22 @@ again: | |||
893 | break; | 910 | break; |
894 | } | 911 | } |
895 | } | 912 | } |
913 | |||
896 | isize = i_size_read(inode); | 914 | isize = i_size_read(inode); |
897 | file_end = (isize - 1) >> PAGE_CACHE_SHIFT; | 915 | file_end = (isize - 1) >> PAGE_CACHE_SHIFT; |
898 | if (!isize || page->index > file_end || | 916 | if (!isize || page->index > file_end) { |
899 | page->mapping != inode->i_mapping) { | ||
900 | /* whoops, we blew past eof, skip this page */ | 917 | /* whoops, we blew past eof, skip this page */ |
901 | unlock_page(page); | 918 | unlock_page(page); |
902 | page_cache_release(page); | 919 | page_cache_release(page); |
903 | break; | 920 | break; |
904 | } | 921 | } |
922 | |||
923 | if (page->mapping != inode->i_mapping) { | ||
924 | unlock_page(page); | ||
925 | page_cache_release(page); | ||
926 | goto again; | ||
927 | } | ||
928 | |||
905 | pages[i] = page; | 929 | pages[i] = page; |
906 | i_done++; | 930 | i_done++; |
907 | } | 931 | } |
@@ -924,25 +948,6 @@ again: | |||
924 | lock_extent_bits(&BTRFS_I(inode)->io_tree, | 948 | lock_extent_bits(&BTRFS_I(inode)->io_tree, |
925 | page_start, page_end - 1, 0, &cached_state, | 949 | page_start, page_end - 1, 0, &cached_state, |
926 | GFP_NOFS); | 950 | GFP_NOFS); |
927 | ordered = btrfs_lookup_first_ordered_extent(inode, page_end - 1); | ||
928 | if (ordered && | ||
929 | ordered->file_offset + ordered->len > page_start && | ||
930 | ordered->file_offset < page_end) { | ||
931 | btrfs_put_ordered_extent(ordered); | ||
932 | unlock_extent_cached(&BTRFS_I(inode)->io_tree, | ||
933 | page_start, page_end - 1, | ||
934 | &cached_state, GFP_NOFS); | ||
935 | for (i = 0; i < i_done; i++) { | ||
936 | unlock_page(pages[i]); | ||
937 | page_cache_release(pages[i]); | ||
938 | } | ||
939 | btrfs_wait_ordered_range(inode, page_start, | ||
940 | page_end - page_start); | ||
941 | goto again; | ||
942 | } | ||
943 | if (ordered) | ||
944 | btrfs_put_ordered_extent(ordered); | ||
945 | |||
946 | clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, | 951 | clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, |
947 | page_end - 1, EXTENT_DIRTY | EXTENT_DELALLOC | | 952 | page_end - 1, EXTENT_DIRTY | EXTENT_DELALLOC | |
948 | EXTENT_DO_ACCOUNTING, 0, 0, &cached_state, | 953 | EXTENT_DO_ACCOUNTING, 0, 0, &cached_state, |
@@ -1065,7 +1070,7 @@ int btrfs_defrag_file(struct inode *inode, struct file *file, | |||
1065 | i = range->start >> PAGE_CACHE_SHIFT; | 1070 | i = range->start >> PAGE_CACHE_SHIFT; |
1066 | } | 1071 | } |
1067 | if (!max_to_defrag) | 1072 | if (!max_to_defrag) |
1068 | max_to_defrag = last_index; | 1073 | max_to_defrag = last_index + 1; |
1069 | 1074 | ||
1070 | /* | 1075 | /* |
1071 | * make writeback starts from i, so the defrag range can be | 1076 | * make writeback starts from i, so the defrag range can be |
@@ -1327,6 +1332,12 @@ static noinline int btrfs_ioctl_snap_create_transid(struct file *file, | |||
1327 | goto out; | 1332 | goto out; |
1328 | } | 1333 | } |
1329 | 1334 | ||
1335 | if (name[0] == '.' && | ||
1336 | (namelen == 1 || (name[1] == '.' && namelen == 2))) { | ||
1337 | ret = -EEXIST; | ||
1338 | goto out; | ||
1339 | } | ||
1340 | |||
1330 | if (subvol) { | 1341 | if (subvol) { |
1331 | ret = btrfs_mksubvol(&file->f_path, name, namelen, | 1342 | ret = btrfs_mksubvol(&file->f_path, name, namelen, |
1332 | NULL, transid, readonly); | 1343 | NULL, transid, readonly); |
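
In cluster_pages_for_defrag the ordered-extent handling moves from a single after-the-fact check into the page-collection loop: each page's range is locked, checked for a pending ordered extent, and if one is found the locks are dropped, the ordered I/O is waited on, and that page is retried. Stripped of the btrfs machinery, the retry-until-quiescent loop looks like the sketch below; has_pending_io() and wait_for_io() are placeholders.

#include <stdio.h>

static int pending[4] = { 0, 2, 0, 1 };	/* pretend in-flight ordered I/O per page */

/* Placeholder helpers: "check" and "wait" on a fake page. */
static int  has_pending_io(int page)  { return pending[page] > 0; }
static void wait_for_io(int page)     { pending[page]--; }	/* pretend one flush finished */

static void prepare_page(int page)
{
	/* Keep retrying this one page until nothing is in flight,
	 * instead of checking the whole range once at the end. */
	while (1) {
		/* lock page + extent range here ... */
		if (!has_pending_io(page))
			break;		/* quiescent: keep the locks and move on */
		/* ... unlock here before sleeping ... */
		wait_for_io(page);
	}
	printf("page %d ready\n", page);
}

int main(void)
{
	for (int i = 0; i < 4; i++)
		prepare_page(i);
	return 0;
}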
diff --git a/fs/btrfs/reada.c b/fs/btrfs/reada.c index 2373b39a132b..22db04550f6a 100644 --- a/fs/btrfs/reada.c +++ b/fs/btrfs/reada.c | |||
@@ -305,7 +305,7 @@ again: | |||
305 | 305 | ||
306 | spin_lock(&fs_info->reada_lock); | 306 | spin_lock(&fs_info->reada_lock); |
307 | ret = radix_tree_insert(&dev->reada_zones, | 307 | ret = radix_tree_insert(&dev->reada_zones, |
308 | (unsigned long)zone->end >> PAGE_CACHE_SHIFT, | 308 | (unsigned long)(zone->end >> PAGE_CACHE_SHIFT), |
309 | zone); | 309 | zone); |
310 | spin_unlock(&fs_info->reada_lock); | 310 | spin_unlock(&fs_info->reada_lock); |
311 | 311 | ||
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c index 9770cc5bfb76..abc0fbffa510 100644 --- a/fs/btrfs/scrub.c +++ b/fs/btrfs/scrub.c | |||
@@ -1367,7 +1367,8 @@ out: | |||
1367 | } | 1367 | } |
1368 | 1368 | ||
1369 | static noinline_for_stack int scrub_chunk(struct scrub_dev *sdev, | 1369 | static noinline_for_stack int scrub_chunk(struct scrub_dev *sdev, |
1370 | u64 chunk_tree, u64 chunk_objectid, u64 chunk_offset, u64 length) | 1370 | u64 chunk_tree, u64 chunk_objectid, u64 chunk_offset, u64 length, |
1371 | u64 dev_offset) | ||
1371 | { | 1372 | { |
1372 | struct btrfs_mapping_tree *map_tree = | 1373 | struct btrfs_mapping_tree *map_tree = |
1373 | &sdev->dev->dev_root->fs_info->mapping_tree; | 1374 | &sdev->dev->dev_root->fs_info->mapping_tree; |
@@ -1391,7 +1392,8 @@ static noinline_for_stack int scrub_chunk(struct scrub_dev *sdev, | |||
1391 | goto out; | 1392 | goto out; |
1392 | 1393 | ||
1393 | for (i = 0; i < map->num_stripes; ++i) { | 1394 | for (i = 0; i < map->num_stripes; ++i) { |
1394 | if (map->stripes[i].dev == sdev->dev) { | 1395 | if (map->stripes[i].dev == sdev->dev && |
1396 | map->stripes[i].physical == dev_offset) { | ||
1395 | ret = scrub_stripe(sdev, map, i, chunk_offset, length); | 1397 | ret = scrub_stripe(sdev, map, i, chunk_offset, length); |
1396 | if (ret) | 1398 | if (ret) |
1397 | goto out; | 1399 | goto out; |
@@ -1487,7 +1489,7 @@ int scrub_enumerate_chunks(struct scrub_dev *sdev, u64 start, u64 end) | |||
1487 | break; | 1489 | break; |
1488 | } | 1490 | } |
1489 | ret = scrub_chunk(sdev, chunk_tree, chunk_objectid, | 1491 | ret = scrub_chunk(sdev, chunk_tree, chunk_objectid, |
1490 | chunk_offset, length); | 1492 | chunk_offset, length, found_key.offset); |
1491 | btrfs_put_block_group(cache); | 1493 | btrfs_put_block_group(cache); |
1492 | if (ret) | 1494 | if (ret) |
1493 | break; | 1495 | break; |
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 287a6728b1ad..04b77e3ceb7a 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c | |||
@@ -327,7 +327,8 @@ again: | |||
327 | 327 | ||
328 | if (num_bytes) { | 328 | if (num_bytes) { |
329 | trace_btrfs_space_reservation(root->fs_info, "transaction", | 329 | trace_btrfs_space_reservation(root->fs_info, "transaction", |
330 | (u64)h, num_bytes, 1); | 330 | (u64)(unsigned long)h, |
331 | num_bytes, 1); | ||
331 | h->block_rsv = &root->fs_info->trans_block_rsv; | 332 | h->block_rsv = &root->fs_info->trans_block_rsv; |
332 | h->bytes_reserved = num_bytes; | 333 | h->bytes_reserved = num_bytes; |
333 | } | 334 | } |
@@ -915,7 +916,11 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans, | |||
915 | dentry->d_name.name, dentry->d_name.len, | 916 | dentry->d_name.name, dentry->d_name.len, |
916 | parent_inode, &key, | 917 | parent_inode, &key, |
917 | BTRFS_FT_DIR, index); | 918 | BTRFS_FT_DIR, index); |
918 | BUG_ON(ret); | 919 | if (ret) { |
920 | pending->error = -EEXIST; | ||
921 | dput(parent); | ||
922 | goto fail; | ||
923 | } | ||
919 | 924 | ||
920 | btrfs_i_size_write(parent_inode, parent_inode->i_size + | 925 | btrfs_i_size_write(parent_inode, parent_inode->i_size + |
921 | dentry->d_name.len * 2); | 926 | dentry->d_name.len * 2); |
@@ -993,12 +998,9 @@ static noinline int create_pending_snapshots(struct btrfs_trans_handle *trans, | |||
993 | { | 998 | { |
994 | struct btrfs_pending_snapshot *pending; | 999 | struct btrfs_pending_snapshot *pending; |
995 | struct list_head *head = &trans->transaction->pending_snapshots; | 1000 | struct list_head *head = &trans->transaction->pending_snapshots; |
996 | int ret; | ||
997 | 1001 | ||
998 | list_for_each_entry(pending, head, list) { | 1002 | list_for_each_entry(pending, head, list) |
999 | ret = create_pending_snapshot(trans, fs_info, pending); | 1003 | create_pending_snapshot(trans, fs_info, pending); |
1000 | BUG_ON(ret); | ||
1001 | } | ||
1002 | return 0; | 1004 | return 0; |
1003 | } | 1005 | } |
1004 | 1006 | ||
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index cb877e0886a7..966cc74f5d6c 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c | |||
@@ -1957,7 +1957,8 @@ static int wait_log_commit(struct btrfs_trans_handle *trans, | |||
1957 | 1957 | ||
1958 | finish_wait(&root->log_commit_wait[index], &wait); | 1958 | finish_wait(&root->log_commit_wait[index], &wait); |
1959 | mutex_lock(&root->log_mutex); | 1959 | mutex_lock(&root->log_mutex); |
1960 | } while (root->log_transid < transid + 2 && | 1960 | } while (root->fs_info->last_trans_log_full_commit != |
1961 | trans->transid && root->log_transid < transid + 2 && | ||
1961 | atomic_read(&root->log_commit[index])); | 1962 | atomic_read(&root->log_commit[index])); |
1962 | return 0; | 1963 | return 0; |
1963 | } | 1964 | } |
@@ -1966,7 +1967,8 @@ static int wait_for_writer(struct btrfs_trans_handle *trans, | |||
1966 | struct btrfs_root *root) | 1967 | struct btrfs_root *root) |
1967 | { | 1968 | { |
1968 | DEFINE_WAIT(wait); | 1969 | DEFINE_WAIT(wait); |
1969 | while (atomic_read(&root->log_writers)) { | 1970 | while (root->fs_info->last_trans_log_full_commit != |
1971 | trans->transid && atomic_read(&root->log_writers)) { | ||
1970 | prepare_to_wait(&root->log_writer_wait, | 1972 | prepare_to_wait(&root->log_writer_wait, |
1971 | &wait, TASK_UNINTERRUPTIBLE); | 1973 | &wait, TASK_UNINTERRUPTIBLE); |
1972 | mutex_unlock(&root->log_mutex); | 1974 | mutex_unlock(&root->log_mutex); |
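
Both tree-log hunks add last_trans_log_full_commit to the wait predicates, so wait_log_commit() and wait_for_writer() stop sleeping as soon as a full transaction commit has been requested rather than waiting out work that will be superseded anyway. A single-threaded sketch of augmenting a wait loop with such a bail-out flag, using invented names, is below.

#include <stdio.h>
#include <stdbool.h>

static int  writers = 3;		/* pretend log writers still active */
static bool full_commit_requested;	/* invented stand-in for last_trans_log_full_commit */

static void simulate_progress(int step)
{
	if (writers > 0)
		writers--;
	if (step == 1)
		full_commit_requested = true;	/* someone forced a full commit */
}

static void wait_for_writers(void)
{
	int step = 0;

	/* Bail out as soon as a full commit is requested, even if
	 * writers are still counted; waiting on them is wasted work. */
	while (!full_commit_requested && writers > 0) {
		printf("waiting: writers=%d\n", writers);
		simulate_progress(step++);
	}
	printf("done: writers=%d full_commit=%d\n", writers, full_commit_requested);
}

int main(void)
{
	wait_for_writers();
	return 0;
}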
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 0b4e2af7954d..ef41f285a475 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c | |||
@@ -459,12 +459,23 @@ int btrfs_close_extra_devices(struct btrfs_fs_devices *fs_devices) | |||
459 | { | 459 | { |
460 | struct btrfs_device *device, *next; | 460 | struct btrfs_device *device, *next; |
461 | 461 | ||
462 | struct block_device *latest_bdev = NULL; | ||
463 | u64 latest_devid = 0; | ||
464 | u64 latest_transid = 0; | ||
465 | |||
462 | mutex_lock(&uuid_mutex); | 466 | mutex_lock(&uuid_mutex); |
463 | again: | 467 | again: |
464 | /* This is the initialized path, it is safe to release the devices. */ | 468 | /* This is the initialized path, it is safe to release the devices. */ |
465 | list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) { | 469 | list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) { |
466 | if (device->in_fs_metadata) | 470 | if (device->in_fs_metadata) { |
471 | if (!latest_transid || | ||
472 | device->generation > latest_transid) { | ||
473 | latest_devid = device->devid; | ||
474 | latest_transid = device->generation; | ||
475 | latest_bdev = device->bdev; | ||
476 | } | ||
467 | continue; | 477 | continue; |
478 | } | ||
468 | 479 | ||
469 | if (device->bdev) { | 480 | if (device->bdev) { |
470 | blkdev_put(device->bdev, device->mode); | 481 | blkdev_put(device->bdev, device->mode); |
@@ -487,6 +498,10 @@ again: | |||
487 | goto again; | 498 | goto again; |
488 | } | 499 | } |
489 | 500 | ||
501 | fs_devices->latest_bdev = latest_bdev; | ||
502 | fs_devices->latest_devid = latest_devid; | ||
503 | fs_devices->latest_trans = latest_transid; | ||
504 | |||
490 | mutex_unlock(&uuid_mutex); | 505 | mutex_unlock(&uuid_mutex); |
491 | return 0; | 506 | return 0; |
492 | } | 507 | } |
@@ -1953,7 +1968,7 @@ static int btrfs_relocate_chunk(struct btrfs_root *root, | |||
1953 | em = lookup_extent_mapping(em_tree, chunk_offset, 1); | 1968 | em = lookup_extent_mapping(em_tree, chunk_offset, 1); |
1954 | read_unlock(&em_tree->lock); | 1969 | read_unlock(&em_tree->lock); |
1955 | 1970 | ||
1956 | BUG_ON(em->start > chunk_offset || | 1971 | BUG_ON(!em || em->start > chunk_offset || |
1957 | em->start + em->len < chunk_offset); | 1972 | em->start + em->len < chunk_offset); |
1958 | map = (struct map_lookup *)em->bdev; | 1973 | map = (struct map_lookup *)em->bdev; |
1959 | 1974 | ||
@@ -4356,6 +4371,20 @@ int btrfs_read_sys_array(struct btrfs_root *root) | |||
4356 | return -ENOMEM; | 4371 | return -ENOMEM; |
4357 | btrfs_set_buffer_uptodate(sb); | 4372 | btrfs_set_buffer_uptodate(sb); |
4358 | btrfs_set_buffer_lockdep_class(root->root_key.objectid, sb, 0); | 4373 | btrfs_set_buffer_lockdep_class(root->root_key.objectid, sb, 0); |
4374 | /* | ||
4375 | * The sb extent buffer is artificial and just used to read the system array. | ||
4376 | * btrfs_set_buffer_uptodate() call does not properly mark all its | ||
4377 | * pages up-to-date when the page is larger: extent does not cover the | ||
4378 | * whole page and consequently check_page_uptodate does not find all | ||
4379 | * the page's extents up-to-date (the hole beyond sb), | ||
4380 | * write_extent_buffer then triggers a WARN_ON. | ||
4381 | * | ||
4382 | * Regular short extents go through mark_extent_buffer_dirty/writeback cycle, | ||
4383 | * but sb spans only this function. Add an explicit SetPageUptodate call | ||
4384 | * to silence the warning e.g. on PowerPC 64. | ||
4385 | */ | ||
4386 | if (PAGE_CACHE_SIZE > BTRFS_SUPER_INFO_SIZE) | ||
4387 | SetPageUptodate(sb->first_page); | ||
4359 | 4388 | ||
4360 | write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE); | 4389 | write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE); |
4361 | array_size = btrfs_super_sys_array_size(super_copy); | 4390 | array_size = btrfs_super_sys_array_size(super_copy); |
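
The btrfs_close_extra_devices hunk walks the device list remembering the surviving device with the highest generation and then repoints latest_bdev/latest_devid/latest_trans at it, so the fs_devices summary never keeps referring to a device that was just closed. The selection itself is an ordinary max-scan over the entries that stay, as in this sketch with an invented struct dev:

#include <stdio.h>

struct dev {
	unsigned long long devid;
	unsigned long long generation;
	int in_metadata;
};

int main(void)
{
	struct dev devs[] = {
		{ .devid = 1, .generation = 40, .in_metadata = 1 },
		{ .devid = 2, .generation = 55, .in_metadata = 1 },
		{ .devid = 3, .generation = 60, .in_metadata = 0 },	/* being dropped */
	};
	struct dev *latest = NULL;

	/* Only devices that stay in the metadata are candidates;
	 * among them, remember the one with the newest generation. */
	for (unsigned i = 0; i < sizeof(devs) / sizeof(devs[0]); i++) {
		if (!devs[i].in_metadata)
			continue;	/* this one is about to be closed */
		if (!latest || devs[i].generation > latest->generation)
			latest = &devs[i];
	}

	if (latest)
		printf("latest devid=%llu generation=%llu\n",
		       latest->devid, latest->generation);
	return 0;
}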
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c index b60fc8bfb3e9..620daad201db 100644 --- a/fs/ceph/caps.c +++ b/fs/ceph/caps.c | |||
@@ -641,10 +641,10 @@ static int __cap_is_valid(struct ceph_cap *cap) | |||
641 | unsigned long ttl; | 641 | unsigned long ttl; |
642 | u32 gen; | 642 | u32 gen; |
643 | 643 | ||
644 | spin_lock(&cap->session->s_cap_lock); | 644 | spin_lock(&cap->session->s_gen_ttl_lock); |
645 | gen = cap->session->s_cap_gen; | 645 | gen = cap->session->s_cap_gen; |
646 | ttl = cap->session->s_cap_ttl; | 646 | ttl = cap->session->s_cap_ttl; |
647 | spin_unlock(&cap->session->s_cap_lock); | 647 | spin_unlock(&cap->session->s_gen_ttl_lock); |
648 | 648 | ||
649 | if (cap->cap_gen < gen || time_after_eq(jiffies, ttl)) { | 649 | if (cap->cap_gen < gen || time_after_eq(jiffies, ttl)) { |
650 | dout("__cap_is_valid %p cap %p issued %s " | 650 | dout("__cap_is_valid %p cap %p issued %s " |
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c index 618246bc2196..3e8094be4604 100644 --- a/fs/ceph/dir.c +++ b/fs/ceph/dir.c | |||
@@ -975,10 +975,10 @@ static int dentry_lease_is_valid(struct dentry *dentry) | |||
975 | di = ceph_dentry(dentry); | 975 | di = ceph_dentry(dentry); |
976 | if (di->lease_session) { | 976 | if (di->lease_session) { |
977 | s = di->lease_session; | 977 | s = di->lease_session; |
978 | spin_lock(&s->s_cap_lock); | 978 | spin_lock(&s->s_gen_ttl_lock); |
979 | gen = s->s_cap_gen; | 979 | gen = s->s_cap_gen; |
980 | ttl = s->s_cap_ttl; | 980 | ttl = s->s_cap_ttl; |
981 | spin_unlock(&s->s_cap_lock); | 981 | spin_unlock(&s->s_gen_ttl_lock); |
982 | 982 | ||
983 | if (di->lease_gen == gen && | 983 | if (di->lease_gen == gen && |
984 | time_before(jiffies, dentry->d_time) && | 984 | time_before(jiffies, dentry->d_time) && |
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index 23ab6a3f1825..866e8d7ca37d 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c | |||
@@ -262,6 +262,7 @@ static int parse_reply_info(struct ceph_msg *msg, | |||
262 | /* trace */ | 262 | /* trace */ |
263 | ceph_decode_32_safe(&p, end, len, bad); | 263 | ceph_decode_32_safe(&p, end, len, bad); |
264 | if (len > 0) { | 264 | if (len > 0) { |
265 | ceph_decode_need(&p, end, len, bad); | ||
265 | err = parse_reply_info_trace(&p, p+len, info, features); | 266 | err = parse_reply_info_trace(&p, p+len, info, features); |
266 | if (err < 0) | 267 | if (err < 0) |
267 | goto out_bad; | 268 | goto out_bad; |
@@ -270,6 +271,7 @@ static int parse_reply_info(struct ceph_msg *msg, | |||
270 | /* extra */ | 271 | /* extra */ |
271 | ceph_decode_32_safe(&p, end, len, bad); | 272 | ceph_decode_32_safe(&p, end, len, bad); |
272 | if (len > 0) { | 273 | if (len > 0) { |
274 | ceph_decode_need(&p, end, len, bad); | ||
273 | err = parse_reply_info_extra(&p, p+len, info, features); | 275 | err = parse_reply_info_extra(&p, p+len, info, features); |
274 | if (err < 0) | 276 | if (err < 0) |
275 | goto out_bad; | 277 | goto out_bad; |
@@ -398,9 +400,11 @@ static struct ceph_mds_session *register_session(struct ceph_mds_client *mdsc, | |||
398 | s->s_con.peer_name.type = CEPH_ENTITY_TYPE_MDS; | 400 | s->s_con.peer_name.type = CEPH_ENTITY_TYPE_MDS; |
399 | s->s_con.peer_name.num = cpu_to_le64(mds); | 401 | s->s_con.peer_name.num = cpu_to_le64(mds); |
400 | 402 | ||
401 | spin_lock_init(&s->s_cap_lock); | 403 | spin_lock_init(&s->s_gen_ttl_lock); |
402 | s->s_cap_gen = 0; | 404 | s->s_cap_gen = 0; |
403 | s->s_cap_ttl = 0; | 405 | s->s_cap_ttl = 0; |
406 | |||
407 | spin_lock_init(&s->s_cap_lock); | ||
404 | s->s_renew_requested = 0; | 408 | s->s_renew_requested = 0; |
405 | s->s_renew_seq = 0; | 409 | s->s_renew_seq = 0; |
406 | INIT_LIST_HEAD(&s->s_caps); | 410 | INIT_LIST_HEAD(&s->s_caps); |
@@ -2326,10 +2330,10 @@ static void handle_session(struct ceph_mds_session *session, | |||
2326 | case CEPH_SESSION_STALE: | 2330 | case CEPH_SESSION_STALE: |
2327 | pr_info("mds%d caps went stale, renewing\n", | 2331 | pr_info("mds%d caps went stale, renewing\n", |
2328 | session->s_mds); | 2332 | session->s_mds); |
2329 | spin_lock(&session->s_cap_lock); | 2333 | spin_lock(&session->s_gen_ttl_lock); |
2330 | session->s_cap_gen++; | 2334 | session->s_cap_gen++; |
2331 | session->s_cap_ttl = 0; | 2335 | session->s_cap_ttl = 0; |
2332 | spin_unlock(&session->s_cap_lock); | 2336 | spin_unlock(&session->s_gen_ttl_lock); |
2333 | send_renew_caps(mdsc, session); | 2337 | send_renew_caps(mdsc, session); |
2334 | break; | 2338 | break; |
2335 | 2339 | ||
diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h index a50ca0e39475..8c7c04ebb595 100644 --- a/fs/ceph/mds_client.h +++ b/fs/ceph/mds_client.h | |||
@@ -117,10 +117,13 @@ struct ceph_mds_session { | |||
117 | void *s_authorizer_buf, *s_authorizer_reply_buf; | 117 | void *s_authorizer_buf, *s_authorizer_reply_buf; |
118 | size_t s_authorizer_buf_len, s_authorizer_reply_buf_len; | 118 | size_t s_authorizer_buf_len, s_authorizer_reply_buf_len; |
119 | 119 | ||
120 | /* protected by s_cap_lock */ | 120 | /* protected by s_gen_ttl_lock */ |
121 | spinlock_t s_cap_lock; | 121 | spinlock_t s_gen_ttl_lock; |
122 | u32 s_cap_gen; /* inc each time we get mds stale msg */ | 122 | u32 s_cap_gen; /* inc each time we get mds stale msg */ |
123 | unsigned long s_cap_ttl; /* when session caps expire */ | 123 | unsigned long s_cap_ttl; /* when session caps expire */ |
124 | |||
125 | /* protected by s_cap_lock */ | ||
126 | spinlock_t s_cap_lock; | ||
124 | struct list_head s_caps; /* all caps issued by this session */ | 127 | struct list_head s_caps; /* all caps issued by this session */ |
125 | int s_nr_caps, s_trim_caps; | 128 | int s_nr_caps, s_trim_caps; |
126 | int s_num_cap_releases; | 129 | int s_num_cap_releases; |
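
Across caps.c, dir.c, mds_client.c and mds_client.h the ceph change splits one lock in two: the new s_gen_ttl_lock covers only s_cap_gen and s_cap_ttl, while s_cap_lock keeps protecting the cap list, so gen/ttl readers no longer contend with cap-list traversal. A pthread sketch of that lock-splitting layout, with invented field names, is below.

#include <pthread.h>
#include <stdio.h>

/* Invented session struct: two small, independent groups of fields,
 * each behind its own lock so users of one never contend on the other. */
struct session {
	pthread_mutex_t gen_ttl_lock;	/* protects gen, ttl */
	unsigned int gen;
	unsigned long ttl;

	pthread_mutex_t cap_lock;	/* protects nr_caps (stands in for the cap list) */
	int nr_caps;
};

static void bump_gen(struct session *s)
{
	pthread_mutex_lock(&s->gen_ttl_lock);
	s->gen++;
	s->ttl = 0;
	pthread_mutex_unlock(&s->gen_ttl_lock);
}

static void add_cap(struct session *s)
{
	pthread_mutex_lock(&s->cap_lock);	/* untouched by gen/ttl updates */
	s->nr_caps++;
	pthread_mutex_unlock(&s->cap_lock);
}

int main(void)
{
	struct session s = {
		.gen_ttl_lock = PTHREAD_MUTEX_INITIALIZER,
		.cap_lock = PTHREAD_MUTEX_INITIALIZER,
	};

	bump_gen(&s);
	add_cap(&s);
	printf("gen=%u nr_caps=%d\n", s.gen, s.nr_caps);
	return 0;
}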
diff --git a/fs/ceph/xattr.c b/fs/ceph/xattr.c index 857214ae8c08..a76f697303d9 100644 --- a/fs/ceph/xattr.c +++ b/fs/ceph/xattr.c | |||
@@ -111,8 +111,10 @@ static size_t ceph_vxattrcb_layout(struct ceph_inode_info *ci, char *val, | |||
111 | } | 111 | } |
112 | 112 | ||
113 | static struct ceph_vxattr_cb ceph_file_vxattrs[] = { | 113 | static struct ceph_vxattr_cb ceph_file_vxattrs[] = { |
114 | { true, "ceph.file.layout", ceph_vxattrcb_layout}, | ||
115 | /* The following extended attribute name is deprecated */ | ||
114 | { true, "ceph.layout", ceph_vxattrcb_layout}, | 116 | { true, "ceph.layout", ceph_vxattrcb_layout}, |
115 | { NULL, NULL } | 117 | { true, NULL, NULL } |
116 | }; | 118 | }; |
117 | 119 | ||
118 | static struct ceph_vxattr_cb *ceph_inode_vxattrs(struct inode *inode) | 120 | static struct ceph_vxattr_cb *ceph_inode_vxattrs(struct inode *inode) |
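
The xattr hunk registers the new ceph.file.layout name ahead of the deprecated ceph.layout alias in a sentinel-terminated vxattr table, and the terminator gains the extra leading field the entry struct carries. Walking such a table until the NULL-name sentinel is the usual lookup pattern; the sketch below uses example.* names and a fabricated value so it is not mistaken for ceph's real handler.

#include <stdio.h>
#include <string.h>

struct vxattr {
	int readonly;
	const char *name;			/* NULL name terminates the table */
	int (*get)(char *buf, size_t len);
};

static int get_layout(char *buf, size_t len) { return snprintf(buf, len, "stripe=4M"); }

/* New, preferred name listed first; deprecated alias kept for old callers. */
static const struct vxattr file_vxattrs[] = {
	{ 1, "example.file.layout", get_layout },
	{ 1, "example.layout",      get_layout },	/* deprecated spelling */
	{ 1, NULL, NULL },				/* sentinel */
};

static const struct vxattr *find_vxattr(const char *name)
{
	for (const struct vxattr *v = file_vxattrs; v->name; v++)
		if (!strcmp(v->name, name))
			return v;
	return NULL;
}

int main(void)
{
	char buf[32];
	const struct vxattr *v = find_vxattr("example.file.layout");

	if (v && v->get(buf, sizeof(buf)) > 0)
		printf("%s -> %s\n", v->name, buf);
	return 0;
}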
diff --git a/fs/cifs/Kconfig b/fs/cifs/Kconfig index f66cc1625150..2b243af70aa3 100644 --- a/fs/cifs/Kconfig +++ b/fs/cifs/Kconfig | |||
@@ -139,8 +139,7 @@ config CIFS_DFS_UPCALL | |||
139 | points. If unsure, say N. | 139 | points. If unsure, say N. |
140 | 140 | ||
141 | config CIFS_FSCACHE | 141 | config CIFS_FSCACHE |
142 | bool "Provide CIFS client caching support (EXPERIMENTAL)" | 142 | bool "Provide CIFS client caching support" |
143 | depends on EXPERIMENTAL | ||
144 | depends on CIFS=m && FSCACHE || CIFS=y && FSCACHE=y | 143 | depends on CIFS=m && FSCACHE || CIFS=y && FSCACHE=y |
145 | help | 144 | help |
146 | Makes CIFS FS-Cache capable. Say Y here if you want your CIFS data | 145 | Makes CIFS FS-Cache capable. Say Y here if you want your CIFS data |
@@ -148,8 +147,8 @@ config CIFS_FSCACHE | |||
148 | manager. If unsure, say N. | 147 | manager. If unsure, say N. |
149 | 148 | ||
150 | config CIFS_ACL | 149 | config CIFS_ACL |
151 | bool "Provide CIFS ACL support (EXPERIMENTAL)" | 150 | bool "Provide CIFS ACL support" |
152 | depends on EXPERIMENTAL && CIFS_XATTR && KEYS | 151 | depends on CIFS_XATTR && KEYS |
153 | help | 152 | help |
154 | Allows to fetch CIFS/NTFS ACL from the server. The DACL blob | 153 | Allows to fetch CIFS/NTFS ACL from the server. The DACL blob |
155 | is handed over to the application/caller. | 154 | is handed over to the application/caller. |
diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c index 84e8c0724704..24b3dfc05282 100644 --- a/fs/cifs/cifs_debug.c +++ b/fs/cifs/cifs_debug.c | |||
@@ -676,14 +676,23 @@ static ssize_t cifs_multiuser_mount_proc_write(struct file *file, | |||
676 | { | 676 | { |
677 | char c; | 677 | char c; |
678 | int rc; | 678 | int rc; |
679 | static bool warned; | ||
679 | 680 | ||
680 | rc = get_user(c, buffer); | 681 | rc = get_user(c, buffer); |
681 | if (rc) | 682 | if (rc) |
682 | return rc; | 683 | return rc; |
683 | if (c == '0' || c == 'n' || c == 'N') | 684 | if (c == '0' || c == 'n' || c == 'N') |
684 | multiuser_mount = 0; | 685 | multiuser_mount = 0; |
685 | else if (c == '1' || c == 'y' || c == 'Y') | 686 | else if (c == '1' || c == 'y' || c == 'Y') { |
686 | multiuser_mount = 1; | 687 | multiuser_mount = 1; |
688 | if (!warned) { | ||
689 | warned = true; | ||
690 | printk(KERN_WARNING "CIFS VFS: The legacy multiuser " | ||
691 | "mount code is scheduled to be deprecated in " | ||
692 | "3.5. Please switch to using the multiuser " | ||
693 | "mount option."); | ||
694 | } | ||
695 | } | ||
687 | 696 | ||
688 | return count; | 697 | return count; |
689 | } | 698 | } |
diff --git a/fs/cifs/cifs_spnego.c b/fs/cifs/cifs_spnego.c index 2272fd5fe5b7..e622863b292f 100644 --- a/fs/cifs/cifs_spnego.c +++ b/fs/cifs/cifs_spnego.c | |||
@@ -113,9 +113,11 @@ cifs_get_spnego_key(struct cifs_ses *sesInfo) | |||
113 | MAX_MECH_STR_LEN + | 113 | MAX_MECH_STR_LEN + |
114 | UID_KEY_LEN + (sizeof(uid_t) * 2) + | 114 | UID_KEY_LEN + (sizeof(uid_t) * 2) + |
115 | CREDUID_KEY_LEN + (sizeof(uid_t) * 2) + | 115 | CREDUID_KEY_LEN + (sizeof(uid_t) * 2) + |
116 | USER_KEY_LEN + strlen(sesInfo->user_name) + | ||
117 | PID_KEY_LEN + (sizeof(pid_t) * 2) + 1; | 116 | PID_KEY_LEN + (sizeof(pid_t) * 2) + 1; |
118 | 117 | ||
118 | if (sesInfo->user_name) | ||
119 | desc_len += USER_KEY_LEN + strlen(sesInfo->user_name); | ||
120 | |||
119 | spnego_key = ERR_PTR(-ENOMEM); | 121 | spnego_key = ERR_PTR(-ENOMEM); |
120 | description = kzalloc(desc_len, GFP_KERNEL); | 122 | description = kzalloc(desc_len, GFP_KERNEL); |
121 | if (description == NULL) | 123 | if (description == NULL) |
@@ -152,8 +154,10 @@ cifs_get_spnego_key(struct cifs_ses *sesInfo) | |||
152 | dp = description + strlen(description); | 154 | dp = description + strlen(description); |
153 | sprintf(dp, ";creduid=0x%x", sesInfo->cred_uid); | 155 | sprintf(dp, ";creduid=0x%x", sesInfo->cred_uid); |
154 | 156 | ||
155 | dp = description + strlen(description); | 157 | if (sesInfo->user_name) { |
156 | sprintf(dp, ";user=%s", sesInfo->user_name); | 158 | dp = description + strlen(description); |
159 | sprintf(dp, ";user=%s", sesInfo->user_name); | ||
160 | } | ||
157 | 161 | ||
158 | dp = description + strlen(description); | 162 | dp = description + strlen(description); |
159 | sprintf(dp, ";pid=0x%x", current->pid); | 163 | sprintf(dp, ";pid=0x%x", current->pid); |
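
The two cifs_get_spnego_key hunks stop assuming sesInfo->user_name is set: the key-description length only grows, and the ";user=" field is only appended, when a user name is actually present, matching the NULL-tolerant change in calc_ntlmv2_hash further down. A small sketch of that kind of NULL-tolerant description builder, with invented key fields, follows.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Build "ver=1;host=<h>[;user=<u>]", sizing the buffer only for what
 * will really be written; user may legitimately be NULL. */
static char *build_description(const char *host, const char *user)
{
	size_t len = strlen("ver=1;host=") + strlen(host) + 1;

	if (user)					/* only reserve space if present */
		len += strlen(";user=") + strlen(user);

	char *desc = malloc(len);
	if (!desc)
		return NULL;

	sprintf(desc, "ver=1;host=%s", host);
	if (user)
		sprintf(desc + strlen(desc), ";user=%s", user);
	return desc;
}

int main(void)
{
	char *a = build_description("srv1", "alice");
	char *b = build_description("srv1", NULL);	/* no crash, no ";user=" */

	printf("%s\n%s\n", a, b);
	free(a);
	free(b);
	return 0;
}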
diff --git a/fs/cifs/cifs_unicode.c b/fs/cifs/cifs_unicode.c index 1b2e180b018d..fbb9da951843 100644 --- a/fs/cifs/cifs_unicode.c +++ b/fs/cifs/cifs_unicode.c | |||
@@ -27,17 +27,17 @@ | |||
27 | #include "cifs_debug.h" | 27 | #include "cifs_debug.h" |
28 | 28 | ||
29 | /* | 29 | /* |
30 | * cifs_ucs2_bytes - how long will a string be after conversion? | 30 | * cifs_utf16_bytes - how long will a string be after conversion? |
31 | * @ucs - pointer to input string | 31 | * @utf16 - pointer to input string |
32 | * @maxbytes - don't go past this many bytes of input string | 32 | * @maxbytes - don't go past this many bytes of input string |
33 | * @codepage - destination codepage | 33 | * @codepage - destination codepage |
34 | * | 34 | * |
35 | * Walk a ucs2le string and return the number of bytes that the string will | 35 | * Walk a utf16le string and return the number of bytes that the string will |
36 | * be after being converted to the given charset, not including any null | 36 | * be after being converted to the given charset, not including any null |
37 | * termination required. Don't walk past maxbytes in the source buffer. | 37 | * termination required. Don't walk past maxbytes in the source buffer. |
38 | */ | 38 | */ |
39 | int | 39 | int |
40 | cifs_ucs2_bytes(const __le16 *from, int maxbytes, | 40 | cifs_utf16_bytes(const __le16 *from, int maxbytes, |
41 | const struct nls_table *codepage) | 41 | const struct nls_table *codepage) |
42 | { | 42 | { |
43 | int i; | 43 | int i; |
@@ -122,7 +122,7 @@ cp_convert: | |||
122 | } | 122 | } |
123 | 123 | ||
124 | /* | 124 | /* |
125 | * cifs_from_ucs2 - convert utf16le string to local charset | 125 | * cifs_from_utf16 - convert utf16le string to local charset |
126 | * @to - destination buffer | 126 | * @to - destination buffer |
127 | * @from - source buffer | 127 | * @from - source buffer |
128 | * @tolen - destination buffer size (in bytes) | 128 | * @tolen - destination buffer size (in bytes) |
@@ -130,7 +130,7 @@ cp_convert: | |||
130 | * @codepage - codepage to which characters should be converted | 130 | * @codepage - codepage to which characters should be converted |
131 | * @mapchar - should characters be remapped according to the mapchars option? | 131 | * @mapchar - should characters be remapped according to the mapchars option? |
132 | * | 132 | * |
133 | * Convert a little-endian ucs2le string (as sent by the server) to a string | 133 | * Convert a little-endian utf16le string (as sent by the server) to a string |
134 | * in the provided codepage. The tolen and fromlen parameters are to ensure | 134 | * in the provided codepage. The tolen and fromlen parameters are to ensure |
135 | * that the code doesn't walk off of the end of the buffer (which is always | 135 | * that the code doesn't walk off of the end of the buffer (which is always |
136 | * a danger if the alignment of the source buffer is off). The destination | 136 | * a danger if the alignment of the source buffer is off). The destination |
@@ -139,12 +139,12 @@ cp_convert: | |||
139 | * null terminator). | 139 | * null terminator). |
140 | * | 140 | * |
141 | * Note that some windows versions actually send multiword UTF-16 characters | 141 | * Note that some windows versions actually send multiword UTF-16 characters |
142 | * instead of straight UCS-2. The linux nls routines however aren't able to | 142 | * instead of straight UTF16-2. The linux nls routines however aren't able to |
143 | * deal with those characters properly. In the event that we get some of | 143 | * deal with those characters properly. In the event that we get some of |
144 | * those characters, they won't be translated properly. | 144 | * those characters, they won't be translated properly. |
145 | */ | 145 | */ |
146 | int | 146 | int |
147 | cifs_from_ucs2(char *to, const __le16 *from, int tolen, int fromlen, | 147 | cifs_from_utf16(char *to, const __le16 *from, int tolen, int fromlen, |
148 | const struct nls_table *codepage, bool mapchar) | 148 | const struct nls_table *codepage, bool mapchar) |
149 | { | 149 | { |
150 | int i, charlen, safelen; | 150 | int i, charlen, safelen; |
@@ -190,13 +190,13 @@ cifs_from_ucs2(char *to, const __le16 *from, int tolen, int fromlen, | |||
190 | } | 190 | } |
191 | 191 | ||
192 | /* | 192 | /* |
193 | * NAME: cifs_strtoUCS() | 193 | * NAME: cifs_strtoUTF16() |
194 | * | 194 | * |
195 | * FUNCTION: Convert character string to unicode string | 195 | * FUNCTION: Convert character string to unicode string |
196 | * | 196 | * |
197 | */ | 197 | */ |
198 | int | 198 | int |
199 | cifs_strtoUCS(__le16 *to, const char *from, int len, | 199 | cifs_strtoUTF16(__le16 *to, const char *from, int len, |
200 | const struct nls_table *codepage) | 200 | const struct nls_table *codepage) |
201 | { | 201 | { |
202 | int charlen; | 202 | int charlen; |
@@ -206,7 +206,7 @@ cifs_strtoUCS(__le16 *to, const char *from, int len, | |||
206 | for (i = 0; len && *from; i++, from += charlen, len -= charlen) { | 206 | for (i = 0; len && *from; i++, from += charlen, len -= charlen) { |
207 | charlen = codepage->char2uni(from, len, &wchar_to); | 207 | charlen = codepage->char2uni(from, len, &wchar_to); |
208 | if (charlen < 1) { | 208 | if (charlen < 1) { |
209 | cERROR(1, "strtoUCS: char2uni of 0x%x returned %d", | 209 | cERROR(1, "strtoUTF16: char2uni of 0x%x returned %d", |
210 | *from, charlen); | 210 | *from, charlen); |
211 | /* A question mark */ | 211 | /* A question mark */ |
212 | wchar_to = 0x003f; | 212 | wchar_to = 0x003f; |
@@ -220,7 +220,8 @@ cifs_strtoUCS(__le16 *to, const char *from, int len, | |||
220 | } | 220 | } |
221 | 221 | ||
222 | /* | 222 | /* |
223 | * cifs_strndup_from_ucs - copy a string from wire format to the local codepage | 223 | * cifs_strndup_from_utf16 - copy a string from wire format to the local |
224 | * codepage | ||
224 | * @src - source string | 225 | * @src - source string |
225 | * @maxlen - don't walk past this many bytes in the source string | 226 | * @maxlen - don't walk past this many bytes in the source string |
226 | * @is_unicode - is this a unicode string? | 227 | * @is_unicode - is this a unicode string? |
@@ -231,19 +232,19 @@ cifs_strtoUCS(__le16 *to, const char *from, int len, | |||
231 | * error. | 232 | * error. |
232 | */ | 233 | */ |
233 | char * | 234 | char * |
234 | cifs_strndup_from_ucs(const char *src, const int maxlen, const bool is_unicode, | 235 | cifs_strndup_from_utf16(const char *src, const int maxlen, |
235 | const struct nls_table *codepage) | 236 | const bool is_unicode, const struct nls_table *codepage) |
236 | { | 237 | { |
237 | int len; | 238 | int len; |
238 | char *dst; | 239 | char *dst; |
239 | 240 | ||
240 | if (is_unicode) { | 241 | if (is_unicode) { |
241 | len = cifs_ucs2_bytes((__le16 *) src, maxlen, codepage); | 242 | len = cifs_utf16_bytes((__le16 *) src, maxlen, codepage); |
242 | len += nls_nullsize(codepage); | 243 | len += nls_nullsize(codepage); |
243 | dst = kmalloc(len, GFP_KERNEL); | 244 | dst = kmalloc(len, GFP_KERNEL); |
244 | if (!dst) | 245 | if (!dst) |
245 | return NULL; | 246 | return NULL; |
246 | cifs_from_ucs2(dst, (__le16 *) src, len, maxlen, codepage, | 247 | cifs_from_utf16(dst, (__le16 *) src, len, maxlen, codepage, |
247 | false); | 248 | false); |
248 | } else { | 249 | } else { |
249 | len = strnlen(src, maxlen); | 250 | len = strnlen(src, maxlen); |
@@ -264,7 +265,7 @@ cifs_strndup_from_ucs(const char *src, const int maxlen, const bool is_unicode, | |||
264 | * names are little endian 16 bit Unicode on the wire | 265 | * names are little endian 16 bit Unicode on the wire |
265 | */ | 266 | */ |
266 | int | 267 | int |
267 | cifsConvertToUCS(__le16 *target, const char *source, int srclen, | 268 | cifsConvertToUTF16(__le16 *target, const char *source, int srclen, |
268 | const struct nls_table *cp, int mapChars) | 269 | const struct nls_table *cp, int mapChars) |
269 | { | 270 | { |
270 | int i, j, charlen; | 271 | int i, j, charlen; |
@@ -273,7 +274,7 @@ cifsConvertToUCS(__le16 *target, const char *source, int srclen, | |||
273 | wchar_t tmp; | 274 | wchar_t tmp; |
274 | 275 | ||
275 | if (!mapChars) | 276 | if (!mapChars) |
276 | return cifs_strtoUCS(target, source, PATH_MAX, cp); | 277 | return cifs_strtoUTF16(target, source, PATH_MAX, cp); |
277 | 278 | ||
278 | for (i = 0, j = 0; i < srclen; j++) { | 279 | for (i = 0, j = 0; i < srclen; j++) { |
279 | src_char = source[i]; | 280 | src_char = source[i]; |
@@ -281,7 +282,7 @@ cifsConvertToUCS(__le16 *target, const char *source, int srclen, | |||
281 | switch (src_char) { | 282 | switch (src_char) { |
282 | case 0: | 283 | case 0: |
283 | put_unaligned(0, &target[j]); | 284 | put_unaligned(0, &target[j]); |
284 | goto ctoUCS_out; | 285 | goto ctoUTF16_out; |
285 | case ':': | 286 | case ':': |
286 | dst_char = cpu_to_le16(UNI_COLON); | 287 | dst_char = cpu_to_le16(UNI_COLON); |
287 | break; | 288 | break; |
@@ -326,7 +327,7 @@ cifsConvertToUCS(__le16 *target, const char *source, int srclen, | |||
326 | put_unaligned(dst_char, &target[j]); | 327 | put_unaligned(dst_char, &target[j]); |
327 | } | 328 | } |
328 | 329 | ||
329 | ctoUCS_out: | 330 | ctoUTF16_out: |
330 | return i; | 331 | return i; |
331 | } | 332 | } |
332 | 333 | ||
diff --git a/fs/cifs/cifs_unicode.h b/fs/cifs/cifs_unicode.h index 6d02fd560566..a513a546700b 100644 --- a/fs/cifs/cifs_unicode.h +++ b/fs/cifs/cifs_unicode.h | |||
@@ -74,16 +74,16 @@ extern const struct UniCaseRange CifsUniLowerRange[]; | |||
74 | #endif /* UNIUPR_NOLOWER */ | 74 | #endif /* UNIUPR_NOLOWER */ |
75 | 75 | ||
76 | #ifdef __KERNEL__ | 76 | #ifdef __KERNEL__ |
77 | int cifs_from_ucs2(char *to, const __le16 *from, int tolen, int fromlen, | 77 | int cifs_from_utf16(char *to, const __le16 *from, int tolen, int fromlen, |
78 | const struct nls_table *codepage, bool mapchar); | 78 | const struct nls_table *codepage, bool mapchar); |
79 | int cifs_ucs2_bytes(const __le16 *from, int maxbytes, | 79 | int cifs_utf16_bytes(const __le16 *from, int maxbytes, |
80 | const struct nls_table *codepage); | 80 | const struct nls_table *codepage); |
81 | int cifs_strtoUCS(__le16 *, const char *, int, const struct nls_table *); | 81 | int cifs_strtoUTF16(__le16 *, const char *, int, const struct nls_table *); |
82 | char *cifs_strndup_from_ucs(const char *src, const int maxlen, | 82 | char *cifs_strndup_from_utf16(const char *src, const int maxlen, |
83 | const bool is_unicode, | 83 | const bool is_unicode, |
84 | const struct nls_table *codepage); | 84 | const struct nls_table *codepage); |
85 | extern int cifsConvertToUCS(__le16 *target, const char *source, int maxlen, | 85 | extern int cifsConvertToUTF16(__le16 *target, const char *source, int maxlen, |
86 | const struct nls_table *cp, int mapChars); | 86 | const struct nls_table *cp, int mapChars); |
87 | 87 | ||
88 | #endif | 88 | #endif |
89 | 89 | ||
diff --git a/fs/cifs/cifsacl.c b/fs/cifs/cifsacl.c index 72ddf23ef6f7..c1b254487388 100644 --- a/fs/cifs/cifsacl.c +++ b/fs/cifs/cifsacl.c | |||
@@ -909,6 +909,8 @@ static void parse_dacl(struct cifs_acl *pdacl, char *end_of_acl, | |||
909 | umode_t group_mask = S_IRWXG; | 909 | umode_t group_mask = S_IRWXG; |
910 | umode_t other_mask = S_IRWXU | S_IRWXG | S_IRWXO; | 910 | umode_t other_mask = S_IRWXU | S_IRWXG | S_IRWXO; |
911 | 911 | ||
912 | if (num_aces > ULONG_MAX / sizeof(struct cifs_ace *)) | ||
913 | return; | ||
912 | ppace = kmalloc(num_aces * sizeof(struct cifs_ace *), | 914 | ppace = kmalloc(num_aces * sizeof(struct cifs_ace *), |
913 | GFP_KERNEL); | 915 | GFP_KERNEL); |
914 | if (!ppace) { | 916 | if (!ppace) { |
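
The parse_dacl hunk bails out when num_aces is large enough that num_aces * sizeof(struct cifs_ace *) would wrap, so the subsequent kmalloc() cannot be handed a too-small size. The same guard in plain C is below; in user space, calloc(count, size) is an alternative because it performs the multiplication check itself.

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

/* Reject counts whose element-array size would wrap around before
 * handing them to an allocator that multiplies blindly. */
static void **alloc_ptr_array(unsigned long count)
{
	if (count > ULONG_MAX / sizeof(void *))
		return NULL;			/* count * size would overflow */
	return malloc(count * sizeof(void *));	/* safe now */
}

int main(void)
{
	void **ok = alloc_ptr_array(16);
	void **bad = alloc_ptr_array(ULONG_MAX);	/* rejected, not a tiny bogus buffer */

	printf("ok=%p bad=%p\n", (void *)ok, (void *)bad);
	free(ok);
	return 0;
}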
diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c index 5d9b9acc5fce..63c460e503b6 100644 --- a/fs/cifs/cifsencrypt.c +++ b/fs/cifs/cifsencrypt.c | |||
@@ -327,7 +327,7 @@ build_avpair_blob(struct cifs_ses *ses, const struct nls_table *nls_cp) | |||
327 | attrptr->type = cpu_to_le16(NTLMSSP_AV_NB_DOMAIN_NAME); | 327 | attrptr->type = cpu_to_le16(NTLMSSP_AV_NB_DOMAIN_NAME); |
328 | attrptr->length = cpu_to_le16(2 * dlen); | 328 | attrptr->length = cpu_to_le16(2 * dlen); |
329 | blobptr = (unsigned char *)attrptr + sizeof(struct ntlmssp2_name); | 329 | blobptr = (unsigned char *)attrptr + sizeof(struct ntlmssp2_name); |
330 | cifs_strtoUCS((__le16 *)blobptr, ses->domainName, dlen, nls_cp); | 330 | cifs_strtoUTF16((__le16 *)blobptr, ses->domainName, dlen, nls_cp); |
331 | 331 | ||
332 | return 0; | 332 | return 0; |
333 | } | 333 | } |
@@ -376,7 +376,7 @@ find_domain_name(struct cifs_ses *ses, const struct nls_table *nls_cp) | |||
376 | kmalloc(attrsize + 1, GFP_KERNEL); | 376 | kmalloc(attrsize + 1, GFP_KERNEL); |
377 | if (!ses->domainName) | 377 | if (!ses->domainName) |
378 | return -ENOMEM; | 378 | return -ENOMEM; |
379 | cifs_from_ucs2(ses->domainName, | 379 | cifs_from_utf16(ses->domainName, |
380 | (__le16 *)blobptr, attrsize, attrsize, | 380 | (__le16 *)blobptr, attrsize, attrsize, |
381 | nls_cp, false); | 381 | nls_cp, false); |
382 | break; | 382 | break; |
@@ -420,15 +420,20 @@ static int calc_ntlmv2_hash(struct cifs_ses *ses, char *ntlmv2_hash, | |||
420 | } | 420 | } |
421 | 421 | ||
422 | /* convert ses->user_name to unicode and uppercase */ | 422 | /* convert ses->user_name to unicode and uppercase */ |
423 | len = strlen(ses->user_name); | 423 | len = ses->user_name ? strlen(ses->user_name) : 0; |
424 | user = kmalloc(2 + (len * 2), GFP_KERNEL); | 424 | user = kmalloc(2 + (len * 2), GFP_KERNEL); |
425 | if (user == NULL) { | 425 | if (user == NULL) { |
426 | cERROR(1, "calc_ntlmv2_hash: user mem alloc failure\n"); | 426 | cERROR(1, "calc_ntlmv2_hash: user mem alloc failure\n"); |
427 | rc = -ENOMEM; | 427 | rc = -ENOMEM; |
428 | return rc; | 428 | return rc; |
429 | } | 429 | } |
430 | len = cifs_strtoUCS((__le16 *)user, ses->user_name, len, nls_cp); | 430 | |
431 | UniStrupr(user); | 431 | if (len) { |
432 | len = cifs_strtoUTF16((__le16 *)user, ses->user_name, len, nls_cp); | ||
433 | UniStrupr(user); | ||
434 | } else { | ||
435 | memset(user, '\0', 2); | ||
436 | } | ||
432 | 437 | ||
433 | rc = crypto_shash_update(&ses->server->secmech.sdeschmacmd5->shash, | 438 | rc = crypto_shash_update(&ses->server->secmech.sdeschmacmd5->shash, |
434 | (char *)user, 2 * len); | 439 | (char *)user, 2 * len); |
@@ -448,8 +453,8 @@ static int calc_ntlmv2_hash(struct cifs_ses *ses, char *ntlmv2_hash, | |||
448 | rc = -ENOMEM; | 453 | rc = -ENOMEM; |
449 | return rc; | 454 | return rc; |
450 | } | 455 | } |
451 | len = cifs_strtoUCS((__le16 *)domain, ses->domainName, len, | 456 | len = cifs_strtoUTF16((__le16 *)domain, ses->domainName, len, |
452 | nls_cp); | 457 | nls_cp); |
453 | rc = | 458 | rc = |
454 | crypto_shash_update(&ses->server->secmech.sdeschmacmd5->shash, | 459 | crypto_shash_update(&ses->server->secmech.sdeschmacmd5->shash, |
455 | (char *)domain, 2 * len); | 460 | (char *)domain, 2 * len); |
@@ -468,7 +473,7 @@ static int calc_ntlmv2_hash(struct cifs_ses *ses, char *ntlmv2_hash, | |||
468 | rc = -ENOMEM; | 473 | rc = -ENOMEM; |
469 | return rc; | 474 | return rc; |
470 | } | 475 | } |
471 | len = cifs_strtoUCS((__le16 *)server, ses->serverName, len, | 476 | len = cifs_strtoUTF16((__le16 *)server, ses->serverName, len, |
472 | nls_cp); | 477 | nls_cp); |
473 | rc = | 478 | rc = |
474 | crypto_shash_update(&ses->server->secmech.sdeschmacmd5->shash, | 479 | crypto_shash_update(&ses->server->secmech.sdeschmacmd5->shash, |
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h index ba53c1c6c6cc..76e7d8b6da17 100644 --- a/fs/cifs/cifsglob.h +++ b/fs/cifs/cifsglob.h | |||
@@ -879,6 +879,8 @@ require use of the stronger protocol */ | |||
879 | #define CIFSSEC_MASK 0xB70B7 /* current flags supported if weak */ | 879 | #define CIFSSEC_MASK 0xB70B7 /* current flags supported if weak */ |
880 | #endif /* UPCALL */ | 880 | #endif /* UPCALL */ |
881 | #else /* do not allow weak pw hash */ | 881 | #else /* do not allow weak pw hash */ |
882 | #define CIFSSEC_MUST_LANMAN 0 | ||
883 | #define CIFSSEC_MUST_PLNTXT 0 | ||
882 | #ifdef CONFIG_CIFS_UPCALL | 884 | #ifdef CONFIG_CIFS_UPCALL |
883 | #define CIFSSEC_MASK 0x8F08F /* flags supported if no weak allowed */ | 885 | #define CIFSSEC_MASK 0x8F08F /* flags supported if no weak allowed */ |
884 | #else | 886 | #else |
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c index 6600aa2d2ef3..8b7794c31591 100644 --- a/fs/cifs/cifssmb.c +++ b/fs/cifs/cifssmb.c | |||
@@ -821,8 +821,8 @@ PsxDelete: | |||
821 | 821 | ||
822 | if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { | 822 | if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { |
823 | name_len = | 823 | name_len = |
824 | cifsConvertToUCS((__le16 *) pSMB->FileName, fileName, | 824 | cifsConvertToUTF16((__le16 *) pSMB->FileName, fileName, |
825 | PATH_MAX, nls_codepage, remap); | 825 | PATH_MAX, nls_codepage, remap); |
826 | name_len++; /* trailing null */ | 826 | name_len++; /* trailing null */ |
827 | name_len *= 2; | 827 | name_len *= 2; |
828 | } else { /* BB add path length overrun check */ | 828 | } else { /* BB add path length overrun check */ |
@@ -893,8 +893,8 @@ DelFileRetry: | |||
893 | 893 | ||
894 | if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { | 894 | if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { |
895 | name_len = | 895 | name_len = |
896 | cifsConvertToUCS((__le16 *) pSMB->fileName, fileName, | 896 | cifsConvertToUTF16((__le16 *) pSMB->fileName, fileName, |
897 | PATH_MAX, nls_codepage, remap); | 897 | PATH_MAX, nls_codepage, remap); |
898 | name_len++; /* trailing null */ | 898 | name_len++; /* trailing null */ |
899 | name_len *= 2; | 899 | name_len *= 2; |
900 | } else { /* BB improve check for buffer overruns BB */ | 900 | } else { /* BB improve check for buffer overruns BB */ |
@@ -938,8 +938,8 @@ RmDirRetry: | |||
938 | return rc; | 938 | return rc; |
939 | 939 | ||
940 | if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { | 940 | if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { |
941 | name_len = cifsConvertToUCS((__le16 *) pSMB->DirName, dirName, | 941 | name_len = cifsConvertToUTF16((__le16 *) pSMB->DirName, dirName, |
942 | PATH_MAX, nls_codepage, remap); | 942 | PATH_MAX, nls_codepage, remap); |
943 | name_len++; /* trailing null */ | 943 | name_len++; /* trailing null */ |
944 | name_len *= 2; | 944 | name_len *= 2; |
945 | } else { /* BB improve check for buffer overruns BB */ | 945 | } else { /* BB improve check for buffer overruns BB */ |
@@ -981,8 +981,8 @@ MkDirRetry: | |||
981 | return rc; | 981 | return rc; |
982 | 982 | ||
983 | if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { | 983 | if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { |
984 | name_len = cifsConvertToUCS((__le16 *) pSMB->DirName, name, | 984 | name_len = cifsConvertToUTF16((__le16 *) pSMB->DirName, name, |
985 | PATH_MAX, nls_codepage, remap); | 985 | PATH_MAX, nls_codepage, remap); |
986 | name_len++; /* trailing null */ | 986 | name_len++; /* trailing null */ |
987 | name_len *= 2; | 987 | name_len *= 2; |
988 | } else { /* BB improve check for buffer overruns BB */ | 988 | } else { /* BB improve check for buffer overruns BB */ |
@@ -1030,8 +1030,8 @@ PsxCreat: | |||
1030 | 1030 | ||
1031 | if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { | 1031 | if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { |
1032 | name_len = | 1032 | name_len = |
1033 | cifsConvertToUCS((__le16 *) pSMB->FileName, name, | 1033 | cifsConvertToUTF16((__le16 *) pSMB->FileName, name, |
1034 | PATH_MAX, nls_codepage, remap); | 1034 | PATH_MAX, nls_codepage, remap); |
1035 | name_len++; /* trailing null */ | 1035 | name_len++; /* trailing null */ |
1036 | name_len *= 2; | 1036 | name_len *= 2; |
1037 | } else { /* BB improve the check for buffer overruns BB */ | 1037 | } else { /* BB improve the check for buffer overruns BB */ |
@@ -1197,8 +1197,8 @@ OldOpenRetry: | |||
1197 | if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { | 1197 | if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { |
1198 | count = 1; /* account for one byte pad to word boundary */ | 1198 | count = 1; /* account for one byte pad to word boundary */ |
1199 | name_len = | 1199 | name_len = |
1200 | cifsConvertToUCS((__le16 *) (pSMB->fileName + 1), | 1200 | cifsConvertToUTF16((__le16 *) (pSMB->fileName + 1), |
1201 | fileName, PATH_MAX, nls_codepage, remap); | 1201 | fileName, PATH_MAX, nls_codepage, remap); |
1202 | name_len++; /* trailing null */ | 1202 | name_len++; /* trailing null */ |
1203 | name_len *= 2; | 1203 | name_len *= 2; |
1204 | } else { /* BB improve check for buffer overruns BB */ | 1204 | } else { /* BB improve check for buffer overruns BB */ |
@@ -1304,8 +1304,8 @@ openRetry: | |||
1304 | if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { | 1304 | if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { |
1305 | count = 1; /* account for one byte pad to word boundary */ | 1305 | count = 1; /* account for one byte pad to word boundary */ |
1306 | name_len = | 1306 | name_len = |
1307 | cifsConvertToUCS((__le16 *) (pSMB->fileName + 1), | 1307 | cifsConvertToUTF16((__le16 *) (pSMB->fileName + 1), |
1308 | fileName, PATH_MAX, nls_codepage, remap); | 1308 | fileName, PATH_MAX, nls_codepage, remap); |
1309 | name_len++; /* trailing null */ | 1309 | name_len++; /* trailing null */ |
1310 | name_len *= 2; | 1310 | name_len *= 2; |
1311 | pSMB->NameLength = cpu_to_le16(name_len); | 1311 | pSMB->NameLength = cpu_to_le16(name_len); |
@@ -2649,16 +2649,16 @@ renameRetry: | |||
2649 | 2649 | ||
2650 | if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { | 2650 | if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { |
2651 | name_len = | 2651 | name_len = |
2652 | cifsConvertToUCS((__le16 *) pSMB->OldFileName, fromName, | 2652 | cifsConvertToUTF16((__le16 *) pSMB->OldFileName, fromName, |
2653 | PATH_MAX, nls_codepage, remap); | 2653 | PATH_MAX, nls_codepage, remap); |
2654 | name_len++; /* trailing null */ | 2654 | name_len++; /* trailing null */ |
2655 | name_len *= 2; | 2655 | name_len *= 2; |
2656 | pSMB->OldFileName[name_len] = 0x04; /* pad */ | 2656 | pSMB->OldFileName[name_len] = 0x04; /* pad */ |
2657 | /* protocol requires ASCII signature byte on Unicode string */ | 2657 | /* protocol requires ASCII signature byte on Unicode string */ |
2658 | pSMB->OldFileName[name_len + 1] = 0x00; | 2658 | pSMB->OldFileName[name_len + 1] = 0x00; |
2659 | name_len2 = | 2659 | name_len2 = |
2660 | cifsConvertToUCS((__le16 *)&pSMB->OldFileName[name_len + 2], | 2660 | cifsConvertToUTF16((__le16 *)&pSMB->OldFileName[name_len+2], |
2661 | toName, PATH_MAX, nls_codepage, remap); | 2661 | toName, PATH_MAX, nls_codepage, remap); |
2662 | name_len2 += 1 /* trailing null */ + 1 /* Signature word */ ; | 2662 | name_len2 += 1 /* trailing null */ + 1 /* Signature word */ ; |
2663 | name_len2 *= 2; /* convert to bytes */ | 2663 | name_len2 *= 2; /* convert to bytes */ |
2664 | } else { /* BB improve the check for buffer overruns BB */ | 2664 | } else { /* BB improve the check for buffer overruns BB */ |
@@ -2738,10 +2738,12 @@ int CIFSSMBRenameOpenFile(const int xid, struct cifs_tcon *pTcon, | |||
2738 | /* unicode only call */ | 2738 | /* unicode only call */ |
2739 | if (target_name == NULL) { | 2739 | if (target_name == NULL) { |
2740 | sprintf(dummy_string, "cifs%x", pSMB->hdr.Mid); | 2740 | sprintf(dummy_string, "cifs%x", pSMB->hdr.Mid); |
2741 | len_of_str = cifsConvertToUCS((__le16 *)rename_info->target_name, | 2741 | len_of_str = |
2742 | cifsConvertToUTF16((__le16 *)rename_info->target_name, | ||
2742 | dummy_string, 24, nls_codepage, remap); | 2743 | dummy_string, 24, nls_codepage, remap); |
2743 | } else { | 2744 | } else { |
2744 | len_of_str = cifsConvertToUCS((__le16 *)rename_info->target_name, | 2745 | len_of_str = |
2746 | cifsConvertToUTF16((__le16 *)rename_info->target_name, | ||
2745 | target_name, PATH_MAX, nls_codepage, | 2747 | target_name, PATH_MAX, nls_codepage, |
2746 | remap); | 2748 | remap); |
2747 | } | 2749 | } |
@@ -2795,17 +2797,17 @@ copyRetry: | |||
2795 | pSMB->Flags = cpu_to_le16(flags & COPY_TREE); | 2797 | pSMB->Flags = cpu_to_le16(flags & COPY_TREE); |
2796 | 2798 | ||
2797 | if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { | 2799 | if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { |
2798 | name_len = cifsConvertToUCS((__le16 *) pSMB->OldFileName, | 2800 | name_len = cifsConvertToUTF16((__le16 *) pSMB->OldFileName, |
2799 | fromName, PATH_MAX, nls_codepage, | 2801 | fromName, PATH_MAX, nls_codepage, |
2800 | remap); | 2802 | remap); |
2801 | name_len++; /* trailing null */ | 2803 | name_len++; /* trailing null */ |
2802 | name_len *= 2; | 2804 | name_len *= 2; |
2803 | pSMB->OldFileName[name_len] = 0x04; /* pad */ | 2805 | pSMB->OldFileName[name_len] = 0x04; /* pad */ |
2804 | /* protocol requires ASCII signature byte on Unicode string */ | 2806 | /* protocol requires ASCII signature byte on Unicode string */ |
2805 | pSMB->OldFileName[name_len + 1] = 0x00; | 2807 | pSMB->OldFileName[name_len + 1] = 0x00; |
2806 | name_len2 = | 2808 | name_len2 = |
2807 | cifsConvertToUCS((__le16 *)&pSMB->OldFileName[name_len + 2], | 2809 | cifsConvertToUTF16((__le16 *)&pSMB->OldFileName[name_len+2], |
2808 | toName, PATH_MAX, nls_codepage, remap); | 2810 | toName, PATH_MAX, nls_codepage, remap); |
2809 | name_len2 += 1 /* trailing null */ + 1 /* Signature word */ ; | 2811 | name_len2 += 1 /* trailing null */ + 1 /* Signature word */ ; |
2810 | name_len2 *= 2; /* convert to bytes */ | 2812 | name_len2 *= 2; /* convert to bytes */ |
2811 | } else { /* BB improve the check for buffer overruns BB */ | 2813 | } else { /* BB improve the check for buffer overruns BB */ |
@@ -2861,9 +2863,9 @@ createSymLinkRetry: | |||
2861 | 2863 | ||
2862 | if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { | 2864 | if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { |
2863 | name_len = | 2865 | name_len = |
2864 | cifs_strtoUCS((__le16 *) pSMB->FileName, fromName, PATH_MAX | 2866 | cifs_strtoUTF16((__le16 *) pSMB->FileName, fromName, |
2865 | /* find define for this maxpathcomponent */ | 2867 | /* find define for this maxpathcomponent */ |
2866 | , nls_codepage); | 2868 | PATH_MAX, nls_codepage); |
2867 | name_len++; /* trailing null */ | 2869 | name_len++; /* trailing null */ |
2868 | name_len *= 2; | 2870 | name_len *= 2; |
2869 | 2871 | ||
@@ -2885,9 +2887,9 @@ createSymLinkRetry: | |||
2885 | data_offset = (char *) (&pSMB->hdr.Protocol) + offset; | 2887 | data_offset = (char *) (&pSMB->hdr.Protocol) + offset; |
2886 | if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { | 2888 | if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { |
2887 | name_len_target = | 2889 | name_len_target = |
2888 | cifs_strtoUCS((__le16 *) data_offset, toName, PATH_MAX | 2890 | cifs_strtoUTF16((__le16 *) data_offset, toName, PATH_MAX |
2889 | /* find define for this maxpathcomponent */ | 2891 | /* find define for this maxpathcomponent */ |
2890 | , nls_codepage); | 2892 | , nls_codepage); |
2891 | name_len_target++; /* trailing null */ | 2893 | name_len_target++; /* trailing null */ |
2892 | name_len_target *= 2; | 2894 | name_len_target *= 2; |
2893 | } else { /* BB improve the check for buffer overruns BB */ | 2895 | } else { /* BB improve the check for buffer overruns BB */ |
@@ -2949,8 +2951,8 @@ createHardLinkRetry: | |||
2949 | return rc; | 2951 | return rc; |
2950 | 2952 | ||
2951 | if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { | 2953 | if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { |
2952 | name_len = cifsConvertToUCS((__le16 *) pSMB->FileName, toName, | 2954 | name_len = cifsConvertToUTF16((__le16 *) pSMB->FileName, toName, |
2953 | PATH_MAX, nls_codepage, remap); | 2955 | PATH_MAX, nls_codepage, remap); |
2954 | name_len++; /* trailing null */ | 2956 | name_len++; /* trailing null */ |
2955 | name_len *= 2; | 2957 | name_len *= 2; |
2956 | 2958 | ||
@@ -2972,8 +2974,8 @@ createHardLinkRetry: | |||
2972 | data_offset = (char *) (&pSMB->hdr.Protocol) + offset; | 2974 | data_offset = (char *) (&pSMB->hdr.Protocol) + offset; |
2973 | if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { | 2975 | if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { |
2974 | name_len_target = | 2976 | name_len_target = |
2975 | cifsConvertToUCS((__le16 *) data_offset, fromName, PATH_MAX, | 2977 | cifsConvertToUTF16((__le16 *) data_offset, fromName, |
2976 | nls_codepage, remap); | 2978 | PATH_MAX, nls_codepage, remap); |
2977 | name_len_target++; /* trailing null */ | 2979 | name_len_target++; /* trailing null */ |
2978 | name_len_target *= 2; | 2980 | name_len_target *= 2; |
2979 | } else { /* BB improve the check for buffer overruns BB */ | 2981 | } else { /* BB improve the check for buffer overruns BB */ |
@@ -3042,8 +3044,8 @@ winCreateHardLinkRetry: | |||
3042 | 3044 | ||
3043 | if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { | 3045 | if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { |
3044 | name_len = | 3046 | name_len = |
3045 | cifsConvertToUCS((__le16 *) pSMB->OldFileName, fromName, | 3047 | cifsConvertToUTF16((__le16 *) pSMB->OldFileName, fromName, |
3046 | PATH_MAX, nls_codepage, remap); | 3048 | PATH_MAX, nls_codepage, remap); |
3047 | name_len++; /* trailing null */ | 3049 | name_len++; /* trailing null */ |
3048 | name_len *= 2; | 3050 | name_len *= 2; |
3049 | 3051 | ||
@@ -3051,8 +3053,8 @@ winCreateHardLinkRetry: | |||
3051 | pSMB->OldFileName[name_len] = 0x04; | 3053 | pSMB->OldFileName[name_len] = 0x04; |
3052 | pSMB->OldFileName[name_len + 1] = 0x00; /* pad */ | 3054 | pSMB->OldFileName[name_len + 1] = 0x00; /* pad */ |
3053 | name_len2 = | 3055 | name_len2 = |
3054 | cifsConvertToUCS((__le16 *)&pSMB->OldFileName[name_len + 2], | 3056 | cifsConvertToUTF16((__le16 *)&pSMB->OldFileName[name_len+2], |
3055 | toName, PATH_MAX, nls_codepage, remap); | 3057 | toName, PATH_MAX, nls_codepage, remap); |
3056 | name_len2 += 1 /* trailing null */ + 1 /* Signature word */ ; | 3058 | name_len2 += 1 /* trailing null */ + 1 /* Signature word */ ; |
3057 | name_len2 *= 2; /* convert to bytes */ | 3059 | name_len2 *= 2; /* convert to bytes */ |
3058 | } else { /* BB improve the check for buffer overruns BB */ | 3060 | } else { /* BB improve the check for buffer overruns BB */ |
@@ -3108,8 +3110,8 @@ querySymLinkRetry: | |||
3108 | 3110 | ||
3109 | if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { | 3111 | if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { |
3110 | name_len = | 3112 | name_len = |
3111 | cifs_strtoUCS((__le16 *) pSMB->FileName, searchName, | 3113 | cifs_strtoUTF16((__le16 *) pSMB->FileName, searchName, |
3112 | PATH_MAX, nls_codepage); | 3114 | PATH_MAX, nls_codepage); |
3113 | name_len++; /* trailing null */ | 3115 | name_len++; /* trailing null */ |
3114 | name_len *= 2; | 3116 | name_len *= 2; |
3115 | } else { /* BB improve the check for buffer overruns BB */ | 3117 | } else { /* BB improve the check for buffer overruns BB */ |
@@ -3166,8 +3168,8 @@ querySymLinkRetry: | |||
3166 | is_unicode = false; | 3168 | is_unicode = false; |
3167 | 3169 | ||
3168 | /* BB FIXME investigate remapping reserved chars here */ | 3170 | /* BB FIXME investigate remapping reserved chars here */ |
3169 | *symlinkinfo = cifs_strndup_from_ucs(data_start, count, | 3171 | *symlinkinfo = cifs_strndup_from_utf16(data_start, |
3170 | is_unicode, nls_codepage); | 3172 | count, is_unicode, nls_codepage); |
3171 | if (!*symlinkinfo) | 3173 | if (!*symlinkinfo) |
3172 | rc = -ENOMEM; | 3174 | rc = -ENOMEM; |
3173 | } | 3175 | } |
@@ -3450,8 +3452,9 @@ queryAclRetry: | |||
3450 | 3452 | ||
3451 | if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { | 3453 | if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { |
3452 | name_len = | 3454 | name_len = |
3453 | cifsConvertToUCS((__le16 *) pSMB->FileName, searchName, | 3455 | cifsConvertToUTF16((__le16 *) pSMB->FileName, |
3454 | PATH_MAX, nls_codepage, remap); | 3456 | searchName, PATH_MAX, nls_codepage, |
3457 | remap); | ||
3455 | name_len++; /* trailing null */ | 3458 | name_len++; /* trailing null */ |
3456 | name_len *= 2; | 3459 | name_len *= 2; |
3457 | pSMB->FileName[name_len] = 0; | 3460 | pSMB->FileName[name_len] = 0; |
@@ -3537,8 +3540,8 @@ setAclRetry: | |||
3537 | return rc; | 3540 | return rc; |
3538 | if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { | 3541 | if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { |
3539 | name_len = | 3542 | name_len = |
3540 | cifsConvertToUCS((__le16 *) pSMB->FileName, fileName, | 3543 | cifsConvertToUTF16((__le16 *) pSMB->FileName, fileName, |
3541 | PATH_MAX, nls_codepage, remap); | 3544 | PATH_MAX, nls_codepage, remap); |
3542 | name_len++; /* trailing null */ | 3545 | name_len++; /* trailing null */ |
3543 | name_len *= 2; | 3546 | name_len *= 2; |
3544 | } else { /* BB improve the check for buffer overruns BB */ | 3547 | } else { /* BB improve the check for buffer overruns BB */ |
@@ -3948,8 +3951,9 @@ QInfRetry: | |||
3948 | 3951 | ||
3949 | if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { | 3952 | if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { |
3950 | name_len = | 3953 | name_len = |
3951 | cifsConvertToUCS((__le16 *) pSMB->FileName, searchName, | 3954 | cifsConvertToUTF16((__le16 *) pSMB->FileName, |
3952 | PATH_MAX, nls_codepage, remap); | 3955 | searchName, PATH_MAX, nls_codepage, |
3956 | remap); | ||
3953 | name_len++; /* trailing null */ | 3957 | name_len++; /* trailing null */ |
3954 | name_len *= 2; | 3958 | name_len *= 2; |
3955 | } else { | 3959 | } else { |
@@ -4086,8 +4090,8 @@ QPathInfoRetry: | |||
4086 | 4090 | ||
4087 | if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { | 4091 | if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { |
4088 | name_len = | 4092 | name_len = |
4089 | cifsConvertToUCS((__le16 *) pSMB->FileName, searchName, | 4093 | cifsConvertToUTF16((__le16 *) pSMB->FileName, searchName, |
4090 | PATH_MAX, nls_codepage, remap); | 4094 | PATH_MAX, nls_codepage, remap); |
4091 | name_len++; /* trailing null */ | 4095 | name_len++; /* trailing null */ |
4092 | name_len *= 2; | 4096 | name_len *= 2; |
4093 | } else { /* BB improve the check for buffer overruns BB */ | 4097 | } else { /* BB improve the check for buffer overruns BB */ |
@@ -4255,8 +4259,8 @@ UnixQPathInfoRetry: | |||
4255 | 4259 | ||
4256 | if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { | 4260 | if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { |
4257 | name_len = | 4261 | name_len = |
4258 | cifsConvertToUCS((__le16 *) pSMB->FileName, searchName, | 4262 | cifsConvertToUTF16((__le16 *) pSMB->FileName, searchName, |
4259 | PATH_MAX, nls_codepage, remap); | 4263 | PATH_MAX, nls_codepage, remap); |
4260 | name_len++; /* trailing null */ | 4264 | name_len++; /* trailing null */ |
4261 | name_len *= 2; | 4265 | name_len *= 2; |
4262 | } else { /* BB improve the check for buffer overruns BB */ | 4266 | } else { /* BB improve the check for buffer overruns BB */ |
@@ -4344,8 +4348,8 @@ findFirstRetry: | |||
4344 | 4348 | ||
4345 | if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { | 4349 | if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { |
4346 | name_len = | 4350 | name_len = |
4347 | cifsConvertToUCS((__le16 *) pSMB->FileName, searchName, | 4351 | cifsConvertToUTF16((__le16 *) pSMB->FileName, searchName, |
4348 | PATH_MAX, nls_codepage, remap); | 4352 | PATH_MAX, nls_codepage, remap); |
4349 | /* We cannot add the asterisk earlier in case | 4353 | /* We cannot add the asterisk earlier in case |
4350 | it got remapped to 0xF03A as if it were part of the | 4354 | it got remapped to 0xF03A as if it were part of the |
4351 | directory name instead of a wildcard */ | 4355 | directory name instead of a wildcard */ |
@@ -4656,8 +4660,9 @@ GetInodeNumberRetry: | |||
4656 | 4660 | ||
4657 | if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { | 4661 | if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { |
4658 | name_len = | 4662 | name_len = |
4659 | cifsConvertToUCS((__le16 *) pSMB->FileName, searchName, | 4663 | cifsConvertToUTF16((__le16 *) pSMB->FileName, |
4660 | PATH_MAX, nls_codepage, remap); | 4664 | searchName, PATH_MAX, nls_codepage, |
4665 | remap); | ||
4661 | name_len++; /* trailing null */ | 4666 | name_len++; /* trailing null */ |
4662 | name_len *= 2; | 4667 | name_len *= 2; |
4663 | } else { /* BB improve the check for buffer overruns BB */ | 4668 | } else { /* BB improve the check for buffer overruns BB */ |
@@ -4794,9 +4799,9 @@ parse_DFS_referrals(TRANSACTION2_GET_DFS_REFER_RSP *pSMBr, | |||
4794 | rc = -ENOMEM; | 4799 | rc = -ENOMEM; |
4795 | goto parse_DFS_referrals_exit; | 4800 | goto parse_DFS_referrals_exit; |
4796 | } | 4801 | } |
4797 | cifsConvertToUCS((__le16 *) tmp, searchName, | 4802 | cifsConvertToUTF16((__le16 *) tmp, searchName, |
4798 | PATH_MAX, nls_codepage, remap); | 4803 | PATH_MAX, nls_codepage, remap); |
4799 | node->path_consumed = cifs_ucs2_bytes(tmp, | 4804 | node->path_consumed = cifs_utf16_bytes(tmp, |
4800 | le16_to_cpu(pSMBr->PathConsumed), | 4805 | le16_to_cpu(pSMBr->PathConsumed), |
4801 | nls_codepage); | 4806 | nls_codepage); |
4802 | kfree(tmp); | 4807 | kfree(tmp); |
@@ -4809,8 +4814,8 @@ parse_DFS_referrals(TRANSACTION2_GET_DFS_REFER_RSP *pSMBr, | |||
4809 | /* copy DfsPath */ | 4814 | /* copy DfsPath */ |
4810 | temp = (char *)ref + le16_to_cpu(ref->DfsPathOffset); | 4815 | temp = (char *)ref + le16_to_cpu(ref->DfsPathOffset); |
4811 | max_len = data_end - temp; | 4816 | max_len = data_end - temp; |
4812 | node->path_name = cifs_strndup_from_ucs(temp, max_len, | 4817 | node->path_name = cifs_strndup_from_utf16(temp, max_len, |
4813 | is_unicode, nls_codepage); | 4818 | is_unicode, nls_codepage); |
4814 | if (!node->path_name) { | 4819 | if (!node->path_name) { |
4815 | rc = -ENOMEM; | 4820 | rc = -ENOMEM; |
4816 | goto parse_DFS_referrals_exit; | 4821 | goto parse_DFS_referrals_exit; |
@@ -4819,8 +4824,8 @@ parse_DFS_referrals(TRANSACTION2_GET_DFS_REFER_RSP *pSMBr, | |||
4819 | /* copy link target UNC */ | 4824 | /* copy link target UNC */ |
4820 | temp = (char *)ref + le16_to_cpu(ref->NetworkAddressOffset); | 4825 | temp = (char *)ref + le16_to_cpu(ref->NetworkAddressOffset); |
4821 | max_len = data_end - temp; | 4826 | max_len = data_end - temp; |
4822 | node->node_name = cifs_strndup_from_ucs(temp, max_len, | 4827 | node->node_name = cifs_strndup_from_utf16(temp, max_len, |
4823 | is_unicode, nls_codepage); | 4828 | is_unicode, nls_codepage); |
4824 | if (!node->node_name) | 4829 | if (!node->node_name) |
4825 | rc = -ENOMEM; | 4830 | rc = -ENOMEM; |
4826 | } | 4831 | } |
@@ -4873,8 +4878,9 @@ getDFSRetry: | |||
4873 | if (ses->capabilities & CAP_UNICODE) { | 4878 | if (ses->capabilities & CAP_UNICODE) { |
4874 | pSMB->hdr.Flags2 |= SMBFLG2_UNICODE; | 4879 | pSMB->hdr.Flags2 |= SMBFLG2_UNICODE; |
4875 | name_len = | 4880 | name_len = |
4876 | cifsConvertToUCS((__le16 *) pSMB->RequestFileName, | 4881 | cifsConvertToUTF16((__le16 *) pSMB->RequestFileName, |
4877 | searchName, PATH_MAX, nls_codepage, remap); | 4882 | searchName, PATH_MAX, nls_codepage, |
4883 | remap); | ||
4878 | name_len++; /* trailing null */ | 4884 | name_len++; /* trailing null */ |
4879 | name_len *= 2; | 4885 | name_len *= 2; |
4880 | } else { /* BB improve the check for buffer overruns BB */ | 4886 | } else { /* BB improve the check for buffer overruns BB */ |
@@ -5506,8 +5512,8 @@ SetEOFRetry: | |||
5506 | 5512 | ||
5507 | if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { | 5513 | if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { |
5508 | name_len = | 5514 | name_len = |
5509 | cifsConvertToUCS((__le16 *) pSMB->FileName, fileName, | 5515 | cifsConvertToUTF16((__le16 *) pSMB->FileName, fileName, |
5510 | PATH_MAX, nls_codepage, remap); | 5516 | PATH_MAX, nls_codepage, remap); |
5511 | name_len++; /* trailing null */ | 5517 | name_len++; /* trailing null */ |
5512 | name_len *= 2; | 5518 | name_len *= 2; |
5513 | } else { /* BB improve the check for buffer overruns BB */ | 5519 | } else { /* BB improve the check for buffer overruns BB */ |
@@ -5796,8 +5802,8 @@ SetTimesRetry: | |||
5796 | 5802 | ||
5797 | if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { | 5803 | if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { |
5798 | name_len = | 5804 | name_len = |
5799 | cifsConvertToUCS((__le16 *) pSMB->FileName, fileName, | 5805 | cifsConvertToUTF16((__le16 *) pSMB->FileName, fileName, |
5800 | PATH_MAX, nls_codepage, remap); | 5806 | PATH_MAX, nls_codepage, remap); |
5801 | name_len++; /* trailing null */ | 5807 | name_len++; /* trailing null */ |
5802 | name_len *= 2; | 5808 | name_len *= 2; |
5803 | } else { /* BB improve the check for buffer overruns BB */ | 5809 | } else { /* BB improve the check for buffer overruns BB */ |
@@ -5877,8 +5883,8 @@ SetAttrLgcyRetry: | |||
5877 | 5883 | ||
5878 | if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { | 5884 | if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { |
5879 | name_len = | 5885 | name_len = |
5880 | ConvertToUCS((__le16 *) pSMB->fileName, fileName, | 5886 | ConvertToUTF16((__le16 *) pSMB->fileName, fileName, |
5881 | PATH_MAX, nls_codepage); | 5887 | PATH_MAX, nls_codepage); |
5882 | name_len++; /* trailing null */ | 5888 | name_len++; /* trailing null */ |
5883 | name_len *= 2; | 5889 | name_len *= 2; |
5884 | } else { /* BB improve the check for buffer overruns BB */ | 5890 | } else { /* BB improve the check for buffer overruns BB */ |
@@ -6030,8 +6036,8 @@ setPermsRetry: | |||
6030 | 6036 | ||
6031 | if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { | 6037 | if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { |
6032 | name_len = | 6038 | name_len = |
6033 | cifsConvertToUCS((__le16 *) pSMB->FileName, fileName, | 6039 | cifsConvertToUTF16((__le16 *) pSMB->FileName, fileName, |
6034 | PATH_MAX, nls_codepage, remap); | 6040 | PATH_MAX, nls_codepage, remap); |
6035 | name_len++; /* trailing null */ | 6041 | name_len++; /* trailing null */ |
6036 | name_len *= 2; | 6042 | name_len *= 2; |
6037 | } else { /* BB improve the check for buffer overruns BB */ | 6043 | } else { /* BB improve the check for buffer overruns BB */ |
@@ -6123,8 +6129,8 @@ QAllEAsRetry: | |||
6123 | 6129 | ||
6124 | if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { | 6130 | if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { |
6125 | list_len = | 6131 | list_len = |
6126 | cifsConvertToUCS((__le16 *) pSMB->FileName, searchName, | 6132 | cifsConvertToUTF16((__le16 *) pSMB->FileName, searchName, |
6127 | PATH_MAX, nls_codepage, remap); | 6133 | PATH_MAX, nls_codepage, remap); |
6128 | list_len++; /* trailing null */ | 6134 | list_len++; /* trailing null */ |
6129 | list_len *= 2; | 6135 | list_len *= 2; |
6130 | } else { /* BB improve the check for buffer overruns BB */ | 6136 | } else { /* BB improve the check for buffer overruns BB */ |
@@ -6301,8 +6307,8 @@ SetEARetry: | |||
6301 | 6307 | ||
6302 | if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { | 6308 | if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { |
6303 | name_len = | 6309 | name_len = |
6304 | cifsConvertToUCS((__le16 *) pSMB->FileName, fileName, | 6310 | cifsConvertToUTF16((__le16 *) pSMB->FileName, fileName, |
6305 | PATH_MAX, nls_codepage, remap); | 6311 | PATH_MAX, nls_codepage, remap); |
6306 | name_len++; /* trailing null */ | 6312 | name_len++; /* trailing null */ |
6307 | name_len *= 2; | 6313 | name_len *= 2; |
6308 | } else { /* BB improve the check for buffer overruns BB */ | 6314 | } else { /* BB improve the check for buffer overruns BB */ |
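The fs/cifs/cifssmb.c hunks above are a mechanical rename of the cifsConvertToUCS()/cifs_strtoUCS() helpers to their UTF16-named equivalents; the surrounding name_len bookkeeping (convert, add one for the trailing null, double to get a byte count) is untouched. A standalone sketch of just that bookkeeping follows; toy_convert_to_utf16le() is an invented, ASCII-only stand-in, while the real kernel helper emits __le16 and also remaps reserved characters.

#include <stdint.h>
#include <stdio.h>

/* Invented stand-in for the kernel conversion helper: copies an ASCII
 * path into a 16-bit buffer and returns the number of 16-bit characters
 * written, not counting the terminating null. */
static int toy_convert_to_utf16le(uint16_t *dst, const char *src, int maxlen)
{
	int i;

	for (i = 0; i < maxlen - 1 && src[i]; i++)
		dst[i] = (uint16_t)(unsigned char)src[i];
	dst[i] = 0;
	return i;
}

int main(void)
{
	uint16_t buf[260];
	int name_len = toy_convert_to_utf16le(buf, "\\dir\\file.txt", 260);

	name_len++;		/* trailing null */
	name_len *= 2;		/* convert to bytes for the on-the-wire length field */
	printf("name_len = %d bytes\n", name_len);
	return 0;
}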
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index 4666780f315d..602f77c304c9 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c | |||
@@ -38,6 +38,7 @@ | |||
38 | #include <asm/processor.h> | 38 | #include <asm/processor.h> |
39 | #include <linux/inet.h> | 39 | #include <linux/inet.h> |
40 | #include <linux/module.h> | 40 | #include <linux/module.h> |
41 | #include <keys/user-type.h> | ||
41 | #include <net/ipv6.h> | 42 | #include <net/ipv6.h> |
42 | #include "cifspdu.h" | 43 | #include "cifspdu.h" |
43 | #include "cifsglob.h" | 44 | #include "cifsglob.h" |
@@ -225,74 +226,90 @@ static int check2ndT2(struct smb_hdr *pSMB) | |||
225 | 226 | ||
226 | static int coalesce_t2(struct smb_hdr *psecond, struct smb_hdr *pTargetSMB) | 227 | static int coalesce_t2(struct smb_hdr *psecond, struct smb_hdr *pTargetSMB) |
227 | { | 228 | { |
228 | struct smb_t2_rsp *pSMB2 = (struct smb_t2_rsp *)psecond; | 229 | struct smb_t2_rsp *pSMBs = (struct smb_t2_rsp *)psecond; |
229 | struct smb_t2_rsp *pSMBt = (struct smb_t2_rsp *)pTargetSMB; | 230 | struct smb_t2_rsp *pSMBt = (struct smb_t2_rsp *)pTargetSMB; |
230 | char *data_area_of_target; | 231 | char *data_area_of_tgt; |
231 | char *data_area_of_buf2; | 232 | char *data_area_of_src; |
232 | int remaining; | 233 | int remaining; |
233 | unsigned int byte_count, total_in_buf; | 234 | unsigned int byte_count, total_in_tgt; |
234 | __u16 total_data_size, total_in_buf2; | 235 | __u16 tgt_total_cnt, src_total_cnt, total_in_src; |
235 | 236 | ||
236 | total_data_size = get_unaligned_le16(&pSMBt->t2_rsp.TotalDataCount); | 237 | src_total_cnt = get_unaligned_le16(&pSMBs->t2_rsp.TotalDataCount); |
238 | tgt_total_cnt = get_unaligned_le16(&pSMBt->t2_rsp.TotalDataCount); | ||
237 | 239 | ||
238 | if (total_data_size != | 240 | if (tgt_total_cnt != src_total_cnt) |
239 | get_unaligned_le16(&pSMB2->t2_rsp.TotalDataCount)) | 241 | cFYI(1, "total data count of primary and secondary t2 differ " |
240 | cFYI(1, "total data size of primary and secondary t2 differ"); | 242 | "source=%hu target=%hu", src_total_cnt, tgt_total_cnt); |
241 | 243 | ||
242 | total_in_buf = get_unaligned_le16(&pSMBt->t2_rsp.DataCount); | 244 | total_in_tgt = get_unaligned_le16(&pSMBt->t2_rsp.DataCount); |
243 | 245 | ||
244 | remaining = total_data_size - total_in_buf; | 246 | remaining = tgt_total_cnt - total_in_tgt; |
245 | 247 | ||
246 | if (remaining < 0) | 248 | if (remaining < 0) { |
249 | cFYI(1, "Server sent too much data. tgt_total_cnt=%hu " | ||
250 | "total_in_tgt=%hu", tgt_total_cnt, total_in_tgt); | ||
247 | return -EPROTO; | 251 | return -EPROTO; |
252 | } | ||
248 | 253 | ||
249 | if (remaining == 0) /* nothing to do, ignore */ | 254 | if (remaining == 0) { |
255 | /* nothing to do, ignore */ | ||
256 | cFYI(1, "no more data remains"); | ||
250 | return 0; | 257 | return 0; |
258 | } | ||
251 | 259 | ||
252 | total_in_buf2 = get_unaligned_le16(&pSMB2->t2_rsp.DataCount); | 260 | total_in_src = get_unaligned_le16(&pSMBs->t2_rsp.DataCount); |
253 | if (remaining < total_in_buf2) { | 261 | if (remaining < total_in_src) |
254 | cFYI(1, "transact2 2nd response contains too much data"); | 262 | cFYI(1, "transact2 2nd response contains too much data"); |
255 | } | ||
256 | 263 | ||
257 | /* find end of first SMB data area */ | 264 | /* find end of first SMB data area */ |
258 | data_area_of_target = (char *)&pSMBt->hdr.Protocol + | 265 | data_area_of_tgt = (char *)&pSMBt->hdr.Protocol + |
259 | get_unaligned_le16(&pSMBt->t2_rsp.DataOffset); | 266 | get_unaligned_le16(&pSMBt->t2_rsp.DataOffset); |
260 | /* validate target area */ | ||
261 | 267 | ||
262 | data_area_of_buf2 = (char *)&pSMB2->hdr.Protocol + | 268 | /* validate target area */ |
263 | get_unaligned_le16(&pSMB2->t2_rsp.DataOffset); | 269 | data_area_of_src = (char *)&pSMBs->hdr.Protocol + |
270 | get_unaligned_le16(&pSMBs->t2_rsp.DataOffset); | ||
264 | 271 | ||
265 | data_area_of_target += total_in_buf; | 272 | data_area_of_tgt += total_in_tgt; |
266 | 273 | ||
267 | /* copy second buffer into end of first buffer */ | 274 | total_in_tgt += total_in_src; |
268 | total_in_buf += total_in_buf2; | ||
269 | /* is the result too big for the field? */ | 275 | /* is the result too big for the field? */ |
270 | if (total_in_buf > USHRT_MAX) | 276 | if (total_in_tgt > USHRT_MAX) { |
277 | cFYI(1, "coalesced DataCount too large (%u)", total_in_tgt); | ||
271 | return -EPROTO; | 278 | return -EPROTO; |
272 | put_unaligned_le16(total_in_buf, &pSMBt->t2_rsp.DataCount); | 279 | } |
280 | put_unaligned_le16(total_in_tgt, &pSMBt->t2_rsp.DataCount); | ||
273 | 281 | ||
274 | /* fix up the BCC */ | 282 | /* fix up the BCC */ |
275 | byte_count = get_bcc(pTargetSMB); | 283 | byte_count = get_bcc(pTargetSMB); |
276 | byte_count += total_in_buf2; | 284 | byte_count += total_in_src; |
277 | /* is the result too big for the field? */ | 285 | /* is the result too big for the field? */ |
278 | if (byte_count > USHRT_MAX) | 286 | if (byte_count > USHRT_MAX) { |
287 | cFYI(1, "coalesced BCC too large (%u)", byte_count); | ||
279 | return -EPROTO; | 288 | return -EPROTO; |
289 | } | ||
280 | put_bcc(byte_count, pTargetSMB); | 290 | put_bcc(byte_count, pTargetSMB); |
281 | 291 | ||
282 | byte_count = be32_to_cpu(pTargetSMB->smb_buf_length); | 292 | byte_count = be32_to_cpu(pTargetSMB->smb_buf_length); |
283 | byte_count += total_in_buf2; | 293 | byte_count += total_in_src; |
284 | /* don't allow buffer to overflow */ | 294 | /* don't allow buffer to overflow */ |
285 | if (byte_count > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) | 295 | if (byte_count > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) { |
296 | cFYI(1, "coalesced BCC exceeds buffer size (%u)", byte_count); | ||
286 | return -ENOBUFS; | 297 | return -ENOBUFS; |
298 | } | ||
287 | pTargetSMB->smb_buf_length = cpu_to_be32(byte_count); | 299 | pTargetSMB->smb_buf_length = cpu_to_be32(byte_count); |
288 | 300 | ||
289 | memcpy(data_area_of_target, data_area_of_buf2, total_in_buf2); | 301 | /* copy second buffer into end of first buffer */ |
302 | memcpy(data_area_of_tgt, data_area_of_src, total_in_src); | ||
290 | 303 | ||
291 | if (remaining == total_in_buf2) { | 304 | if (remaining != total_in_src) { |
292 | cFYI(1, "found the last secondary response"); | 305 | /* more responses to go */ |
293 | return 0; /* we are done */ | 306 | cFYI(1, "waiting for more secondary responses"); |
294 | } else /* more responses to go */ | ||
295 | return 1; | 307 | return 1; |
308 | } | ||
309 | |||
310 | /* we are done */ | ||
311 | cFYI(1, "found the last secondary response"); | ||
312 | return 0; | ||
296 | } | 313 | } |
297 | 314 | ||
298 | static void | 315 | static void |
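The coalesce_t2() rework above mostly renames variables and adds debug output, but it also moves the memcpy() after the overflow checks and inverts the final return test. Below is a userspace sketch of the same bookkeeping, with invented toy_* types standing in for the SMB headers and buffer limits.

#include <limits.h>
#include <stdio.h>
#include <string.h>

#define TOY_MAX_BUF 4096

struct toy_t2 {
	unsigned short total_data_count;	/* expected total payload */
	unsigned short data_count;		/* payload received so far */
	unsigned char data[TOY_MAX_BUF];
};

/* Returns 0 when the target response is complete, 1 when more secondary
 * responses are still expected, -1 on a malformed response. */
static int toy_coalesce(struct toy_t2 *tgt, const struct toy_t2 *src)
{
	int remaining = tgt->total_data_count - tgt->data_count;
	unsigned int new_count = tgt->data_count + src->data_count;

	if (remaining < 0)
		return -1;			/* server already sent too much */
	if (remaining == 0)
		return 0;			/* nothing left to append */
	if (new_count > USHRT_MAX || new_count > TOY_MAX_BUF)
		return -1;			/* would overflow the count field or buffer */

	/* append only after the size checks pass, as in the reworked code */
	memcpy(tgt->data + tgt->data_count, src->data, src->data_count);
	tgt->data_count = (unsigned short)new_count;

	return (remaining == src->data_count) ? 0 : 1;
}

int main(void)
{
	struct toy_t2 tgt = { .total_data_count = 8, .data_count = 4 };
	struct toy_t2 src = { .data_count = 4, .data = "more" };

	printf("rc=%d, have %hu of %hu bytes\n", toy_coalesce(&tgt, &src),
	       tgt.data_count, tgt.total_data_count);
	return 0;
}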
@@ -756,10 +773,11 @@ standard_receive3(struct TCP_Server_Info *server, struct mid_q_entry *mid) | |||
756 | cifs_dump_mem("Bad SMB: ", buf, | 773 | cifs_dump_mem("Bad SMB: ", buf, |
757 | min_t(unsigned int, server->total_read, 48)); | 774 | min_t(unsigned int, server->total_read, 48)); |
758 | 775 | ||
759 | if (mid) | 776 | if (!mid) |
760 | handle_mid(mid, server, smb_buffer, length); | 777 | return length; |
761 | 778 | ||
762 | return length; | 779 | handle_mid(mid, server, smb_buffer, length); |
780 | return 0; | ||
763 | } | 781 | } |
764 | 782 | ||
765 | static int | 783 | static int |
@@ -1578,11 +1596,14 @@ cifs_parse_mount_options(const char *mountdata, const char *devname, | |||
1578 | } | 1596 | } |
1579 | } | 1597 | } |
1580 | 1598 | ||
1581 | if (vol->multiuser && !(vol->secFlg & CIFSSEC_MAY_KRB5)) { | 1599 | #ifndef CONFIG_KEYS |
1582 | cERROR(1, "Multiuser mounts currently require krb5 " | 1600 | /* Multiuser mounts require CONFIG_KEYS support */ |
1583 | "authentication!"); | 1601 | if (vol->multiuser) { |
1602 | cERROR(1, "Multiuser mounts require kernels with " | ||
1603 | "CONFIG_KEYS enabled."); | ||
1584 | goto cifs_parse_mount_err; | 1604 | goto cifs_parse_mount_err; |
1585 | } | 1605 | } |
1606 | #endif | ||
1586 | 1607 | ||
1587 | if (vol->UNCip == NULL) | 1608 | if (vol->UNCip == NULL) |
1588 | vol->UNCip = &vol->UNC[2]; | 1609 | vol->UNCip = &vol->UNC[2]; |
@@ -1981,10 +2002,16 @@ static int match_session(struct cifs_ses *ses, struct smb_vol *vol) | |||
1981 | return 0; | 2002 | return 0; |
1982 | break; | 2003 | break; |
1983 | default: | 2004 | default: |
2005 | /* NULL username means anonymous session */ | ||
2006 | if (ses->user_name == NULL) { | ||
2007 | if (!vol->nullauth) | ||
2008 | return 0; | ||
2009 | break; | ||
2010 | } | ||
2011 | |||
1984 | /* anything else takes username/password */ | 2012 | /* anything else takes username/password */ |
1985 | if (ses->user_name == NULL) | 2013 | if (strncmp(ses->user_name, |
1986 | return 0; | 2014 | vol->username ? vol->username : "", |
1987 | if (strncmp(ses->user_name, vol->username, | ||
1988 | MAX_USERNAME_SIZE)) | 2015 | MAX_USERNAME_SIZE)) |
1989 | return 0; | 2016 | return 0; |
1990 | if (strlen(vol->username) != 0 && | 2017 | if (strlen(vol->username) != 0 && |
@@ -2039,6 +2066,132 @@ cifs_put_smb_ses(struct cifs_ses *ses) | |||
2039 | cifs_put_tcp_session(server); | 2066 | cifs_put_tcp_session(server); |
2040 | } | 2067 | } |
2041 | 2068 | ||
2069 | #ifdef CONFIG_KEYS | ||
2070 | |||
2071 | /* strlen("cifs:a:") + INET6_ADDRSTRLEN + 1 */ | ||
2072 | #define CIFSCREDS_DESC_SIZE (7 + INET6_ADDRSTRLEN + 1) | ||
2073 | |||
2074 | /* Populate username and pw fields from keyring if possible */ | ||
2075 | static int | ||
2076 | cifs_set_cifscreds(struct smb_vol *vol, struct cifs_ses *ses) | ||
2077 | { | ||
2078 | int rc = 0; | ||
2079 | char *desc, *delim, *payload; | ||
2080 | ssize_t len; | ||
2081 | struct key *key; | ||
2082 | struct TCP_Server_Info *server = ses->server; | ||
2083 | struct sockaddr_in *sa; | ||
2084 | struct sockaddr_in6 *sa6; | ||
2085 | struct user_key_payload *upayload; | ||
2086 | |||
2087 | desc = kmalloc(CIFSCREDS_DESC_SIZE, GFP_KERNEL); | ||
2088 | if (!desc) | ||
2089 | return -ENOMEM; | ||
2090 | |||
2091 | /* try to find an address key first */ | ||
2092 | switch (server->dstaddr.ss_family) { | ||
2093 | case AF_INET: | ||
2094 | sa = (struct sockaddr_in *)&server->dstaddr; | ||
2095 | sprintf(desc, "cifs:a:%pI4", &sa->sin_addr.s_addr); | ||
2096 | break; | ||
2097 | case AF_INET6: | ||
2098 | sa6 = (struct sockaddr_in6 *)&server->dstaddr; | ||
2099 | sprintf(desc, "cifs:a:%pI6c", &sa6->sin6_addr.s6_addr); | ||
2100 | break; | ||
2101 | default: | ||
2102 | cFYI(1, "Bad ss_family (%hu)", server->dstaddr.ss_family); | ||
2103 | rc = -EINVAL; | ||
2104 | goto out_err; | ||
2105 | } | ||
2106 | |||
2107 | cFYI(1, "%s: desc=%s", __func__, desc); | ||
2108 | key = request_key(&key_type_logon, desc, ""); | ||
2109 | if (IS_ERR(key)) { | ||
2110 | if (!ses->domainName) { | ||
2111 | cFYI(1, "domainName is NULL"); | ||
2112 | rc = PTR_ERR(key); | ||
2113 | goto out_err; | ||
2114 | } | ||
2115 | |||
2116 | /* didn't work, try to find a domain key */ | ||
2117 | sprintf(desc, "cifs:d:%s", ses->domainName); | ||
2118 | cFYI(1, "%s: desc=%s", __func__, desc); | ||
2119 | key = request_key(&key_type_logon, desc, ""); | ||
2120 | if (IS_ERR(key)) { | ||
2121 | rc = PTR_ERR(key); | ||
2122 | goto out_err; | ||
2123 | } | ||
2124 | } | ||
2125 | |||
2126 | down_read(&key->sem); | ||
2127 | upayload = key->payload.data; | ||
2128 | if (IS_ERR_OR_NULL(upayload)) { | ||
2129 | rc = upayload ? PTR_ERR(upayload) : -EINVAL; | ||
2130 | goto out_key_put; | ||
2131 | } | ||
2132 | |||
2133 | /* find first : in payload */ | ||
2134 | payload = (char *)upayload->data; | ||
2135 | delim = strnchr(payload, upayload->datalen, ':'); | ||
2136 | cFYI(1, "payload=%s", payload); | ||
2137 | if (!delim) { | ||
2138 | cFYI(1, "Unable to find ':' in payload (datalen=%d)", | ||
2139 | upayload->datalen); | ||
2140 | rc = -EINVAL; | ||
2141 | goto out_key_put; | ||
2142 | } | ||
2143 | |||
2144 | len = delim - payload; | ||
2145 | if (len > MAX_USERNAME_SIZE || len <= 0) { | ||
2146 | cFYI(1, "Bad value from username search (len=%zd)", len); | ||
2147 | rc = -EINVAL; | ||
2148 | goto out_key_put; | ||
2149 | } | ||
2150 | |||
2151 | vol->username = kstrndup(payload, len, GFP_KERNEL); | ||
2152 | if (!vol->username) { | ||
2153 | cFYI(1, "Unable to allocate %zd bytes for username", len); | ||
2154 | rc = -ENOMEM; | ||
2155 | goto out_key_put; | ||
2156 | } | ||
2157 | cFYI(1, "%s: username=%s", __func__, vol->username); | ||
2158 | |||
2159 | len = key->datalen - (len + 1); | ||
2160 | if (len > MAX_PASSWORD_SIZE || len <= 0) { | ||
2161 | cFYI(1, "Bad len for password search (len=%zd)", len); | ||
2162 | rc = -EINVAL; | ||
2163 | kfree(vol->username); | ||
2164 | vol->username = NULL; | ||
2165 | goto out_key_put; | ||
2166 | } | ||
2167 | |||
2168 | ++delim; | ||
2169 | vol->password = kstrndup(delim, len, GFP_KERNEL); | ||
2170 | if (!vol->password) { | ||
2171 | cFYI(1, "Unable to allocate %zd bytes for password", len); | ||
2172 | rc = -ENOMEM; | ||
2173 | kfree(vol->username); | ||
2174 | vol->username = NULL; | ||
2175 | goto out_key_put; | ||
2176 | } | ||
2177 | |||
2178 | out_key_put: | ||
2179 | up_read(&key->sem); | ||
2180 | key_put(key); | ||
2181 | out_err: | ||
2182 | kfree(desc); | ||
2183 | cFYI(1, "%s: returning %d", __func__, rc); | ||
2184 | return rc; | ||
2185 | } | ||
2186 | #else /* ! CONFIG_KEYS */ | ||
2187 | static inline int | ||
2188 | cifs_set_cifscreds(struct smb_vol *vol __attribute__((unused)), | ||
2189 | struct cifs_ses *ses __attribute__((unused))) | ||
2190 | { | ||
2191 | return -ENOSYS; | ||
2192 | } | ||
2193 | #endif /* CONFIG_KEYS */ | ||
2194 | |||
2042 | static bool warned_on_ntlm; /* globals init to false automatically */ | 2195 | static bool warned_on_ntlm; /* globals init to false automatically */ |
2043 | 2196 | ||
2044 | static struct cifs_ses * | 2197 | static struct cifs_ses * |
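The new cifs_set_cifscreds() above requests a logon key whose description is cifs:a:<address> or cifs:d:<domain> and splits its payload at the first ':' into username and password. A userspace sketch of only the payload parsing follows; the length limits and names here are placeholders, not the kernel constants.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define TOY_MAX_USER 256	/* placeholder limits, not the kernel constants */
#define TOY_MAX_PASS 512

/* Split a "user:pass" payload at the first ':', duplicating both halves.
 * Returns 0 on success, -1 on a malformed or oversized payload. */
static int toy_split_creds(const char *payload, size_t datalen,
			   char **user, char **pass)
{
	const char *delim = memchr(payload, ':', datalen);
	size_t ulen, plen;

	if (!delim)
		return -1;		/* no ':' separator in the payload */

	ulen = (size_t)(delim - payload);
	plen = datalen - (ulen + 1);
	if (ulen == 0 || ulen > TOY_MAX_USER || plen == 0 || plen > TOY_MAX_PASS)
		return -1;

	*user = strndup(payload, ulen);
	*pass = strndup(delim + 1, plen);
	if (!*user || !*pass) {
		free(*user);
		free(*pass);
		return -1;
	}
	return 0;
}

int main(void)
{
	char *u, *p;

	if (toy_split_creds("alice:s3cret", strlen("alice:s3cret"), &u, &p) == 0)
		printf("user=%s pass=%s\n", u, p);
	return 0;
}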
@@ -2914,18 +3067,33 @@ void cifs_setup_cifs_sb(struct smb_vol *pvolume_info, | |||
2914 | #define CIFS_DEFAULT_IOSIZE (1024 * 1024) | 3067 | #define CIFS_DEFAULT_IOSIZE (1024 * 1024) |
2915 | 3068 | ||
2916 | /* | 3069 | /* |
2917 | * Windows only supports a max of 60k reads. Default to that when posix | 3070 | * Windows only supports a max of 60kb reads and 65535 byte writes. Default to |
2918 | * extensions aren't in force. | 3071 | * those values when posix extensions aren't in force. In actuality here, we |
3072 | * use 65536 to allow for a write that is a multiple of 4k. Most servers seem | ||
3073 | * to be ok with the extra byte even though Windows doesn't send writes that | ||
3074 | * are that large. | ||
3075 | * | ||
3076 | * Citation: | ||
3077 | * | ||
3078 | * http://blogs.msdn.com/b/openspecification/archive/2009/04/10/smb-maximum-transmit-buffer-size-and-performance-tuning.aspx | ||
2919 | */ | 3079 | */ |
2920 | #define CIFS_DEFAULT_NON_POSIX_RSIZE (60 * 1024) | 3080 | #define CIFS_DEFAULT_NON_POSIX_RSIZE (60 * 1024) |
3081 | #define CIFS_DEFAULT_NON_POSIX_WSIZE (65536) | ||
2921 | 3082 | ||
2922 | static unsigned int | 3083 | static unsigned int |
2923 | cifs_negotiate_wsize(struct cifs_tcon *tcon, struct smb_vol *pvolume_info) | 3084 | cifs_negotiate_wsize(struct cifs_tcon *tcon, struct smb_vol *pvolume_info) |
2924 | { | 3085 | { |
2925 | __u64 unix_cap = le64_to_cpu(tcon->fsUnixInfo.Capability); | 3086 | __u64 unix_cap = le64_to_cpu(tcon->fsUnixInfo.Capability); |
2926 | struct TCP_Server_Info *server = tcon->ses->server; | 3087 | struct TCP_Server_Info *server = tcon->ses->server; |
2927 | unsigned int wsize = pvolume_info->wsize ? pvolume_info->wsize : | 3088 | unsigned int wsize; |
2928 | CIFS_DEFAULT_IOSIZE; | 3089 | |
3090 | /* start with specified wsize, or default */ | ||
3091 | if (pvolume_info->wsize) | ||
3092 | wsize = pvolume_info->wsize; | ||
3093 | else if (tcon->unix_ext && (unix_cap & CIFS_UNIX_LARGE_WRITE_CAP)) | ||
3094 | wsize = CIFS_DEFAULT_IOSIZE; | ||
3095 | else | ||
3096 | wsize = CIFS_DEFAULT_NON_POSIX_WSIZE; | ||
2929 | 3097 | ||
2930 | /* can server support 24-bit write sizes? (via UNIX extensions) */ | 3098 | /* can server support 24-bit write sizes? (via UNIX extensions) */ |
2931 | if (!tcon->unix_ext || !(unix_cap & CIFS_UNIX_LARGE_WRITE_CAP)) | 3099 | if (!tcon->unix_ext || !(unix_cap & CIFS_UNIX_LARGE_WRITE_CAP)) |
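The wsize hunk above only changes the starting value: an explicit wsize= mount option wins, a server with the UNIX large-write capability keeps the 1MiB default, and everything else now starts from 65536 bytes; the later clamping against the server's negotiated buffer size is unchanged and not modeled here. A sketch of that selection, with the two constants copied from the hunk and everything else illustrative:

#include <stdbool.h>
#include <stdio.h>

#define TOY_DEFAULT_IOSIZE		(1024 * 1024)	/* copied from the hunk */
#define TOY_DEFAULT_NON_POSIX_WSIZE	65536		/* copied from the hunk */

static unsigned int toy_pick_wsize(unsigned int requested, bool unix_ext,
				   bool large_write_cap)
{
	if (requested)
		return requested;		/* honor an explicit wsize= option */
	if (unix_ext && large_write_cap)
		return TOY_DEFAULT_IOSIZE;	/* POSIX large writes: keep 1MiB */
	return TOY_DEFAULT_NON_POSIX_WSIZE;	/* Windows-style servers: 64KiB */
}

int main(void)
{
	printf("%u\n", toy_pick_wsize(0, false, false));	/* 65536 */
	printf("%u\n", toy_pick_wsize(0, true, true));		/* 1048576 */
	return 0;
}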
@@ -3136,10 +3304,9 @@ cifs_setup_volume_info(struct smb_vol *volume_info, char *mount_data, | |||
3136 | return -EINVAL; | 3304 | return -EINVAL; |
3137 | 3305 | ||
3138 | if (volume_info->nullauth) { | 3306 | if (volume_info->nullauth) { |
3139 | cFYI(1, "null user"); | 3307 | cFYI(1, "Anonymous login"); |
3140 | volume_info->username = kzalloc(1, GFP_KERNEL); | 3308 | kfree(volume_info->username); |
3141 | if (volume_info->username == NULL) | 3309 | volume_info->username = NULL; |
3142 | return -ENOMEM; | ||
3143 | } else if (volume_info->username) { | 3310 | } else if (volume_info->username) { |
3144 | /* BB fixme parse for domain name here */ | 3311 | /* BB fixme parse for domain name here */ |
3145 | cFYI(1, "Username: %s", volume_info->username); | 3312 | cFYI(1, "Username: %s", volume_info->username); |
@@ -3478,7 +3645,7 @@ CIFSTCon(unsigned int xid, struct cifs_ses *ses, | |||
3478 | if (ses->capabilities & CAP_UNICODE) { | 3645 | if (ses->capabilities & CAP_UNICODE) { |
3479 | smb_buffer->Flags2 |= SMBFLG2_UNICODE; | 3646 | smb_buffer->Flags2 |= SMBFLG2_UNICODE; |
3480 | length = | 3647 | length = |
3481 | cifs_strtoUCS((__le16 *) bcc_ptr, tree, | 3648 | cifs_strtoUTF16((__le16 *) bcc_ptr, tree, |
3482 | 6 /* max utf8 char length in bytes */ * | 3649 | 6 /* max utf8 char length in bytes */ * |
3483 | (/* server len*/ + 256 /* share len */), nls_codepage); | 3650 | (/* server len*/ + 256 /* share len */), nls_codepage); |
3484 | bcc_ptr += 2 * length; /* convert num 16 bit words to bytes */ | 3651 | bcc_ptr += 2 * length; /* convert num 16 bit words to bytes */ |
@@ -3533,7 +3700,7 @@ CIFSTCon(unsigned int xid, struct cifs_ses *ses, | |||
3533 | 3700 | ||
3534 | /* mostly informational -- no need to fail on error here */ | 3701 | /* mostly informational -- no need to fail on error here */ |
3535 | kfree(tcon->nativeFileSystem); | 3702 | kfree(tcon->nativeFileSystem); |
3536 | tcon->nativeFileSystem = cifs_strndup_from_ucs(bcc_ptr, | 3703 | tcon->nativeFileSystem = cifs_strndup_from_utf16(bcc_ptr, |
3537 | bytes_left, is_unicode, | 3704 | bytes_left, is_unicode, |
3538 | nls_codepage); | 3705 | nls_codepage); |
3539 | 3706 | ||
@@ -3657,25 +3824,43 @@ int cifs_setup_session(unsigned int xid, struct cifs_ses *ses, | |||
3657 | return rc; | 3824 | return rc; |
3658 | } | 3825 | } |
3659 | 3826 | ||
3827 | static int | ||
3828 | cifs_set_vol_auth(struct smb_vol *vol, struct cifs_ses *ses) | ||
3829 | { | ||
3830 | switch (ses->server->secType) { | ||
3831 | case Kerberos: | ||
3832 | vol->secFlg = CIFSSEC_MUST_KRB5; | ||
3833 | return 0; | ||
3834 | case NTLMv2: | ||
3835 | vol->secFlg = CIFSSEC_MUST_NTLMV2; | ||
3836 | break; | ||
3837 | case NTLM: | ||
3838 | vol->secFlg = CIFSSEC_MUST_NTLM; | ||
3839 | break; | ||
3840 | case RawNTLMSSP: | ||
3841 | vol->secFlg = CIFSSEC_MUST_NTLMSSP; | ||
3842 | break; | ||
3843 | case LANMAN: | ||
3844 | vol->secFlg = CIFSSEC_MUST_LANMAN; | ||
3845 | break; | ||
3846 | } | ||
3847 | |||
3848 | return cifs_set_cifscreds(vol, ses); | ||
3849 | } | ||
3850 | |||
3660 | static struct cifs_tcon * | 3851 | static struct cifs_tcon * |
3661 | cifs_construct_tcon(struct cifs_sb_info *cifs_sb, uid_t fsuid) | 3852 | cifs_construct_tcon(struct cifs_sb_info *cifs_sb, uid_t fsuid) |
3662 | { | 3853 | { |
3854 | int rc; | ||
3663 | struct cifs_tcon *master_tcon = cifs_sb_master_tcon(cifs_sb); | 3855 | struct cifs_tcon *master_tcon = cifs_sb_master_tcon(cifs_sb); |
3664 | struct cifs_ses *ses; | 3856 | struct cifs_ses *ses; |
3665 | struct cifs_tcon *tcon = NULL; | 3857 | struct cifs_tcon *tcon = NULL; |
3666 | struct smb_vol *vol_info; | 3858 | struct smb_vol *vol_info; |
3667 | char username[28]; /* big enough for "krb50x" + hex of ULONG_MAX 6+16 */ | ||
3668 | /* We used to have this as MAX_USERNAME which is */ | ||
3669 | /* way too big now (256 instead of 32) */ | ||
3670 | 3859 | ||
3671 | vol_info = kzalloc(sizeof(*vol_info), GFP_KERNEL); | 3860 | vol_info = kzalloc(sizeof(*vol_info), GFP_KERNEL); |
3672 | if (vol_info == NULL) { | 3861 | if (vol_info == NULL) |
3673 | tcon = ERR_PTR(-ENOMEM); | 3862 | return ERR_PTR(-ENOMEM); |
3674 | goto out; | ||
3675 | } | ||
3676 | 3863 | ||
3677 | snprintf(username, sizeof(username), "krb50x%x", fsuid); | ||
3678 | vol_info->username = username; | ||
3679 | vol_info->local_nls = cifs_sb->local_nls; | 3864 | vol_info->local_nls = cifs_sb->local_nls; |
3680 | vol_info->linux_uid = fsuid; | 3865 | vol_info->linux_uid = fsuid; |
3681 | vol_info->cred_uid = fsuid; | 3866 | vol_info->cred_uid = fsuid; |
@@ -3685,8 +3870,11 @@ cifs_construct_tcon(struct cifs_sb_info *cifs_sb, uid_t fsuid) | |||
3685 | vol_info->local_lease = master_tcon->local_lease; | 3870 | vol_info->local_lease = master_tcon->local_lease; |
3686 | vol_info->no_linux_ext = !master_tcon->unix_ext; | 3871 | vol_info->no_linux_ext = !master_tcon->unix_ext; |
3687 | 3872 | ||
3688 | /* FIXME: allow for other secFlg settings */ | 3873 | rc = cifs_set_vol_auth(vol_info, master_tcon->ses); |
3689 | vol_info->secFlg = CIFSSEC_MUST_KRB5; | 3874 | if (rc) { |
3875 | tcon = ERR_PTR(rc); | ||
3876 | goto out; | ||
3877 | } | ||
3690 | 3878 | ||
3691 | /* get a reference for the same TCP session */ | 3879 | /* get a reference for the same TCP session */ |
3692 | spin_lock(&cifs_tcp_ses_lock); | 3880 | spin_lock(&cifs_tcp_ses_lock); |
@@ -3709,6 +3897,8 @@ cifs_construct_tcon(struct cifs_sb_info *cifs_sb, uid_t fsuid) | |||
3709 | if (ses->capabilities & CAP_UNIX) | 3897 | if (ses->capabilities & CAP_UNIX) |
3710 | reset_cifs_unix_caps(0, tcon, NULL, vol_info); | 3898 | reset_cifs_unix_caps(0, tcon, NULL, vol_info); |
3711 | out: | 3899 | out: |
3900 | kfree(vol_info->username); | ||
3901 | kfree(vol_info->password); | ||
3712 | kfree(vol_info); | 3902 | kfree(vol_info); |
3713 | 3903 | ||
3714 | return tcon; | 3904 | return tcon; |
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c index df8fecb5b993..bc7e24420ac0 100644 --- a/fs/cifs/dir.c +++ b/fs/cifs/dir.c | |||
@@ -492,7 +492,7 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry, | |||
492 | { | 492 | { |
493 | int xid; | 493 | int xid; |
494 | int rc = 0; /* to get around spurious gcc warning, set to zero here */ | 494 | int rc = 0; /* to get around spurious gcc warning, set to zero here */ |
495 | __u32 oplock = 0; | 495 | __u32 oplock = enable_oplocks ? REQ_OPLOCK : 0; |
496 | __u16 fileHandle = 0; | 496 | __u16 fileHandle = 0; |
497 | bool posix_open = false; | 497 | bool posix_open = false; |
498 | struct cifs_sb_info *cifs_sb; | 498 | struct cifs_sb_info *cifs_sb; |
@@ -584,10 +584,26 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry, | |||
584 | * If either that or op not supported returned, follow | 584 | * If either that or op not supported returned, follow |
585 | * the normal lookup. | 585 | * the normal lookup. |
586 | */ | 586 | */ |
587 | if ((rc == 0) || (rc == -ENOENT)) | 587 | switch (rc) { |
588 | case 0: | ||
589 | /* | ||
590 | * The server may allow us to open things like | ||
591 | * FIFOs, but the client isn't set up to deal | ||
592 | * with that. If it's not a regular file, just | ||
593 | * close it and proceed as if it were a normal | ||
594 | * lookup. | ||
595 | */ | ||
596 | if (newInode && !S_ISREG(newInode->i_mode)) { | ||
597 | CIFSSMBClose(xid, pTcon, fileHandle); | ||
598 | break; | ||
599 | } | ||
600 | case -ENOENT: | ||
588 | posix_open = true; | 601 | posix_open = true; |
589 | else if ((rc == -EINVAL) || (rc != -EOPNOTSUPP)) | 602 | case -EOPNOTSUPP: |
603 | break; | ||
604 | default: | ||
590 | pTcon->broken_posix_open = true; | 605 | pTcon->broken_posix_open = true; |
606 | } | ||
591 | } | 607 | } |
592 | if (!posix_open) | 608 | if (!posix_open) |
593 | rc = cifs_get_inode_info_unix(&newInode, full_path, | 609 | rc = cifs_get_inode_info_unix(&newInode, full_path, |
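The cifs_lookup() change above replaces an if/else chain with a switch that relies on deliberate fallthrough: a successful open of a regular file and -ENOENT both allow the posix-open result to be used, a non-regular file is closed and handled as a plain lookup, -EOPNOTSUPP is ignored, and anything else marks the server's posix open as broken. A simplified, userspace-only model of that control flow (all names invented):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Invented model of the switch in cifs_lookup(): returns true when the
 * posix-open result can be used, and flags a broken server on any
 * unexpected error. The fallthroughs are deliberate, as in the hunk. */
static bool toy_handle_posix_open(int rc, bool is_regular, bool *broken)
{
	bool posix_open = false;

	switch (rc) {
	case 0:
		if (!is_regular)
			break;	/* non-regular file: treat as a plain lookup */
		/* fall through */
	case -ENOENT:
		posix_open = true;
		/* fall through */
	case -EOPNOTSUPP:
		break;
	default:
		*broken = true;	/* remember that posix open misbehaves here */
	}
	return posix_open;
}

int main(void)
{
	bool broken = false;

	printf("%d\n", toy_handle_posix_open(0, true, &broken));	/* 1 */
	printf("%d\n", toy_handle_posix_open(-EINVAL, true, &broken));	/* 0, broken */
	return 0;
}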
diff --git a/fs/cifs/file.c b/fs/cifs/file.c index 4dd9283885e7..5e64748a2917 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c | |||
@@ -920,16 +920,26 @@ cifs_push_mandatory_locks(struct cifsFileInfo *cfile) | |||
920 | for (lockp = &inode->i_flock; *lockp != NULL; \ | 920 | for (lockp = &inode->i_flock; *lockp != NULL; \ |
921 | lockp = &(*lockp)->fl_next) | 921 | lockp = &(*lockp)->fl_next) |
922 | 922 | ||
923 | struct lock_to_push { | ||
924 | struct list_head llist; | ||
925 | __u64 offset; | ||
926 | __u64 length; | ||
927 | __u32 pid; | ||
928 | __u16 netfid; | ||
929 | __u8 type; | ||
930 | }; | ||
931 | |||
923 | static int | 932 | static int |
924 | cifs_push_posix_locks(struct cifsFileInfo *cfile) | 933 | cifs_push_posix_locks(struct cifsFileInfo *cfile) |
925 | { | 934 | { |
926 | struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode); | 935 | struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode); |
927 | struct cifs_tcon *tcon = tlink_tcon(cfile->tlink); | 936 | struct cifs_tcon *tcon = tlink_tcon(cfile->tlink); |
928 | struct file_lock *flock, **before; | 937 | struct file_lock *flock, **before; |
929 | struct cifsLockInfo *lck, *tmp; | 938 | unsigned int count = 0, i = 0; |
930 | int rc = 0, xid, type; | 939 | int rc = 0, xid, type; |
940 | struct list_head locks_to_send, *el; | ||
941 | struct lock_to_push *lck, *tmp; | ||
931 | __u64 length; | 942 | __u64 length; |
932 | struct list_head locks_to_send; | ||
933 | 943 | ||
934 | xid = GetXid(); | 944 | xid = GetXid(); |
935 | 945 | ||
@@ -940,29 +950,55 @@ cifs_push_posix_locks(struct cifsFileInfo *cfile) | |||
940 | return rc; | 950 | return rc; |
941 | } | 951 | } |
942 | 952 | ||
953 | lock_flocks(); | ||
954 | cifs_for_each_lock(cfile->dentry->d_inode, before) { | ||
955 | if ((*before)->fl_flags & FL_POSIX) | ||
956 | count++; | ||
957 | } | ||
958 | unlock_flocks(); | ||
959 | |||
943 | INIT_LIST_HEAD(&locks_to_send); | 960 | INIT_LIST_HEAD(&locks_to_send); |
944 | 961 | ||
962 | /* | ||
963 | * Allocating count locks is enough because no locks can be added to | ||
964 | * the list while we are holding cinode->lock_mutex that protects | ||
965 | * locking operations of this inode. | ||
966 | */ | ||
967 | for (; i < count; i++) { | ||
968 | lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL); | ||
969 | if (!lck) { | ||
970 | rc = -ENOMEM; | ||
971 | goto err_out; | ||
972 | } | ||
973 | list_add_tail(&lck->llist, &locks_to_send); | ||
974 | } | ||
975 | |||
976 | i = 0; | ||
977 | el = locks_to_send.next; | ||
945 | lock_flocks(); | 978 | lock_flocks(); |
946 | cifs_for_each_lock(cfile->dentry->d_inode, before) { | 979 | cifs_for_each_lock(cfile->dentry->d_inode, before) { |
980 | if (el == &locks_to_send) { | ||
981 | /* something is really wrong */ | ||
982 | cERROR(1, "Can't push all brlocks!"); | ||
983 | break; | ||
984 | } | ||
947 | flock = *before; | 985 | flock = *before; |
986 | if ((flock->fl_flags & FL_POSIX) == 0) | ||
987 | continue; | ||
948 | length = 1 + flock->fl_end - flock->fl_start; | 988 | length = 1 + flock->fl_end - flock->fl_start; |
949 | if (flock->fl_type == F_RDLCK || flock->fl_type == F_SHLCK) | 989 | if (flock->fl_type == F_RDLCK || flock->fl_type == F_SHLCK) |
950 | type = CIFS_RDLCK; | 990 | type = CIFS_RDLCK; |
951 | else | 991 | else |
952 | type = CIFS_WRLCK; | 992 | type = CIFS_WRLCK; |
953 | 993 | lck = list_entry(el, struct lock_to_push, llist); | |
954 | lck = cifs_lock_init(flock->fl_start, length, type, | ||
955 | cfile->netfid); | ||
956 | if (!lck) { | ||
957 | rc = -ENOMEM; | ||
958 | goto send_locks; | ||
959 | } | ||
960 | lck->pid = flock->fl_pid; | 994 | lck->pid = flock->fl_pid; |
961 | 995 | lck->netfid = cfile->netfid; | |
962 | list_add_tail(&lck->llist, &locks_to_send); | 996 | lck->length = length; |
997 | lck->type = type; | ||
998 | lck->offset = flock->fl_start; | ||
999 | i++; | ||
1000 | el = el->next; | ||
963 | } | 1001 | } |
964 | |||
965 | send_locks: | ||
966 | unlock_flocks(); | 1002 | unlock_flocks(); |
967 | 1003 | ||
968 | list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) { | 1004 | list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) { |
@@ -979,11 +1015,18 @@ send_locks: | |||
979 | kfree(lck); | 1015 | kfree(lck); |
980 | } | 1016 | } |
981 | 1017 | ||
1018 | out: | ||
982 | cinode->can_cache_brlcks = false; | 1019 | cinode->can_cache_brlcks = false; |
983 | mutex_unlock(&cinode->lock_mutex); | 1020 | mutex_unlock(&cinode->lock_mutex); |
984 | 1021 | ||
985 | FreeXid(xid); | 1022 | FreeXid(xid); |
986 | return rc; | 1023 | return rc; |
1024 | err_out: | ||
1025 | list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) { | ||
1026 | list_del(&lck->llist); | ||
1027 | kfree(lck); | ||
1028 | } | ||
1029 | goto out; | ||
987 | } | 1030 | } |
988 | 1031 | ||
989 | static int | 1032 | static int |
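cifs_push_posix_locks() above now walks the inode's lock list twice: once under lock_flocks() just to count the FL_POSIX entries, then again after preallocating that many lock_to_push nodes, since the allocation may sleep and the count cannot grow while cinode->lock_mutex is held. A userspace sketch of the same count-then-preallocate pattern; the pthread mutex stands in for lock_flocks(), and the second walk is simply bounded by the count, everything here being invented.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct toy_lock {
	long start;
	long len;
	struct toy_lock *next;
};

static pthread_mutex_t toy_flock_lock = PTHREAD_MUTEX_INITIALIZER;
static struct toy_lock *toy_flocks;	/* list protected by toy_flock_lock */

/* Count the entries under the lock, allocate with the lock dropped,
 * then re-take the lock and fill the preallocated slots. */
static int toy_snapshot_locks(struct toy_lock **out, unsigned int *nr)
{
	unsigned int count = 0, i;
	struct toy_lock *snap, *cur;

	pthread_mutex_lock(&toy_flock_lock);
	for (cur = toy_flocks; cur; cur = cur->next)
		count++;
	pthread_mutex_unlock(&toy_flock_lock);

	snap = calloc(count, sizeof(*snap));	/* may block; lock not held */
	if (count && !snap)
		return -1;

	pthread_mutex_lock(&toy_flock_lock);
	for (i = 0, cur = toy_flocks; cur && i < count; cur = cur->next, i++) {
		snap[i].start = cur->start;
		snap[i].len = cur->len;
	}
	pthread_mutex_unlock(&toy_flock_lock);

	*out = snap;
	*nr = i;
	return 0;
}

int main(void)
{
	struct toy_lock a = { 0, 10, NULL };
	struct toy_lock *snap = NULL;
	unsigned int nr;

	toy_flocks = &a;
	if (toy_snapshot_locks(&snap, &nr) == 0 && nr > 0)
		printf("copied %u lock(s), first len=%ld\n", nr, snap[0].len);
	free(snap);
	return 0;
}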
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c index a5f54b7d9822..745da3d0653e 100644 --- a/fs/cifs/inode.c +++ b/fs/cifs/inode.c | |||
@@ -534,6 +534,11 @@ cifs_all_info_to_fattr(struct cifs_fattr *fattr, FILE_ALL_INFO *info, | |||
534 | if (fattr->cf_cifsattrs & ATTR_DIRECTORY) { | 534 | if (fattr->cf_cifsattrs & ATTR_DIRECTORY) { |
535 | fattr->cf_mode = S_IFDIR | cifs_sb->mnt_dir_mode; | 535 | fattr->cf_mode = S_IFDIR | cifs_sb->mnt_dir_mode; |
536 | fattr->cf_dtype = DT_DIR; | 536 | fattr->cf_dtype = DT_DIR; |
537 | /* | ||
538 | * Server can return wrong NumberOfLinks value for directories | ||
539 | * when Unix extensions are disabled - fake it. | ||
540 | */ | ||
541 | fattr->cf_nlink = 2; | ||
537 | } else { | 542 | } else { |
538 | fattr->cf_mode = S_IFREG | cifs_sb->mnt_file_mode; | 543 | fattr->cf_mode = S_IFREG | cifs_sb->mnt_file_mode; |
539 | fattr->cf_dtype = DT_REG; | 544 | fattr->cf_dtype = DT_REG; |
@@ -541,9 +546,9 @@ cifs_all_info_to_fattr(struct cifs_fattr *fattr, FILE_ALL_INFO *info, | |||
541 | /* clear write bits if ATTR_READONLY is set */ | 546 | /* clear write bits if ATTR_READONLY is set */ |
542 | if (fattr->cf_cifsattrs & ATTR_READONLY) | 547 | if (fattr->cf_cifsattrs & ATTR_READONLY) |
543 | fattr->cf_mode &= ~(S_IWUGO); | 548 | fattr->cf_mode &= ~(S_IWUGO); |
544 | } | ||
545 | 549 | ||
546 | fattr->cf_nlink = le32_to_cpu(info->NumberOfLinks); | 550 | fattr->cf_nlink = le32_to_cpu(info->NumberOfLinks); |
551 | } | ||
547 | 552 | ||
548 | fattr->cf_uid = cifs_sb->mnt_uid; | 553 | fattr->cf_uid = cifs_sb->mnt_uid; |
549 | fattr->cf_gid = cifs_sb->mnt_gid; | 554 | fattr->cf_gid = cifs_sb->mnt_gid; |
@@ -1322,7 +1327,6 @@ int cifs_mkdir(struct inode *inode, struct dentry *direntry, umode_t mode) | |||
1322 | } | 1327 | } |
1323 | /*BB check (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID ) to see if need | 1328 | /*BB check (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID ) to see if need |
1324 | to set uid/gid */ | 1329 | to set uid/gid */ |
1325 | inc_nlink(inode); | ||
1326 | 1330 | ||
1327 | cifs_unix_basic_to_fattr(&fattr, pInfo, cifs_sb); | 1331 | cifs_unix_basic_to_fattr(&fattr, pInfo, cifs_sb); |
1328 | cifs_fill_uniqueid(inode->i_sb, &fattr); | 1332 | cifs_fill_uniqueid(inode->i_sb, &fattr); |
@@ -1355,7 +1359,6 @@ mkdir_retry_old: | |||
1355 | d_drop(direntry); | 1359 | d_drop(direntry); |
1356 | } else { | 1360 | } else { |
1357 | mkdir_get_info: | 1361 | mkdir_get_info: |
1358 | inc_nlink(inode); | ||
1359 | if (pTcon->unix_ext) | 1362 | if (pTcon->unix_ext) |
1360 | rc = cifs_get_inode_info_unix(&newinode, full_path, | 1363 | rc = cifs_get_inode_info_unix(&newinode, full_path, |
1361 | inode->i_sb, xid); | 1364 | inode->i_sb, xid); |
@@ -1436,6 +1439,11 @@ mkdir_get_info: | |||
1436 | } | 1439 | } |
1437 | } | 1440 | } |
1438 | mkdir_out: | 1441 | mkdir_out: |
1442 | /* | ||
1443 | * Force revalidate to get parent dir info when needed since cached | ||
1444 | * attributes are invalid now. | ||
1445 | */ | ||
1446 | CIFS_I(inode)->time = 0; | ||
1439 | kfree(full_path); | 1447 | kfree(full_path); |
1440 | FreeXid(xid); | 1448 | FreeXid(xid); |
1441 | cifs_put_tlink(tlink); | 1449 | cifs_put_tlink(tlink); |
@@ -1475,7 +1483,6 @@ int cifs_rmdir(struct inode *inode, struct dentry *direntry) | |||
1475 | cifs_put_tlink(tlink); | 1483 | cifs_put_tlink(tlink); |
1476 | 1484 | ||
1477 | if (!rc) { | 1485 | if (!rc) { |
1478 | drop_nlink(inode); | ||
1479 | spin_lock(&direntry->d_inode->i_lock); | 1486 | spin_lock(&direntry->d_inode->i_lock); |
1480 | i_size_write(direntry->d_inode, 0); | 1487 | i_size_write(direntry->d_inode, 0); |
1481 | clear_nlink(direntry->d_inode); | 1488 | clear_nlink(direntry->d_inode); |
@@ -1483,12 +1490,15 @@ int cifs_rmdir(struct inode *inode, struct dentry *direntry) | |||
1483 | } | 1490 | } |
1484 | 1491 | ||
1485 | cifsInode = CIFS_I(direntry->d_inode); | 1492 | cifsInode = CIFS_I(direntry->d_inode); |
1486 | cifsInode->time = 0; /* force revalidate to go get info when | 1493 | /* force revalidate to go get info when needed */ |
1487 | needed */ | 1494 | cifsInode->time = 0; |
1488 | 1495 | ||
1489 | cifsInode = CIFS_I(inode); | 1496 | cifsInode = CIFS_I(inode); |
1490 | cifsInode->time = 0; /* force revalidate to get parent dir info | 1497 | /* |
1491 | since cached search results now invalid */ | 1498 | * Force revalidate to get parent dir info when needed since cached |
1499 | * attributes are invalid now. | ||
1500 | */ | ||
1501 | cifsInode->time = 0; | ||
1492 | 1502 | ||
1493 | direntry->d_inode->i_ctime = inode->i_ctime = inode->i_mtime = | 1503 | direntry->d_inode->i_ctime = inode->i_ctime = inode->i_mtime = |
1494 | current_fs_time(inode->i_sb); | 1504 | current_fs_time(inode->i_sb); |
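The inode.c changes above stop adjusting the parent directory's link count by hand in mkdir/rmdir; instead, cifs_all_info_to_fattr() fakes cf_nlink = 2 for directories when UNIX extensions are off (the server-reported NumberOfLinks is unreliable there), and cifsInode->time is zeroed so the next lookup revalidates cached attributes. A minimal sketch of the fattr decision only, with invented types and an illustrative attribute value:

#include <stdint.h>
#include <stdio.h>

#define TOY_ATTR_DIRECTORY 0x10		/* illustrative value only */

struct toy_fattr {
	int is_dir;
	uint32_t nlink;
};

/* With UNIX extensions off, only regular files trust the server-reported
 * link count; directories get a fixed value of 2, as in the hunk above. */
static void toy_fill_nlink(struct toy_fattr *fattr, uint32_t cifsattrs,
			   uint32_t server_nlinks)
{
	if (cifsattrs & TOY_ATTR_DIRECTORY) {
		fattr->is_dir = 1;
		fattr->nlink = 2;	/* server value is unreliable; fake it */
	} else {
		fattr->is_dir = 0;
		fattr->nlink = server_nlinks;
	}
}

int main(void)
{
	struct toy_fattr f;

	toy_fill_nlink(&f, TOY_ATTR_DIRECTORY, 7);
	printf("dir nlink=%u\n", (unsigned)f.nlink);	/* prints 2 */
	return 0;
}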
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c index a090bbe6ee29..e2bbc683e018 100644 --- a/fs/cifs/readdir.c +++ b/fs/cifs/readdir.c | |||
@@ -647,10 +647,11 @@ static int cifs_filldir(char *find_entry, struct file *file, filldir_t filldir, | |||
647 | 647 | ||
648 | name.name = scratch_buf; | 648 | name.name = scratch_buf; |
649 | name.len = | 649 | name.len = |
650 | cifs_from_ucs2((char *)name.name, (__le16 *)de.name, | 650 | cifs_from_utf16((char *)name.name, (__le16 *)de.name, |
651 | UNICODE_NAME_MAX, | 651 | UNICODE_NAME_MAX, |
652 | min(de.namelen, (size_t)max_len), nlt, | 652 | min_t(size_t, de.namelen, |
653 | cifs_sb->mnt_cifs_flags & | 653 | (size_t)max_len), nlt, |
654 | cifs_sb->mnt_cifs_flags & | ||
654 | CIFS_MOUNT_MAP_SPECIAL_CHR); | 655 | CIFS_MOUNT_MAP_SPECIAL_CHR); |
655 | name.len -= nls_nullsize(nlt); | 656 | name.len -= nls_nullsize(nlt); |
656 | } else { | 657 | } else { |
diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c index 4ec3ee9d72cc..551d0c2b9736 100644 --- a/fs/cifs/sess.c +++ b/fs/cifs/sess.c | |||
@@ -167,16 +167,16 @@ unicode_oslm_strings(char **pbcc_area, const struct nls_table *nls_cp) | |||
167 | int bytes_ret = 0; | 167 | int bytes_ret = 0; |
168 | 168 | ||
169 | /* Copy OS version */ | 169 | /* Copy OS version */ |
170 | bytes_ret = cifs_strtoUCS((__le16 *)bcc_ptr, "Linux version ", 32, | 170 | bytes_ret = cifs_strtoUTF16((__le16 *)bcc_ptr, "Linux version ", 32, |
171 | nls_cp); | 171 | nls_cp); |
172 | bcc_ptr += 2 * bytes_ret; | 172 | bcc_ptr += 2 * bytes_ret; |
173 | bytes_ret = cifs_strtoUCS((__le16 *) bcc_ptr, init_utsname()->release, | 173 | bytes_ret = cifs_strtoUTF16((__le16 *) bcc_ptr, init_utsname()->release, |
174 | 32, nls_cp); | 174 | 32, nls_cp); |
175 | bcc_ptr += 2 * bytes_ret; | 175 | bcc_ptr += 2 * bytes_ret; |
176 | bcc_ptr += 2; /* trailing null */ | 176 | bcc_ptr += 2; /* trailing null */ |
177 | 177 | ||
178 | bytes_ret = cifs_strtoUCS((__le16 *) bcc_ptr, CIFS_NETWORK_OPSYS, | 178 | bytes_ret = cifs_strtoUTF16((__le16 *) bcc_ptr, CIFS_NETWORK_OPSYS, |
179 | 32, nls_cp); | 179 | 32, nls_cp); |
180 | bcc_ptr += 2 * bytes_ret; | 180 | bcc_ptr += 2 * bytes_ret; |
181 | bcc_ptr += 2; /* trailing null */ | 181 | bcc_ptr += 2; /* trailing null */ |
182 | 182 | ||
@@ -197,8 +197,8 @@ static void unicode_domain_string(char **pbcc_area, struct cifs_ses *ses, | |||
197 | *(bcc_ptr+1) = 0; | 197 | *(bcc_ptr+1) = 0; |
198 | bytes_ret = 0; | 198 | bytes_ret = 0; |
199 | } else | 199 | } else |
200 | bytes_ret = cifs_strtoUCS((__le16 *) bcc_ptr, ses->domainName, | 200 | bytes_ret = cifs_strtoUTF16((__le16 *) bcc_ptr, ses->domainName, |
201 | 256, nls_cp); | 201 | 256, nls_cp); |
202 | bcc_ptr += 2 * bytes_ret; | 202 | bcc_ptr += 2 * bytes_ret; |
203 | bcc_ptr += 2; /* account for null terminator */ | 203 | bcc_ptr += 2; /* account for null terminator */ |
204 | 204 | ||
@@ -226,8 +226,8 @@ static void unicode_ssetup_strings(char **pbcc_area, struct cifs_ses *ses, | |||
226 | *bcc_ptr = 0; | 226 | *bcc_ptr = 0; |
227 | *(bcc_ptr+1) = 0; | 227 | *(bcc_ptr+1) = 0; |
228 | } else { | 228 | } else { |
229 | bytes_ret = cifs_strtoUCS((__le16 *) bcc_ptr, ses->user_name, | 229 | bytes_ret = cifs_strtoUTF16((__le16 *) bcc_ptr, ses->user_name, |
230 | MAX_USERNAME_SIZE, nls_cp); | 230 | MAX_USERNAME_SIZE, nls_cp); |
231 | } | 231 | } |
232 | bcc_ptr += 2 * bytes_ret; | 232 | bcc_ptr += 2 * bytes_ret; |
233 | bcc_ptr += 2; /* account for null termination */ | 233 | bcc_ptr += 2; /* account for null termination */ |
@@ -246,16 +246,15 @@ static void ascii_ssetup_strings(char **pbcc_area, struct cifs_ses *ses, | |||
246 | /* copy user */ | 246 | /* copy user */ |
247 | /* BB what about null user mounts - check that we do this BB */ | 247 | /* BB what about null user mounts - check that we do this BB */ |
248 | /* copy user */ | 248 | /* copy user */ |
249 | if (ses->user_name != NULL) | 249 | if (ses->user_name != NULL) { |
250 | strncpy(bcc_ptr, ses->user_name, MAX_USERNAME_SIZE); | 250 | strncpy(bcc_ptr, ses->user_name, MAX_USERNAME_SIZE); |
251 | bcc_ptr += strnlen(ses->user_name, MAX_USERNAME_SIZE); | ||
252 | } | ||
251 | /* else null user mount */ | 253 | /* else null user mount */ |
252 | |||
253 | bcc_ptr += strnlen(ses->user_name, MAX_USERNAME_SIZE); | ||
254 | *bcc_ptr = 0; | 254 | *bcc_ptr = 0; |
255 | bcc_ptr++; /* account for null termination */ | 255 | bcc_ptr++; /* account for null termination */ |
256 | 256 | ||
257 | /* copy domain */ | 257 | /* copy domain */ |
258 | |||
259 | if (ses->domainName != NULL) { | 258 | if (ses->domainName != NULL) { |
260 | strncpy(bcc_ptr, ses->domainName, 256); | 259 | strncpy(bcc_ptr, ses->domainName, 256); |
261 | bcc_ptr += strnlen(ses->domainName, 256); | 260 | bcc_ptr += strnlen(ses->domainName, 256); |
@@ -287,7 +286,7 @@ decode_unicode_ssetup(char **pbcc_area, int bleft, struct cifs_ses *ses, | |||
287 | cFYI(1, "bleft %d", bleft); | 286 | cFYI(1, "bleft %d", bleft); |
288 | 287 | ||
289 | kfree(ses->serverOS); | 288 | kfree(ses->serverOS); |
290 | ses->serverOS = cifs_strndup_from_ucs(data, bleft, true, nls_cp); | 289 | ses->serverOS = cifs_strndup_from_utf16(data, bleft, true, nls_cp); |
291 | cFYI(1, "serverOS=%s", ses->serverOS); | 290 | cFYI(1, "serverOS=%s", ses->serverOS); |
292 | len = (UniStrnlen((wchar_t *) data, bleft / 2) * 2) + 2; | 291 | len = (UniStrnlen((wchar_t *) data, bleft / 2) * 2) + 2; |
293 | data += len; | 292 | data += len; |
@@ -296,7 +295,7 @@ decode_unicode_ssetup(char **pbcc_area, int bleft, struct cifs_ses *ses, | |||
296 | return; | 295 | return; |
297 | 296 | ||
298 | kfree(ses->serverNOS); | 297 | kfree(ses->serverNOS); |
299 | ses->serverNOS = cifs_strndup_from_ucs(data, bleft, true, nls_cp); | 298 | ses->serverNOS = cifs_strndup_from_utf16(data, bleft, true, nls_cp); |
300 | cFYI(1, "serverNOS=%s", ses->serverNOS); | 299 | cFYI(1, "serverNOS=%s", ses->serverNOS); |
301 | len = (UniStrnlen((wchar_t *) data, bleft / 2) * 2) + 2; | 300 | len = (UniStrnlen((wchar_t *) data, bleft / 2) * 2) + 2; |
302 | data += len; | 301 | data += len; |
@@ -305,7 +304,7 @@ decode_unicode_ssetup(char **pbcc_area, int bleft, struct cifs_ses *ses, | |||
305 | return; | 304 | return; |
306 | 305 | ||
307 | kfree(ses->serverDomain); | 306 | kfree(ses->serverDomain); |
308 | ses->serverDomain = cifs_strndup_from_ucs(data, bleft, true, nls_cp); | 307 | ses->serverDomain = cifs_strndup_from_utf16(data, bleft, true, nls_cp); |
309 | cFYI(1, "serverDomain=%s", ses->serverDomain); | 308 | cFYI(1, "serverDomain=%s", ses->serverDomain); |
310 | 309 | ||
311 | return; | 310 | return; |
@@ -395,6 +394,10 @@ static int decode_ntlmssp_challenge(char *bcc_ptr, int blob_len, | |||
395 | ses->ntlmssp->server_flags = le32_to_cpu(pblob->NegotiateFlags); | 394 | ses->ntlmssp->server_flags = le32_to_cpu(pblob->NegotiateFlags); |
396 | tioffset = le32_to_cpu(pblob->TargetInfoArray.BufferOffset); | 395 | tioffset = le32_to_cpu(pblob->TargetInfoArray.BufferOffset); |
397 | tilen = le16_to_cpu(pblob->TargetInfoArray.Length); | 396 | tilen = le16_to_cpu(pblob->TargetInfoArray.Length); |
397 | if (tioffset > blob_len || tioffset + tilen > blob_len) { | ||
398 | cERROR(1, "tioffset + tilen too high %u + %u", tioffset, tilen); | ||
399 | return -EINVAL; | ||
400 | } | ||
398 | if (tilen) { | 401 | if (tilen) { |
399 | ses->auth_key.response = kmalloc(tilen, GFP_KERNEL); | 402 | ses->auth_key.response = kmalloc(tilen, GFP_KERNEL); |
400 | if (!ses->auth_key.response) { | 403 | if (!ses->auth_key.response) { |
@@ -502,8 +505,8 @@ static int build_ntlmssp_auth_blob(unsigned char *pbuffer, | |||
502 | tmp += 2; | 505 | tmp += 2; |
503 | } else { | 506 | } else { |
504 | int len; | 507 | int len; |
505 | len = cifs_strtoUCS((__le16 *)tmp, ses->domainName, | 508 | len = cifs_strtoUTF16((__le16 *)tmp, ses->domainName, |
506 | MAX_USERNAME_SIZE, nls_cp); | 509 | MAX_USERNAME_SIZE, nls_cp); |
507 | len *= 2; /* unicode is 2 bytes each */ | 510 | len *= 2; /* unicode is 2 bytes each */ |
508 | sec_blob->DomainName.BufferOffset = cpu_to_le32(tmp - pbuffer); | 511 | sec_blob->DomainName.BufferOffset = cpu_to_le32(tmp - pbuffer); |
509 | sec_blob->DomainName.Length = cpu_to_le16(len); | 512 | sec_blob->DomainName.Length = cpu_to_le16(len); |
@@ -518,8 +521,8 @@ static int build_ntlmssp_auth_blob(unsigned char *pbuffer, | |||
518 | tmp += 2; | 521 | tmp += 2; |
519 | } else { | 522 | } else { |
520 | int len; | 523 | int len; |
521 | len = cifs_strtoUCS((__le16 *)tmp, ses->user_name, | 524 | len = cifs_strtoUTF16((__le16 *)tmp, ses->user_name, |
522 | MAX_USERNAME_SIZE, nls_cp); | 525 | MAX_USERNAME_SIZE, nls_cp); |
523 | len *= 2; /* unicode is 2 bytes each */ | 526 | len *= 2; /* unicode is 2 bytes each */ |
524 | sec_blob->UserName.BufferOffset = cpu_to_le32(tmp - pbuffer); | 527 | sec_blob->UserName.BufferOffset = cpu_to_le32(tmp - pbuffer); |
525 | sec_blob->UserName.Length = cpu_to_le16(len); | 528 | sec_blob->UserName.Length = cpu_to_le16(len); |
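The decode_ntlmssp_challenge() hunk above adds a sanity check on the TargetInfoArray descriptor before its contents are copied out of the security blob. As an illustrative, standalone sketch of the same validate-then-copy pattern (function and variable names here are invented for the example, not taken from the CIFS code):

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    /* Refuse any (offset, len) pair that reaches outside blob[0..blob_len)
     * before copying; both values are attacker-controlled wire data. */
    static int extract_from_blob(const uint8_t *blob, size_t blob_len,
                                 size_t offset, size_t len,
                                 uint8_t *out, size_t out_len)
    {
        if (offset > blob_len || len > blob_len - offset)
            return -1;            /* descriptor points past the blob */
        if (len > out_len)
            return -1;            /* destination buffer too small */
        memcpy(out, blob + offset, len);
        return 0;
    }

Writing the second test as len > blob_len - offset, only after offset has been checked against blob_len, keeps the comparison free of integer overflow, which is worth bearing in mind whenever both fields come off the wire.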
diff --git a/fs/cifs/smbencrypt.c b/fs/cifs/smbencrypt.c index 80d850881938..d5cd9aa7eacc 100644 --- a/fs/cifs/smbencrypt.c +++ b/fs/cifs/smbencrypt.c | |||
@@ -213,7 +213,7 @@ E_md4hash(const unsigned char *passwd, unsigned char *p16, | |||
213 | 213 | ||
214 | /* Password cannot be longer than 128 characters */ | 214 | /* Password cannot be longer than 128 characters */ |
215 | if (passwd) /* Password must be converted to NT unicode */ | 215 | if (passwd) /* Password must be converted to NT unicode */ |
216 | len = cifs_strtoUCS(wpwd, passwd, 128, codepage); | 216 | len = cifs_strtoUTF16(wpwd, passwd, 128, codepage); |
217 | else { | 217 | else { |
218 | len = 0; | 218 | len = 0; |
219 | *wpwd = 0; /* Ensure string is null terminated */ | 219 | *wpwd = 0; /* Ensure string is null terminated */ |
diff --git a/fs/cifs/xattr.c b/fs/cifs/xattr.c index 45f07c46f3ed..10d92cf57ab6 100644 --- a/fs/cifs/xattr.c +++ b/fs/cifs/xattr.c | |||
@@ -105,7 +105,6 @@ int cifs_setxattr(struct dentry *direntry, const char *ea_name, | |||
105 | struct cifs_tcon *pTcon; | 105 | struct cifs_tcon *pTcon; |
106 | struct super_block *sb; | 106 | struct super_block *sb; |
107 | char *full_path; | 107 | char *full_path; |
108 | struct cifs_ntsd *pacl; | ||
109 | 108 | ||
110 | if (direntry == NULL) | 109 | if (direntry == NULL) |
111 | return -EIO; | 110 | return -EIO; |
@@ -164,23 +163,24 @@ int cifs_setxattr(struct dentry *direntry, const char *ea_name, | |||
164 | cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); | 163 | cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); |
165 | } else if (strncmp(ea_name, CIFS_XATTR_CIFS_ACL, | 164 | } else if (strncmp(ea_name, CIFS_XATTR_CIFS_ACL, |
166 | strlen(CIFS_XATTR_CIFS_ACL)) == 0) { | 165 | strlen(CIFS_XATTR_CIFS_ACL)) == 0) { |
166 | #ifdef CONFIG_CIFS_ACL | ||
167 | struct cifs_ntsd *pacl; | ||
167 | pacl = kmalloc(value_size, GFP_KERNEL); | 168 | pacl = kmalloc(value_size, GFP_KERNEL); |
168 | if (!pacl) { | 169 | if (!pacl) { |
169 | cFYI(1, "%s: Can't allocate memory for ACL", | 170 | cFYI(1, "%s: Can't allocate memory for ACL", |
170 | __func__); | 171 | __func__); |
171 | rc = -ENOMEM; | 172 | rc = -ENOMEM; |
172 | } else { | 173 | } else { |
173 | #ifdef CONFIG_CIFS_ACL | ||
174 | memcpy(pacl, ea_value, value_size); | 174 | memcpy(pacl, ea_value, value_size); |
175 | rc = set_cifs_acl(pacl, value_size, | 175 | rc = set_cifs_acl(pacl, value_size, |
176 | direntry->d_inode, full_path, CIFS_ACL_DACL); | 176 | direntry->d_inode, full_path, CIFS_ACL_DACL); |
177 | if (rc == 0) /* force revalidate of the inode */ | 177 | if (rc == 0) /* force revalidate of the inode */ |
178 | CIFS_I(direntry->d_inode)->time = 0; | 178 | CIFS_I(direntry->d_inode)->time = 0; |
179 | kfree(pacl); | 179 | kfree(pacl); |
180 | } | ||
180 | #else | 181 | #else |
181 | cFYI(1, "Set CIFS ACL not supported yet"); | 182 | cFYI(1, "Set CIFS ACL not supported yet"); |
182 | #endif /* CONFIG_CIFS_ACL */ | 183 | #endif /* CONFIG_CIFS_ACL */ |
183 | } | ||
184 | } else { | 184 | } else { |
185 | int temp; | 185 | int temp; |
186 | temp = strncmp(ea_name, POSIX_ACL_XATTR_ACCESS, | 186 | temp = strncmp(ea_name, POSIX_ACL_XATTR_ACCESS, |
diff --git a/fs/compat.c b/fs/compat.c index fa9d721ecfee..07880bae28a9 100644 --- a/fs/compat.c +++ b/fs/compat.c | |||
@@ -131,41 +131,35 @@ asmlinkage long compat_sys_utimes(const char __user *filename, struct compat_tim | |||
131 | 131 | ||
132 | static int cp_compat_stat(struct kstat *stat, struct compat_stat __user *ubuf) | 132 | static int cp_compat_stat(struct kstat *stat, struct compat_stat __user *ubuf) |
133 | { | 133 | { |
134 | compat_ino_t ino = stat->ino; | 134 | struct compat_stat tmp; |
135 | typeof(ubuf->st_uid) uid = 0; | ||
136 | typeof(ubuf->st_gid) gid = 0; | ||
137 | int err; | ||
138 | 135 | ||
139 | SET_UID(uid, stat->uid); | 136 | if (!old_valid_dev(stat->dev) || !old_valid_dev(stat->rdev)) |
140 | SET_GID(gid, stat->gid); | 137 | return -EOVERFLOW; |
141 | 138 | ||
142 | if ((u64) stat->size > MAX_NON_LFS || | 139 | memset(&tmp, 0, sizeof(tmp)); |
143 | !old_valid_dev(stat->dev) || | 140 | tmp.st_dev = old_encode_dev(stat->dev); |
144 | !old_valid_dev(stat->rdev)) | 141 | tmp.st_ino = stat->ino; |
142 | if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino) | ||
145 | return -EOVERFLOW; | 143 | return -EOVERFLOW; |
146 | if (sizeof(ino) < sizeof(stat->ino) && ino != stat->ino) | 144 | tmp.st_mode = stat->mode; |
145 | tmp.st_nlink = stat->nlink; | ||
146 | if (tmp.st_nlink != stat->nlink) | ||
147 | return -EOVERFLOW; | 147 | return -EOVERFLOW; |
148 | 148 | SET_UID(tmp.st_uid, stat->uid); | |
149 | if (clear_user(ubuf, sizeof(*ubuf))) | 149 | SET_GID(tmp.st_gid, stat->gid); |
150 | return -EFAULT; | 150 | tmp.st_rdev = old_encode_dev(stat->rdev); |
151 | 151 | if ((u64) stat->size > MAX_NON_LFS) | |
152 | err = __put_user(old_encode_dev(stat->dev), &ubuf->st_dev); | 152 | return -EOVERFLOW; |
153 | err |= __put_user(ino, &ubuf->st_ino); | 153 | tmp.st_size = stat->size; |
154 | err |= __put_user(stat->mode, &ubuf->st_mode); | 154 | tmp.st_atime = stat->atime.tv_sec; |
155 | err |= __put_user(stat->nlink, &ubuf->st_nlink); | 155 | tmp.st_atime_nsec = stat->atime.tv_nsec; |
156 | err |= __put_user(uid, &ubuf->st_uid); | 156 | tmp.st_mtime = stat->mtime.tv_sec; |
157 | err |= __put_user(gid, &ubuf->st_gid); | 157 | tmp.st_mtime_nsec = stat->mtime.tv_nsec; |
158 | err |= __put_user(old_encode_dev(stat->rdev), &ubuf->st_rdev); | 158 | tmp.st_ctime = stat->ctime.tv_sec; |
159 | err |= __put_user(stat->size, &ubuf->st_size); | 159 | tmp.st_ctime_nsec = stat->ctime.tv_nsec; |
160 | err |= __put_user(stat->atime.tv_sec, &ubuf->st_atime); | 160 | tmp.st_blocks = stat->blocks; |
161 | err |= __put_user(stat->atime.tv_nsec, &ubuf->st_atime_nsec); | 161 | tmp.st_blksize = stat->blksize; |
162 | err |= __put_user(stat->mtime.tv_sec, &ubuf->st_mtime); | 162 | return copy_to_user(ubuf, &tmp, sizeof(tmp)) ? -EFAULT : 0; |
163 | err |= __put_user(stat->mtime.tv_nsec, &ubuf->st_mtime_nsec); | ||
164 | err |= __put_user(stat->ctime.tv_sec, &ubuf->st_ctime); | ||
165 | err |= __put_user(stat->ctime.tv_nsec, &ubuf->st_ctime_nsec); | ||
166 | err |= __put_user(stat->blksize, &ubuf->st_blksize); | ||
167 | err |= __put_user(stat->blocks, &ubuf->st_blocks); | ||
168 | return err; | ||
169 | } | 163 | } |
170 | 164 | ||
171 | asmlinkage long compat_sys_newstat(const char __user * filename, | 165 | asmlinkage long compat_sys_newstat(const char __user * filename, |
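The cp_compat_stat() rewrite above trades a long chain of __put_user() calls for a zeroed local struct and a single copy_to_user(), which simplifies the error handling and guarantees that padding bytes reach userspace cleared. A minimal userspace analogue of the pattern, with an invented struct standing in for compat_stat and memcpy() standing in for copy_to_user():

    #include <string.h>

    struct wire_stat {                   /* hypothetical fixed-layout target */
        unsigned int st_dev;
        unsigned int st_ino;
        unsigned int st_mode;
        unsigned int st_nlink;
    };

    static int pack_wire_stat(struct wire_stat *dst, unsigned long long dev,
                              unsigned long long ino, unsigned int mode,
                              unsigned int nlink)
    {
        struct wire_stat tmp;

        memset(&tmp, 0, sizeof(tmp));    /* no uninitialized padding leaks */
        tmp.st_dev = (unsigned int)dev;
        tmp.st_ino = (unsigned int)ino;
        if (tmp.st_ino != ino)
            return -1;                   /* value does not fit: overflow */
        tmp.st_mode = mode;
        tmp.st_nlink = nlink;
        memcpy(dst, &tmp, sizeof(tmp));  /* one copy instead of many puts */
        return 0;
    }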
diff --git a/fs/dcache.c b/fs/dcache.c index 16a53cc2cc02..bcbdb33fcc20 100644 --- a/fs/dcache.c +++ b/fs/dcache.c | |||
@@ -104,7 +104,7 @@ static unsigned int d_hash_shift __read_mostly; | |||
104 | 104 | ||
105 | static struct hlist_bl_head *dentry_hashtable __read_mostly; | 105 | static struct hlist_bl_head *dentry_hashtable __read_mostly; |
106 | 106 | ||
107 | static inline struct hlist_bl_head *d_hash(struct dentry *parent, | 107 | static inline struct hlist_bl_head *d_hash(const struct dentry *parent, |
108 | unsigned long hash) | 108 | unsigned long hash) |
109 | { | 109 | { |
110 | hash += ((unsigned long) parent ^ GOLDEN_RATIO_PRIME) / L1_CACHE_BYTES; | 110 | hash += ((unsigned long) parent ^ GOLDEN_RATIO_PRIME) / L1_CACHE_BYTES; |
@@ -137,6 +137,26 @@ int proc_nr_dentry(ctl_table *table, int write, void __user *buffer, | |||
137 | } | 137 | } |
138 | #endif | 138 | #endif |
139 | 139 | ||
140 | /* | ||
141 | * Compare 2 name strings, return 0 if they match, otherwise non-zero. | ||
142 | * The strings are both count bytes long, and count is non-zero. | ||
143 | */ | ||
144 | static inline int dentry_cmp(const unsigned char *cs, size_t scount, | ||
145 | const unsigned char *ct, size_t tcount) | ||
146 | { | ||
147 | if (scount != tcount) | ||
148 | return 1; | ||
149 | |||
150 | do { | ||
151 | if (*cs != *ct) | ||
152 | return 1; | ||
153 | cs++; | ||
154 | ct++; | ||
155 | tcount--; | ||
156 | } while (tcount); | ||
157 | return 0; | ||
158 | } | ||
159 | |||
140 | static void __d_free(struct rcu_head *head) | 160 | static void __d_free(struct rcu_head *head) |
141 | { | 161 | { |
142 | struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu); | 162 | struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu); |
@@ -1717,8 +1737,9 @@ EXPORT_SYMBOL(d_add_ci); | |||
1717 | * child is looked up. Thus, an interlocking stepping of sequence lock checks | 1737 | * child is looked up. Thus, an interlocking stepping of sequence lock checks |
1718 | * is formed, giving integrity down the path walk. | 1738 | * is formed, giving integrity down the path walk. |
1719 | */ | 1739 | */ |
1720 | struct dentry *__d_lookup_rcu(struct dentry *parent, struct qstr *name, | 1740 | struct dentry *__d_lookup_rcu(const struct dentry *parent, |
1721 | unsigned *seq, struct inode **inode) | 1741 | const struct qstr *name, |
1742 | unsigned *seqp, struct inode **inode) | ||
1722 | { | 1743 | { |
1723 | unsigned int len = name->len; | 1744 | unsigned int len = name->len; |
1724 | unsigned int hash = name->hash; | 1745 | unsigned int hash = name->hash; |
@@ -1748,6 +1769,7 @@ struct dentry *__d_lookup_rcu(struct dentry *parent, struct qstr *name, | |||
1748 | * See Documentation/filesystems/path-lookup.txt for more details. | 1769 | * See Documentation/filesystems/path-lookup.txt for more details. |
1749 | */ | 1770 | */ |
1750 | hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) { | 1771 | hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) { |
1772 | unsigned seq; | ||
1751 | struct inode *i; | 1773 | struct inode *i; |
1752 | const char *tname; | 1774 | const char *tname; |
1753 | int tlen; | 1775 | int tlen; |
@@ -1756,7 +1778,7 @@ struct dentry *__d_lookup_rcu(struct dentry *parent, struct qstr *name, | |||
1756 | continue; | 1778 | continue; |
1757 | 1779 | ||
1758 | seqretry: | 1780 | seqretry: |
1759 | *seq = read_seqcount_begin(&dentry->d_seq); | 1781 | seq = read_seqcount_begin(&dentry->d_seq); |
1760 | if (dentry->d_parent != parent) | 1782 | if (dentry->d_parent != parent) |
1761 | continue; | 1783 | continue; |
1762 | if (d_unhashed(dentry)) | 1784 | if (d_unhashed(dentry)) |
@@ -1771,7 +1793,7 @@ seqretry: | |||
1771 | * edge of memory when walking. If we could load this | 1793 | * edge of memory when walking. If we could load this |
1772 | * atomically some other way, we could drop this check. | 1794 | * atomically some other way, we could drop this check. |
1773 | */ | 1795 | */ |
1774 | if (read_seqcount_retry(&dentry->d_seq, *seq)) | 1796 | if (read_seqcount_retry(&dentry->d_seq, seq)) |
1775 | goto seqretry; | 1797 | goto seqretry; |
1776 | if (unlikely(parent->d_flags & DCACHE_OP_COMPARE)) { | 1798 | if (unlikely(parent->d_flags & DCACHE_OP_COMPARE)) { |
1777 | if (parent->d_op->d_compare(parent, *inode, | 1799 | if (parent->d_op->d_compare(parent, *inode, |
@@ -1788,6 +1810,7 @@ seqretry: | |||
1788 | * order to do anything useful with the returned dentry | 1810 | * order to do anything useful with the returned dentry |
1789 | * anyway. | 1811 | * anyway. |
1790 | */ | 1812 | */ |
1813 | *seqp = seq; | ||
1791 | *inode = i; | 1814 | *inode = i; |
1792 | return dentry; | 1815 | return dentry; |
1793 | } | 1816 | } |
@@ -2968,7 +2991,7 @@ __setup("dhash_entries=", set_dhash_entries); | |||
2968 | 2991 | ||
2969 | static void __init dcache_init_early(void) | 2992 | static void __init dcache_init_early(void) |
2970 | { | 2993 | { |
2971 | int loop; | 2994 | unsigned int loop; |
2972 | 2995 | ||
2973 | /* If hashes are distributed across NUMA nodes, defer | 2996 | /* If hashes are distributed across NUMA nodes, defer |
2974 | * hash allocation until vmalloc space is available. | 2997 | * hash allocation until vmalloc space is available. |
@@ -2986,13 +3009,13 @@ static void __init dcache_init_early(void) | |||
2986 | &d_hash_mask, | 3009 | &d_hash_mask, |
2987 | 0); | 3010 | 0); |
2988 | 3011 | ||
2989 | for (loop = 0; loop < (1 << d_hash_shift); loop++) | 3012 | for (loop = 0; loop < (1U << d_hash_shift); loop++) |
2990 | INIT_HLIST_BL_HEAD(dentry_hashtable + loop); | 3013 | INIT_HLIST_BL_HEAD(dentry_hashtable + loop); |
2991 | } | 3014 | } |
2992 | 3015 | ||
2993 | static void __init dcache_init(void) | 3016 | static void __init dcache_init(void) |
2994 | { | 3017 | { |
2995 | int loop; | 3018 | unsigned int loop; |
2996 | 3019 | ||
2997 | /* | 3020 | /* |
2998 | * A constructor could be added for stable state like the lists, | 3021 | * A constructor could be added for stable state like the lists, |
@@ -3016,7 +3039,7 @@ static void __init dcache_init(void) | |||
3016 | &d_hash_mask, | 3039 | &d_hash_mask, |
3017 | 0); | 3040 | 0); |
3018 | 3041 | ||
3019 | for (loop = 0; loop < (1 << d_hash_shift); loop++) | 3042 | for (loop = 0; loop < (1U << d_hash_shift); loop++) |
3020 | INIT_HLIST_BL_HEAD(dentry_hashtable + loop); | 3043 | INIT_HLIST_BL_HEAD(dentry_hashtable + loop); |
3021 | } | 3044 | } |
3022 | 3045 | ||
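The dcache_init_early()/dcache_init() hunks above switch the loop counter to unsigned int and the bucket count to 1U << d_hash_shift, keeping the shift and the comparison entirely in unsigned arithmetic; a signed 1 << n becomes undefined once n reaches the sign bit. A tiny standalone illustration of the difference (the shift value is exaggerated for effect):

    #include <stdio.h>

    int main(void)
    {
        unsigned int shift = 31;

        /* 1 << 31 overflows a signed int, which is undefined behaviour;
         * 1U << 31 is well defined for any shift below the type width. */
        unsigned int buckets = 1U << shift;

        printf("%u buckets\n", buckets);
        return 0;
    }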
diff --git a/fs/debugfs/file.c b/fs/debugfs/file.c index f65d4455c5e5..ef023eef0464 100644 --- a/fs/debugfs/file.c +++ b/fs/debugfs/file.c | |||
@@ -540,7 +540,7 @@ EXPORT_SYMBOL_GPL(debugfs_create_blob); | |||
540 | * debugfs_print_regs32 - use seq_print to describe a set of registers | 540 | * debugfs_print_regs32 - use seq_print to describe a set of registers |
541 | * @s: the seq_file structure being used to generate output | 541 | * @s: the seq_file structure being used to generate output |
542 | * @regs: an array if struct debugfs_reg32 structures | 542 | * @regs: an array if struct debugfs_reg32 structures |
543 | * @mregs: the length of the above array | 543 | * @nregs: the length of the above array |
544 | * @base: the base address to be used in reading the registers | 544 | * @base: the base address to be used in reading the registers |
545 | * @prefix: a string to be prefixed to every output line | 545 | * @prefix: a string to be prefixed to every output line |
546 | * | 546 | * |
diff --git a/fs/direct-io.c b/fs/direct-io.c index 4a588dbd11bf..f4aadd15b613 100644 --- a/fs/direct-io.c +++ b/fs/direct-io.c | |||
@@ -173,7 +173,7 @@ void inode_dio_wait(struct inode *inode) | |||
173 | if (atomic_read(&inode->i_dio_count)) | 173 | if (atomic_read(&inode->i_dio_count)) |
174 | __inode_dio_wait(inode); | 174 | __inode_dio_wait(inode); |
175 | } | 175 | } |
176 | EXPORT_SYMBOL_GPL(inode_dio_wait); | 176 | EXPORT_SYMBOL(inode_dio_wait); |
177 | 177 | ||
178 | /* | 178 | /* |
179 | * inode_dio_done - signal finish of a direct I/O requests | 179 | * inode_dio_done - signal finish of a direct I/O requests |
@@ -187,7 +187,7 @@ void inode_dio_done(struct inode *inode) | |||
187 | if (atomic_dec_and_test(&inode->i_dio_count)) | 187 | if (atomic_dec_and_test(&inode->i_dio_count)) |
188 | wake_up_bit(&inode->i_state, __I_DIO_WAKEUP); | 188 | wake_up_bit(&inode->i_state, __I_DIO_WAKEUP); |
189 | } | 189 | } |
190 | EXPORT_SYMBOL_GPL(inode_dio_done); | 190 | EXPORT_SYMBOL(inode_dio_done); |
191 | 191 | ||
192 | /* | 192 | /* |
193 | * How many pages are in the queue? | 193 | * How many pages are in the queue? |
diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c index 2a834255c75d..ea9931281557 100644 --- a/fs/ecryptfs/crypto.c +++ b/fs/ecryptfs/crypto.c | |||
@@ -417,17 +417,6 @@ static int ecryptfs_encrypt_extent(struct page *enc_extent_page, | |||
417 | (unsigned long long)(extent_base + extent_offset), rc); | 417 | (unsigned long long)(extent_base + extent_offset), rc); |
418 | goto out; | 418 | goto out; |
419 | } | 419 | } |
420 | if (unlikely(ecryptfs_verbosity > 0)) { | ||
421 | ecryptfs_printk(KERN_DEBUG, "Encrypting extent " | ||
422 | "with iv:\n"); | ||
423 | ecryptfs_dump_hex(extent_iv, crypt_stat->iv_bytes); | ||
424 | ecryptfs_printk(KERN_DEBUG, "First 8 bytes before " | ||
425 | "encryption:\n"); | ||
426 | ecryptfs_dump_hex((char *) | ||
427 | (page_address(page) | ||
428 | + (extent_offset * crypt_stat->extent_size)), | ||
429 | 8); | ||
430 | } | ||
431 | rc = ecryptfs_encrypt_page_offset(crypt_stat, enc_extent_page, 0, | 420 | rc = ecryptfs_encrypt_page_offset(crypt_stat, enc_extent_page, 0, |
432 | page, (extent_offset | 421 | page, (extent_offset |
433 | * crypt_stat->extent_size), | 422 | * crypt_stat->extent_size), |
@@ -440,14 +429,6 @@ static int ecryptfs_encrypt_extent(struct page *enc_extent_page, | |||
440 | goto out; | 429 | goto out; |
441 | } | 430 | } |
442 | rc = 0; | 431 | rc = 0; |
443 | if (unlikely(ecryptfs_verbosity > 0)) { | ||
444 | ecryptfs_printk(KERN_DEBUG, "Encrypt extent [0x%.16llx]; " | ||
445 | "rc = [%d]\n", | ||
446 | (unsigned long long)(extent_base + extent_offset), rc); | ||
447 | ecryptfs_printk(KERN_DEBUG, "First 8 bytes after " | ||
448 | "encryption:\n"); | ||
449 | ecryptfs_dump_hex((char *)(page_address(enc_extent_page)), 8); | ||
450 | } | ||
451 | out: | 432 | out: |
452 | return rc; | 433 | return rc; |
453 | } | 434 | } |
@@ -543,17 +524,6 @@ static int ecryptfs_decrypt_extent(struct page *page, | |||
543 | (unsigned long long)(extent_base + extent_offset), rc); | 524 | (unsigned long long)(extent_base + extent_offset), rc); |
544 | goto out; | 525 | goto out; |
545 | } | 526 | } |
546 | if (unlikely(ecryptfs_verbosity > 0)) { | ||
547 | ecryptfs_printk(KERN_DEBUG, "Decrypting extent " | ||
548 | "with iv:\n"); | ||
549 | ecryptfs_dump_hex(extent_iv, crypt_stat->iv_bytes); | ||
550 | ecryptfs_printk(KERN_DEBUG, "First 8 bytes before " | ||
551 | "decryption:\n"); | ||
552 | ecryptfs_dump_hex((char *) | ||
553 | (page_address(enc_extent_page) | ||
554 | + (extent_offset * crypt_stat->extent_size)), | ||
555 | 8); | ||
556 | } | ||
557 | rc = ecryptfs_decrypt_page_offset(crypt_stat, page, | 527 | rc = ecryptfs_decrypt_page_offset(crypt_stat, page, |
558 | (extent_offset | 528 | (extent_offset |
559 | * crypt_stat->extent_size), | 529 | * crypt_stat->extent_size), |
@@ -567,16 +537,6 @@ static int ecryptfs_decrypt_extent(struct page *page, | |||
567 | goto out; | 537 | goto out; |
568 | } | 538 | } |
569 | rc = 0; | 539 | rc = 0; |
570 | if (unlikely(ecryptfs_verbosity > 0)) { | ||
571 | ecryptfs_printk(KERN_DEBUG, "Decrypt extent [0x%.16llx]; " | ||
572 | "rc = [%d]\n", | ||
573 | (unsigned long long)(extent_base + extent_offset), rc); | ||
574 | ecryptfs_printk(KERN_DEBUG, "First 8 bytes after " | ||
575 | "decryption:\n"); | ||
576 | ecryptfs_dump_hex((char *)(page_address(page) | ||
577 | + (extent_offset | ||
578 | * crypt_stat->extent_size)), 8); | ||
579 | } | ||
580 | out: | 540 | out: |
581 | return rc; | 541 | return rc; |
582 | } | 542 | } |
@@ -1590,8 +1550,8 @@ int ecryptfs_read_and_validate_xattr_region(struct dentry *dentry, | |||
1590 | */ | 1550 | */ |
1591 | int ecryptfs_read_metadata(struct dentry *ecryptfs_dentry) | 1551 | int ecryptfs_read_metadata(struct dentry *ecryptfs_dentry) |
1592 | { | 1552 | { |
1593 | int rc = 0; | 1553 | int rc; |
1594 | char *page_virt = NULL; | 1554 | char *page_virt; |
1595 | struct inode *ecryptfs_inode = ecryptfs_dentry->d_inode; | 1555 | struct inode *ecryptfs_inode = ecryptfs_dentry->d_inode; |
1596 | struct ecryptfs_crypt_stat *crypt_stat = | 1556 | struct ecryptfs_crypt_stat *crypt_stat = |
1597 | &ecryptfs_inode_to_private(ecryptfs_inode)->crypt_stat; | 1557 | &ecryptfs_inode_to_private(ecryptfs_inode)->crypt_stat; |
@@ -1616,11 +1576,13 @@ int ecryptfs_read_metadata(struct dentry *ecryptfs_dentry) | |||
1616 | ecryptfs_dentry, | 1576 | ecryptfs_dentry, |
1617 | ECRYPTFS_VALIDATE_HEADER_SIZE); | 1577 | ECRYPTFS_VALIDATE_HEADER_SIZE); |
1618 | if (rc) { | 1578 | if (rc) { |
1579 | /* metadata is not in the file header, so try xattrs */ | ||
1619 | memset(page_virt, 0, PAGE_CACHE_SIZE); | 1580 | memset(page_virt, 0, PAGE_CACHE_SIZE); |
1620 | rc = ecryptfs_read_xattr_region(page_virt, ecryptfs_inode); | 1581 | rc = ecryptfs_read_xattr_region(page_virt, ecryptfs_inode); |
1621 | if (rc) { | 1582 | if (rc) { |
1622 | printk(KERN_DEBUG "Valid eCryptfs headers not found in " | 1583 | printk(KERN_DEBUG "Valid eCryptfs headers not found in " |
1623 | "file header region or xattr region\n"); | 1584 | "file header region or xattr region, inode %lu\n", |
1585 | ecryptfs_inode->i_ino); | ||
1624 | rc = -EINVAL; | 1586 | rc = -EINVAL; |
1625 | goto out; | 1587 | goto out; |
1626 | } | 1588 | } |
@@ -1629,7 +1591,8 @@ int ecryptfs_read_metadata(struct dentry *ecryptfs_dentry) | |||
1629 | ECRYPTFS_DONT_VALIDATE_HEADER_SIZE); | 1591 | ECRYPTFS_DONT_VALIDATE_HEADER_SIZE); |
1630 | if (rc) { | 1592 | if (rc) { |
1631 | printk(KERN_DEBUG "Valid eCryptfs headers not found in " | 1593 | printk(KERN_DEBUG "Valid eCryptfs headers not found in " |
1632 | "file xattr region either\n"); | 1594 | "file xattr region either, inode %lu\n", |
1595 | ecryptfs_inode->i_ino); | ||
1633 | rc = -EINVAL; | 1596 | rc = -EINVAL; |
1634 | } | 1597 | } |
1635 | if (crypt_stat->mount_crypt_stat->flags | 1598 | if (crypt_stat->mount_crypt_stat->flags |
@@ -1640,7 +1603,8 @@ int ecryptfs_read_metadata(struct dentry *ecryptfs_dentry) | |||
1640 | "crypto metadata only in the extended attribute " | 1603 | "crypto metadata only in the extended attribute " |
1641 | "region, but eCryptfs was mounted without " | 1604 | "region, but eCryptfs was mounted without " |
1642 | "xattr support enabled. eCryptfs will not treat " | 1605 | "xattr support enabled. eCryptfs will not treat " |
1643 | "this like an encrypted file.\n"); | 1606 | "this like an encrypted file, inode %lu\n", |
1607 | ecryptfs_inode->i_ino); | ||
1644 | rc = -EINVAL; | 1608 | rc = -EINVAL; |
1645 | } | 1609 | } |
1646 | } | 1610 | } |
@@ -2026,6 +1990,17 @@ out: | |||
2026 | return; | 1990 | return; |
2027 | } | 1991 | } |
2028 | 1992 | ||
1993 | static size_t ecryptfs_max_decoded_size(size_t encoded_size) | ||
1994 | { | ||
1995 | /* Not exact; conservatively long. Every block of 4 | ||
1996 | * encoded characters decodes into a block of 3 | ||
1997 | * decoded characters. This segment of code provides | ||
1998 | * the caller with the maximum amount of allocated | ||
1999 | * space that @dst will need to point to in a | ||
2000 | * subsequent call. */ | ||
2001 | return ((encoded_size + 1) * 3) / 4; | ||
2002 | } | ||
2003 | |||
2029 | /** | 2004 | /** |
2030 | * ecryptfs_decode_from_filename | 2005 | * ecryptfs_decode_from_filename |
2031 | * @dst: If NULL, this function only sets @dst_size and returns. If | 2006 | * @dst: If NULL, this function only sets @dst_size and returns. If |
@@ -2044,13 +2019,7 @@ ecryptfs_decode_from_filename(unsigned char *dst, size_t *dst_size, | |||
2044 | size_t dst_byte_offset = 0; | 2019 | size_t dst_byte_offset = 0; |
2045 | 2020 | ||
2046 | if (dst == NULL) { | 2021 | if (dst == NULL) { |
2047 | /* Not exact; conservatively long. Every block of 4 | 2022 | (*dst_size) = ecryptfs_max_decoded_size(src_size); |
2048 | * encoded characters decodes into a block of 3 | ||
2049 | * decoded characters. This segment of code provides | ||
2050 | * the caller with the maximum amount of allocated | ||
2051 | * space that @dst will need to point to in a | ||
2052 | * subsequent call. */ | ||
2053 | (*dst_size) = (((src_size + 1) * 3) / 4); | ||
2054 | goto out; | 2023 | goto out; |
2055 | } | 2024 | } |
2056 | while (src_byte_offset < src_size) { | 2025 | while (src_byte_offset < src_size) { |
@@ -2275,3 +2244,52 @@ out_free: | |||
2275 | out: | 2244 | out: |
2276 | return rc; | 2245 | return rc; |
2277 | } | 2246 | } |
2247 | |||
2248 | #define ENC_NAME_MAX_BLOCKLEN_8_OR_16 143 | ||
2249 | |||
2250 | int ecryptfs_set_f_namelen(long *namelen, long lower_namelen, | ||
2251 | struct ecryptfs_mount_crypt_stat *mount_crypt_stat) | ||
2252 | { | ||
2253 | struct blkcipher_desc desc; | ||
2254 | struct mutex *tfm_mutex; | ||
2255 | size_t cipher_blocksize; | ||
2256 | int rc; | ||
2257 | |||
2258 | if (!(mount_crypt_stat->flags & ECRYPTFS_GLOBAL_ENCRYPT_FILENAMES)) { | ||
2259 | (*namelen) = lower_namelen; | ||
2260 | return 0; | ||
2261 | } | ||
2262 | |||
2263 | rc = ecryptfs_get_tfm_and_mutex_for_cipher_name(&desc.tfm, &tfm_mutex, | ||
2264 | mount_crypt_stat->global_default_fn_cipher_name); | ||
2265 | if (unlikely(rc)) { | ||
2266 | (*namelen) = 0; | ||
2267 | return rc; | ||
2268 | } | ||
2269 | |||
2270 | mutex_lock(tfm_mutex); | ||
2271 | cipher_blocksize = crypto_blkcipher_blocksize(desc.tfm); | ||
2272 | mutex_unlock(tfm_mutex); | ||
2273 | |||
2274 | /* Return an exact amount for the common cases */ | ||
2275 | if (lower_namelen == NAME_MAX | ||
2276 | && (cipher_blocksize == 8 || cipher_blocksize == 16)) { | ||
2277 | (*namelen) = ENC_NAME_MAX_BLOCKLEN_8_OR_16; | ||
2278 | return 0; | ||
2279 | } | ||
2280 | |||
2281 | /* Return a safe estimate for the uncommon cases */ | ||
2282 | (*namelen) = lower_namelen; | ||
2283 | (*namelen) -= ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX_SIZE; | ||
2284 | /* Since this is the max decoded size, subtract 1 "decoded block" len */ | ||
2285 | (*namelen) = ecryptfs_max_decoded_size(*namelen) - 3; | ||
2286 | (*namelen) -= ECRYPTFS_TAG_70_MAX_METADATA_SIZE; | ||
2287 | (*namelen) -= ECRYPTFS_FILENAME_MIN_RANDOM_PREPEND_BYTES; | ||
2288 | /* Worst case is that the filename is padded nearly a full block size */ | ||
2289 | (*namelen) -= cipher_blocksize - 1; | ||
2290 | |||
2291 | if ((*namelen) < 0) | ||
2292 | (*namelen) = 0; | ||
2293 | |||
2294 | return 0; | ||
2295 | } | ||
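The ecryptfs_max_decoded_size() helper factored out above captures the usual 4-to-3 ratio of base64-style decoding, rounded up conservatively, and ecryptfs_set_f_namelen() then reuses it to derive a safe f_namelen estimate. A quick standalone check of the arithmetic (same formula, wrapper name local to the example):

    #include <stdio.h>
    #include <stddef.h>

    static size_t max_decoded_size(size_t encoded_size)
    {
        /* every 4 encoded characters decode to at most 3 bytes */
        return ((encoded_size + 1) * 3) / 4;
    }

    int main(void)
    {
        printf("%zu %zu %zu\n", max_decoded_size(4),
               max_decoded_size(143), max_decoded_size(255));
        return 0;
    }

This prints 3, 108 and 192, matching the conservative bound described in the helper's comment.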
diff --git a/fs/ecryptfs/ecryptfs_kernel.h b/fs/ecryptfs/ecryptfs_kernel.h index a9f29b12fbf2..867b64c5d84f 100644 --- a/fs/ecryptfs/ecryptfs_kernel.h +++ b/fs/ecryptfs/ecryptfs_kernel.h | |||
@@ -151,12 +151,21 @@ ecryptfs_get_key_payload_data(struct key *key) | |||
151 | * dentry name */ | 151 | * dentry name */ |
152 | #define ECRYPTFS_TAG_73_PACKET_TYPE 0x49 /* FEK-encrypted filename as | 152 | #define ECRYPTFS_TAG_73_PACKET_TYPE 0x49 /* FEK-encrypted filename as |
153 | * metadata */ | 153 | * metadata */ |
154 | #define ECRYPTFS_MIN_PKT_LEN_SIZE 1 /* Min size to specify packet length */ | ||
155 | #define ECRYPTFS_MAX_PKT_LEN_SIZE 2 /* Pass at least this many bytes to | ||
156 | * ecryptfs_parse_packet_length() and | ||
157 | * ecryptfs_write_packet_length() | ||
158 | */ | ||
154 | /* Constraint: ECRYPTFS_FILENAME_MIN_RANDOM_PREPEND_BYTES >= | 159 | /* Constraint: ECRYPTFS_FILENAME_MIN_RANDOM_PREPEND_BYTES >= |
155 | * ECRYPTFS_MAX_IV_BYTES */ | 160 | * ECRYPTFS_MAX_IV_BYTES */ |
156 | #define ECRYPTFS_FILENAME_MIN_RANDOM_PREPEND_BYTES 16 | 161 | #define ECRYPTFS_FILENAME_MIN_RANDOM_PREPEND_BYTES 16 |
157 | #define ECRYPTFS_NON_NULL 0x42 /* A reasonable substitute for NULL */ | 162 | #define ECRYPTFS_NON_NULL 0x42 /* A reasonable substitute for NULL */ |
158 | #define MD5_DIGEST_SIZE 16 | 163 | #define MD5_DIGEST_SIZE 16 |
159 | #define ECRYPTFS_TAG_70_DIGEST_SIZE MD5_DIGEST_SIZE | 164 | #define ECRYPTFS_TAG_70_DIGEST_SIZE MD5_DIGEST_SIZE |
165 | #define ECRYPTFS_TAG_70_MIN_METADATA_SIZE (1 + ECRYPTFS_MIN_PKT_LEN_SIZE \ | ||
166 | + ECRYPTFS_SIG_SIZE + 1 + 1) | ||
167 | #define ECRYPTFS_TAG_70_MAX_METADATA_SIZE (1 + ECRYPTFS_MAX_PKT_LEN_SIZE \ | ||
168 | + ECRYPTFS_SIG_SIZE + 1 + 1) | ||
160 | #define ECRYPTFS_FEK_ENCRYPTED_FILENAME_PREFIX "ECRYPTFS_FEK_ENCRYPTED." | 169 | #define ECRYPTFS_FEK_ENCRYPTED_FILENAME_PREFIX "ECRYPTFS_FEK_ENCRYPTED." |
161 | #define ECRYPTFS_FEK_ENCRYPTED_FILENAME_PREFIX_SIZE 23 | 170 | #define ECRYPTFS_FEK_ENCRYPTED_FILENAME_PREFIX_SIZE 23 |
162 | #define ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX "ECRYPTFS_FNEK_ENCRYPTED." | 171 | #define ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX "ECRYPTFS_FNEK_ENCRYPTED." |
@@ -696,6 +705,8 @@ ecryptfs_parse_tag_70_packet(char **filename, size_t *filename_size, | |||
696 | size_t *packet_size, | 705 | size_t *packet_size, |
697 | struct ecryptfs_mount_crypt_stat *mount_crypt_stat, | 706 | struct ecryptfs_mount_crypt_stat *mount_crypt_stat, |
698 | char *data, size_t max_packet_size); | 707 | char *data, size_t max_packet_size); |
708 | int ecryptfs_set_f_namelen(long *namelen, long lower_namelen, | ||
709 | struct ecryptfs_mount_crypt_stat *mount_crypt_stat); | ||
699 | int ecryptfs_derive_iv(char *iv, struct ecryptfs_crypt_stat *crypt_stat, | 710 | int ecryptfs_derive_iv(char *iv, struct ecryptfs_crypt_stat *crypt_stat, |
700 | loff_t offset); | 711 | loff_t offset); |
701 | 712 | ||
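The new ECRYPTFS_TAG_70_MIN/MAX_METADATA_SIZE macros above bound the fixed, non-filename portion of a tag 70 packet in terms of the one- and two-byte packet-length sizes defined just before them. Plugging in a value for ECRYPTFS_SIG_SIZE, taken as 8 bytes here purely as an assumption since the constant is not shown in this hunk, gives concrete numbers:

    #include <stdio.h>

    #define ECRYPTFS_SIG_SIZE         8   /* assumed for the example */
    #define ECRYPTFS_MIN_PKT_LEN_SIZE 1
    #define ECRYPTFS_MAX_PKT_LEN_SIZE 2

    int main(void)
    {
        int min_md = 1 + ECRYPTFS_MIN_PKT_LEN_SIZE + ECRYPTFS_SIG_SIZE + 1 + 1;
        int max_md = 1 + ECRYPTFS_MAX_PKT_LEN_SIZE + ECRYPTFS_SIG_SIZE + 1 + 1;

        printf("tag 70 metadata: min %d, max %d bytes\n", min_md, max_md);
        return 0;
    }

Under that assumption the bounds come out at 12 and 13 bytes, which keystore.c now checks against instead of the open-coded sums.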
diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c index 19a8ca4ab1dd..ab35b113003b 100644 --- a/fs/ecryptfs/inode.c +++ b/fs/ecryptfs/inode.c | |||
@@ -822,18 +822,6 @@ static int truncate_upper(struct dentry *dentry, struct iattr *ia, | |||
822 | size_t num_zeros = (PAGE_CACHE_SIZE | 822 | size_t num_zeros = (PAGE_CACHE_SIZE |
823 | - (ia->ia_size & ~PAGE_CACHE_MASK)); | 823 | - (ia->ia_size & ~PAGE_CACHE_MASK)); |
824 | 824 | ||
825 | |||
826 | /* | ||
827 | * XXX(truncate) this should really happen at the begginning | ||
828 | * of ->setattr. But the code is too messy to that as part | ||
829 | * of a larger patch. ecryptfs is also totally missing out | ||
830 | * on the inode_change_ok check at the beginning of | ||
831 | * ->setattr while would include this. | ||
832 | */ | ||
833 | rc = inode_newsize_ok(inode, ia->ia_size); | ||
834 | if (rc) | ||
835 | goto out; | ||
836 | |||
837 | if (!(crypt_stat->flags & ECRYPTFS_ENCRYPTED)) { | 825 | if (!(crypt_stat->flags & ECRYPTFS_ENCRYPTED)) { |
838 | truncate_setsize(inode, ia->ia_size); | 826 | truncate_setsize(inode, ia->ia_size); |
839 | lower_ia->ia_size = ia->ia_size; | 827 | lower_ia->ia_size = ia->ia_size; |
@@ -883,6 +871,28 @@ out: | |||
883 | return rc; | 871 | return rc; |
884 | } | 872 | } |
885 | 873 | ||
874 | static int ecryptfs_inode_newsize_ok(struct inode *inode, loff_t offset) | ||
875 | { | ||
876 | struct ecryptfs_crypt_stat *crypt_stat; | ||
877 | loff_t lower_oldsize, lower_newsize; | ||
878 | |||
879 | crypt_stat = &ecryptfs_inode_to_private(inode)->crypt_stat; | ||
880 | lower_oldsize = upper_size_to_lower_size(crypt_stat, | ||
881 | i_size_read(inode)); | ||
882 | lower_newsize = upper_size_to_lower_size(crypt_stat, offset); | ||
883 | if (lower_newsize > lower_oldsize) { | ||
884 | /* | ||
885 | * The eCryptfs inode and the new *lower* size are mixed here | ||
886 | * because we may not have the lower i_mutex held and/or it may | ||
887 | * not be appropriate to call inode_newsize_ok() with inodes | ||
888 | * from other filesystems. | ||
889 | */ | ||
890 | return inode_newsize_ok(inode, lower_newsize); | ||
891 | } | ||
892 | |||
893 | return 0; | ||
894 | } | ||
895 | |||
886 | /** | 896 | /** |
887 | * ecryptfs_truncate | 897 | * ecryptfs_truncate |
888 | * @dentry: The ecryptfs layer dentry | 898 | * @dentry: The ecryptfs layer dentry |
@@ -899,6 +909,10 @@ int ecryptfs_truncate(struct dentry *dentry, loff_t new_length) | |||
899 | struct iattr lower_ia = { .ia_valid = 0 }; | 909 | struct iattr lower_ia = { .ia_valid = 0 }; |
900 | int rc; | 910 | int rc; |
901 | 911 | ||
912 | rc = ecryptfs_inode_newsize_ok(dentry->d_inode, new_length); | ||
913 | if (rc) | ||
914 | return rc; | ||
915 | |||
902 | rc = truncate_upper(dentry, &ia, &lower_ia); | 916 | rc = truncate_upper(dentry, &ia, &lower_ia); |
903 | if (!rc && lower_ia.ia_valid & ATTR_SIZE) { | 917 | if (!rc && lower_ia.ia_valid & ATTR_SIZE) { |
904 | struct dentry *lower_dentry = ecryptfs_dentry_to_lower(dentry); | 918 | struct dentry *lower_dentry = ecryptfs_dentry_to_lower(dentry); |
@@ -978,6 +992,16 @@ static int ecryptfs_setattr(struct dentry *dentry, struct iattr *ia) | |||
978 | } | 992 | } |
979 | } | 993 | } |
980 | mutex_unlock(&crypt_stat->cs_mutex); | 994 | mutex_unlock(&crypt_stat->cs_mutex); |
995 | |||
996 | rc = inode_change_ok(inode, ia); | ||
997 | if (rc) | ||
998 | goto out; | ||
999 | if (ia->ia_valid & ATTR_SIZE) { | ||
1000 | rc = ecryptfs_inode_newsize_ok(inode, ia->ia_size); | ||
1001 | if (rc) | ||
1002 | goto out; | ||
1003 | } | ||
1004 | |||
981 | if (S_ISREG(inode->i_mode)) { | 1005 | if (S_ISREG(inode->i_mode)) { |
982 | rc = filemap_write_and_wait(inode->i_mapping); | 1006 | rc = filemap_write_and_wait(inode->i_mapping); |
983 | if (rc) | 1007 | if (rc) |
@@ -1061,6 +1085,8 @@ ecryptfs_setxattr(struct dentry *dentry, const char *name, const void *value, | |||
1061 | } | 1085 | } |
1062 | 1086 | ||
1063 | rc = vfs_setxattr(lower_dentry, name, value, size, flags); | 1087 | rc = vfs_setxattr(lower_dentry, name, value, size, flags); |
1088 | if (!rc) | ||
1089 | fsstack_copy_attr_all(dentry->d_inode, lower_dentry->d_inode); | ||
1064 | out: | 1090 | out: |
1065 | return rc; | 1091 | return rc; |
1066 | } | 1092 | } |
diff --git a/fs/ecryptfs/keystore.c b/fs/ecryptfs/keystore.c index ac1ad48c2376..2333203a120b 100644 --- a/fs/ecryptfs/keystore.c +++ b/fs/ecryptfs/keystore.c | |||
@@ -109,7 +109,7 @@ int ecryptfs_parse_packet_length(unsigned char *data, size_t *size, | |||
109 | (*size) += ((unsigned char)(data[1]) + 192); | 109 | (*size) += ((unsigned char)(data[1]) + 192); |
110 | (*length_size) = 2; | 110 | (*length_size) = 2; |
111 | } else if (data[0] == 255) { | 111 | } else if (data[0] == 255) { |
112 | /* Five-byte length; we're not supposed to see this */ | 112 | /* If support is added, adjust ECRYPTFS_MAX_PKT_LEN_SIZE */ |
113 | ecryptfs_printk(KERN_ERR, "Five-byte packet length not " | 113 | ecryptfs_printk(KERN_ERR, "Five-byte packet length not " |
114 | "supported\n"); | 114 | "supported\n"); |
115 | rc = -EINVAL; | 115 | rc = -EINVAL; |
@@ -126,7 +126,7 @@ out: | |||
126 | /** | 126 | /** |
127 | * ecryptfs_write_packet_length | 127 | * ecryptfs_write_packet_length |
128 | * @dest: The byte array target into which to write the length. Must | 128 | * @dest: The byte array target into which to write the length. Must |
129 | * have at least 5 bytes allocated. | 129 | * have at least ECRYPTFS_MAX_PKT_LEN_SIZE bytes allocated. |
130 | * @size: The length to write. | 130 | * @size: The length to write. |
131 | * @packet_size_length: The number of bytes used to encode the packet | 131 | * @packet_size_length: The number of bytes used to encode the packet |
132 | * length is written to this address. | 132 | * length is written to this address. |
@@ -146,6 +146,7 @@ int ecryptfs_write_packet_length(char *dest, size_t size, | |||
146 | dest[1] = ((size - 192) % 256); | 146 | dest[1] = ((size - 192) % 256); |
147 | (*packet_size_length) = 2; | 147 | (*packet_size_length) = 2; |
148 | } else { | 148 | } else { |
149 | /* If support is added, adjust ECRYPTFS_MAX_PKT_LEN_SIZE */ | ||
149 | rc = -EINVAL; | 150 | rc = -EINVAL; |
150 | ecryptfs_printk(KERN_WARNING, | 151 | ecryptfs_printk(KERN_WARNING, |
151 | "Unsupported packet size: [%zd]\n", size); | 152 | "Unsupported packet size: [%zd]\n", size); |
@@ -678,10 +679,7 @@ ecryptfs_write_tag_70_packet(char *dest, size_t *remaining_bytes, | |||
678 | * Octets N3-N4: Block-aligned encrypted filename | 679 | * Octets N3-N4: Block-aligned encrypted filename |
679 | * - Consists of a minimum number of random characters, a \0 | 680 | * - Consists of a minimum number of random characters, a \0 |
680 | * separator, and then the filename */ | 681 | * separator, and then the filename */ |
681 | s->max_packet_size = (1 /* Tag 70 identifier */ | 682 | s->max_packet_size = (ECRYPTFS_TAG_70_MAX_METADATA_SIZE |
682 | + 3 /* Max Tag 70 packet size */ | ||
683 | + ECRYPTFS_SIG_SIZE /* FNEK sig */ | ||
684 | + 1 /* Cipher identifier */ | ||
685 | + s->block_aligned_filename_size); | 683 | + s->block_aligned_filename_size); |
686 | if (dest == NULL) { | 684 | if (dest == NULL) { |
687 | (*packet_size) = s->max_packet_size; | 685 | (*packet_size) = s->max_packet_size; |
@@ -933,10 +931,10 @@ ecryptfs_parse_tag_70_packet(char **filename, size_t *filename_size, | |||
933 | goto out; | 931 | goto out; |
934 | } | 932 | } |
935 | s->desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP; | 933 | s->desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP; |
936 | if (max_packet_size < (1 + 1 + ECRYPTFS_SIG_SIZE + 1 + 1)) { | 934 | if (max_packet_size < ECRYPTFS_TAG_70_MIN_METADATA_SIZE) { |
937 | printk(KERN_WARNING "%s: max_packet_size is [%zd]; it must be " | 935 | printk(KERN_WARNING "%s: max_packet_size is [%zd]; it must be " |
938 | "at least [%d]\n", __func__, max_packet_size, | 936 | "at least [%d]\n", __func__, max_packet_size, |
939 | (1 + 1 + ECRYPTFS_SIG_SIZE + 1 + 1)); | 937 | ECRYPTFS_TAG_70_MIN_METADATA_SIZE); |
940 | rc = -EINVAL; | 938 | rc = -EINVAL; |
941 | goto out; | 939 | goto out; |
942 | } | 940 | } |
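ecryptfs_parse_packet_length() and ecryptfs_write_packet_length(), both touched above, use an OpenPGP-style variable-length size field: one octet for small values, two octets above that, and the five-octet form is rejected outright (hence the new ECRYPTFS_MAX_PKT_LEN_SIZE comments). A standalone sketch of that encoding; the two-octet cutoff used here is the conservative OpenPGP one, since the kernel's exact upper bound is not visible in this hunk:

    #include <stdio.h>
    #include <stddef.h>

    static int write_packet_length(unsigned char *dest, size_t size,
                                   size_t *length_size)
    {
        if (size < 192) {
            dest[0] = (unsigned char)size;
            *length_size = 1;
        } else if (size < 8384) {                 /* conservative cutoff */
            dest[0] = (unsigned char)(((size - 192) / 256) + 192);
            dest[1] = (unsigned char)((size - 192) % 256);
            *length_size = 2;
        } else {
            return -1;    /* would need the unsupported five-byte form */
        }
        return 0;
    }

    static size_t parse_packet_length(const unsigned char *data)
    {
        if (data[0] < 192)
            return data[0];
        return ((size_t)(data[0] - 192) * 256) + data[1] + 192;
    }

    int main(void)
    {
        unsigned char buf[2];
        size_t n;

        if (write_packet_length(buf, 500, &n) == 0)
            printf("encoded in %zu bytes, decodes to %zu\n",
                   n, parse_packet_length(buf));
        return 0;
    }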
diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c index 940a82e63dc3..3a06f4043df4 100644 --- a/fs/ecryptfs/miscdev.c +++ b/fs/ecryptfs/miscdev.c | |||
@@ -218,6 +218,29 @@ out_unlock: | |||
218 | return rc; | 218 | return rc; |
219 | } | 219 | } |
220 | 220 | ||
221 | /* | ||
222 | * miscdevfs packet format: | ||
223 | * Octet 0: Type | ||
224 | * Octets 1-4: network byte order msg_ctx->counter | ||
225 | * Octets 5-N0: Size of struct ecryptfs_message to follow | ||
226 | * Octets N0-N1: struct ecryptfs_message (including data) | ||
227 | * | ||
228 | * Octets 5-N1 not written if the packet type does not include a message | ||
229 | */ | ||
230 | #define PKT_TYPE_SIZE 1 | ||
231 | #define PKT_CTR_SIZE 4 | ||
232 | #define MIN_NON_MSG_PKT_SIZE (PKT_TYPE_SIZE + PKT_CTR_SIZE) | ||
233 | #define MIN_MSG_PKT_SIZE (PKT_TYPE_SIZE + PKT_CTR_SIZE \ | ||
234 | + ECRYPTFS_MIN_PKT_LEN_SIZE) | ||
235 | /* 4 + ECRYPTFS_MAX_ENCRYPTED_KEY_BYTES comes from tag 65 packet format */ | ||
236 | #define MAX_MSG_PKT_SIZE (PKT_TYPE_SIZE + PKT_CTR_SIZE \ | ||
237 | + ECRYPTFS_MAX_PKT_LEN_SIZE \ | ||
238 | + sizeof(struct ecryptfs_message) \ | ||
239 | + 4 + ECRYPTFS_MAX_ENCRYPTED_KEY_BYTES) | ||
240 | #define PKT_TYPE_OFFSET 0 | ||
241 | #define PKT_CTR_OFFSET PKT_TYPE_SIZE | ||
242 | #define PKT_LEN_OFFSET (PKT_TYPE_SIZE + PKT_CTR_SIZE) | ||
243 | |||
221 | /** | 244 | /** |
222 | * ecryptfs_miscdev_read - format and send message from queue | 245 | * ecryptfs_miscdev_read - format and send message from queue |
223 | * @file: fs/ecryptfs/euid miscdevfs handle (ignored) | 246 | * @file: fs/ecryptfs/euid miscdevfs handle (ignored) |
@@ -237,7 +260,7 @@ ecryptfs_miscdev_read(struct file *file, char __user *buf, size_t count, | |||
237 | struct ecryptfs_daemon *daemon; | 260 | struct ecryptfs_daemon *daemon; |
238 | struct ecryptfs_msg_ctx *msg_ctx; | 261 | struct ecryptfs_msg_ctx *msg_ctx; |
239 | size_t packet_length_size; | 262 | size_t packet_length_size; |
240 | char packet_length[3]; | 263 | char packet_length[ECRYPTFS_MAX_PKT_LEN_SIZE]; |
241 | size_t i; | 264 | size_t i; |
242 | size_t total_length; | 265 | size_t total_length; |
243 | uid_t euid = current_euid(); | 266 | uid_t euid = current_euid(); |
@@ -305,15 +328,8 @@ check_list: | |||
305 | packet_length_size = 0; | 328 | packet_length_size = 0; |
306 | msg_ctx->msg_size = 0; | 329 | msg_ctx->msg_size = 0; |
307 | } | 330 | } |
308 | /* miscdevfs packet format: | 331 | total_length = (PKT_TYPE_SIZE + PKT_CTR_SIZE + packet_length_size |
309 | * Octet 0: Type | 332 | + msg_ctx->msg_size); |
310 | * Octets 1-4: network byte order msg_ctx->counter | ||
311 | * Octets 5-N0: Size of struct ecryptfs_message to follow | ||
312 | * Octets N0-N1: struct ecryptfs_message (including data) | ||
313 | * | ||
314 | * Octets 5-N1 not written if the packet type does not | ||
315 | * include a message */ | ||
316 | total_length = (1 + 4 + packet_length_size + msg_ctx->msg_size); | ||
317 | if (count < total_length) { | 333 | if (count < total_length) { |
318 | rc = 0; | 334 | rc = 0; |
319 | printk(KERN_WARNING "%s: Only given user buffer of " | 335 | printk(KERN_WARNING "%s: Only given user buffer of " |
@@ -324,9 +340,10 @@ check_list: | |||
324 | rc = -EFAULT; | 340 | rc = -EFAULT; |
325 | if (put_user(msg_ctx->type, buf)) | 341 | if (put_user(msg_ctx->type, buf)) |
326 | goto out_unlock_msg_ctx; | 342 | goto out_unlock_msg_ctx; |
327 | if (put_user(cpu_to_be32(msg_ctx->counter), (__be32 __user *)(buf + 1))) | 343 | if (put_user(cpu_to_be32(msg_ctx->counter), |
344 | (__be32 __user *)(&buf[PKT_CTR_OFFSET]))) | ||
328 | goto out_unlock_msg_ctx; | 345 | goto out_unlock_msg_ctx; |
329 | i = 5; | 346 | i = PKT_TYPE_SIZE + PKT_CTR_SIZE; |
330 | if (msg_ctx->msg) { | 347 | if (msg_ctx->msg) { |
331 | if (copy_to_user(&buf[i], packet_length, packet_length_size)) | 348 | if (copy_to_user(&buf[i], packet_length, packet_length_size)) |
332 | goto out_unlock_msg_ctx; | 349 | goto out_unlock_msg_ctx; |
@@ -391,12 +408,6 @@ out: | |||
391 | * @count: Amount of data in @buf | 408 | * @count: Amount of data in @buf |
392 | * @ppos: Pointer to offset in file (ignored) | 409 | * @ppos: Pointer to offset in file (ignored) |
393 | * | 410 | * |
394 | * miscdevfs packet format: | ||
395 | * Octet 0: Type | ||
396 | * Octets 1-4: network byte order msg_ctx->counter (0's for non-response) | ||
397 | * Octets 5-N0: Size of struct ecryptfs_message to follow | ||
398 | * Octets N0-N1: struct ecryptfs_message (including data) | ||
399 | * | ||
400 | * Returns the number of bytes read from @buf | 411 | * Returns the number of bytes read from @buf |
401 | */ | 412 | */ |
402 | static ssize_t | 413 | static ssize_t |
@@ -405,60 +416,78 @@ ecryptfs_miscdev_write(struct file *file, const char __user *buf, | |||
405 | { | 416 | { |
406 | __be32 counter_nbo; | 417 | __be32 counter_nbo; |
407 | u32 seq; | 418 | u32 seq; |
408 | size_t packet_size, packet_size_length, i; | 419 | size_t packet_size, packet_size_length; |
409 | ssize_t sz = 0; | ||
410 | char *data; | 420 | char *data; |
411 | uid_t euid = current_euid(); | 421 | uid_t euid = current_euid(); |
412 | int rc; | 422 | unsigned char packet_size_peek[ECRYPTFS_MAX_PKT_LEN_SIZE]; |
423 | ssize_t rc; | ||
413 | 424 | ||
414 | if (count == 0) | 425 | if (count == 0) { |
415 | goto out; | 426 | return 0; |
427 | } else if (count == MIN_NON_MSG_PKT_SIZE) { | ||
428 | /* Likely a harmless MSG_HELO or MSG_QUIT - no packet length */ | ||
429 | goto memdup; | ||
430 | } else if (count < MIN_MSG_PKT_SIZE || count > MAX_MSG_PKT_SIZE) { | ||
431 | printk(KERN_WARNING "%s: Acceptable packet size range is " | ||
432 | "[%d-%zu], but amount of data written is [%zu].", | ||
433 | __func__, MIN_MSG_PKT_SIZE, MAX_MSG_PKT_SIZE, count); | ||
434 | return -EINVAL; | ||
435 | } | ||
436 | |||
437 | if (copy_from_user(packet_size_peek, &buf[PKT_LEN_OFFSET], | ||
438 | sizeof(packet_size_peek))) { | ||
439 | printk(KERN_WARNING "%s: Error while inspecting packet size\n", | ||
440 | __func__); | ||
441 | return -EFAULT; | ||
442 | } | ||
416 | 443 | ||
444 | rc = ecryptfs_parse_packet_length(packet_size_peek, &packet_size, | ||
445 | &packet_size_length); | ||
446 | if (rc) { | ||
447 | printk(KERN_WARNING "%s: Error parsing packet length; " | ||
448 | "rc = [%zd]\n", __func__, rc); | ||
449 | return rc; | ||
450 | } | ||
451 | |||
452 | if ((PKT_TYPE_SIZE + PKT_CTR_SIZE + packet_size_length + packet_size) | ||
453 | != count) { | ||
454 | printk(KERN_WARNING "%s: Invalid packet size [%zu]\n", __func__, | ||
455 | packet_size); | ||
456 | return -EINVAL; | ||
457 | } | ||
458 | |||
459 | memdup: | ||
417 | data = memdup_user(buf, count); | 460 | data = memdup_user(buf, count); |
418 | if (IS_ERR(data)) { | 461 | if (IS_ERR(data)) { |
419 | printk(KERN_ERR "%s: memdup_user returned error [%ld]\n", | 462 | printk(KERN_ERR "%s: memdup_user returned error [%ld]\n", |
420 | __func__, PTR_ERR(data)); | 463 | __func__, PTR_ERR(data)); |
421 | goto out; | 464 | return PTR_ERR(data); |
422 | } | 465 | } |
423 | sz = count; | 466 | switch (data[PKT_TYPE_OFFSET]) { |
424 | i = 0; | ||
425 | switch (data[i++]) { | ||
426 | case ECRYPTFS_MSG_RESPONSE: | 467 | case ECRYPTFS_MSG_RESPONSE: |
427 | if (count < (1 + 4 + 1 + sizeof(struct ecryptfs_message))) { | 468 | if (count < (MIN_MSG_PKT_SIZE |
469 | + sizeof(struct ecryptfs_message))) { | ||
428 | printk(KERN_WARNING "%s: Minimum acceptable packet " | 470 | printk(KERN_WARNING "%s: Minimum acceptable packet " |
429 | "size is [%zd], but amount of data written is " | 471 | "size is [%zd], but amount of data written is " |
430 | "only [%zd]. Discarding response packet.\n", | 472 | "only [%zd]. Discarding response packet.\n", |
431 | __func__, | 473 | __func__, |
432 | (1 + 4 + 1 + sizeof(struct ecryptfs_message)), | 474 | (MIN_MSG_PKT_SIZE |
433 | count); | 475 | + sizeof(struct ecryptfs_message)), count); |
476 | rc = -EINVAL; | ||
434 | goto out_free; | 477 | goto out_free; |
435 | } | 478 | } |
436 | memcpy(&counter_nbo, &data[i], 4); | 479 | memcpy(&counter_nbo, &data[PKT_CTR_OFFSET], PKT_CTR_SIZE); |
437 | seq = be32_to_cpu(counter_nbo); | 480 | seq = be32_to_cpu(counter_nbo); |
438 | i += 4; | 481 | rc = ecryptfs_miscdev_response( |
439 | rc = ecryptfs_parse_packet_length(&data[i], &packet_size, | 482 | &data[PKT_LEN_OFFSET + packet_size_length], |
440 | &packet_size_length); | 483 | packet_size, euid, current_user_ns(), |
484 | task_pid(current), seq); | ||
441 | if (rc) { | 485 | if (rc) { |
442 | printk(KERN_WARNING "%s: Error parsing packet length; " | ||
443 | "rc = [%d]\n", __func__, rc); | ||
444 | goto out_free; | ||
445 | } | ||
446 | i += packet_size_length; | ||
447 | if ((1 + 4 + packet_size_length + packet_size) != count) { | ||
448 | printk(KERN_WARNING "%s: (1 + packet_size_length([%zd])" | ||
449 | " + packet_size([%zd]))([%zd]) != " | ||
450 | "count([%zd]). Invalid packet format.\n", | ||
451 | __func__, packet_size_length, packet_size, | ||
452 | (1 + packet_size_length + packet_size), count); | ||
453 | goto out_free; | ||
454 | } | ||
455 | rc = ecryptfs_miscdev_response(&data[i], packet_size, | ||
456 | euid, current_user_ns(), | ||
457 | task_pid(current), seq); | ||
458 | if (rc) | ||
459 | printk(KERN_WARNING "%s: Failed to deliver miscdev " | 486 | printk(KERN_WARNING "%s: Failed to deliver miscdev " |
460 | "response to requesting operation; rc = [%d]\n", | 487 | "response to requesting operation; rc = [%zd]\n", |
461 | __func__, rc); | 488 | __func__, rc); |
489 | goto out_free; | ||
490 | } | ||
462 | break; | 491 | break; |
463 | case ECRYPTFS_MSG_HELO: | 492 | case ECRYPTFS_MSG_HELO: |
464 | case ECRYPTFS_MSG_QUIT: | 493 | case ECRYPTFS_MSG_QUIT: |
@@ -467,12 +496,13 @@ ecryptfs_miscdev_write(struct file *file, const char __user *buf, | |||
467 | ecryptfs_printk(KERN_WARNING, "Dropping miscdev " | 496 | ecryptfs_printk(KERN_WARNING, "Dropping miscdev " |
468 | "message of unrecognized type [%d]\n", | 497 | "message of unrecognized type [%d]\n", |
469 | data[0]); | 498 | data[0]); |
470 | break; | 499 | rc = -EINVAL; |
500 | goto out_free; | ||
471 | } | 501 | } |
502 | rc = count; | ||
472 | out_free: | 503 | out_free: |
473 | kfree(data); | 504 | kfree(data); |
474 | out: | 505 | return rc; |
475 | return sz; | ||
476 | } | 506 | } |
477 | 507 | ||
478 | 508 | ||
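The rewritten ecryptfs_miscdev_write() above validates the total write size up front: one type byte, four counter bytes, the encoded length field, then exactly the declared number of payload bytes; ecryptfs_miscdev_read() builds its total_length from the same PKT_* constants, so both directions agree on the offsets. A small standalone version of that accounting (helper name local to the example):

    #include <stddef.h>

    #define PKT_TYPE_SIZE 1
    #define PKT_CTR_SIZE  4

    /* A write of count bytes is well formed only if the fixed header,
     * the length field and the declared payload account for every byte. */
    static int miscdev_count_ok(size_t count, size_t pkt_len_field_size,
                                size_t payload_size)
    {
        return count == PKT_TYPE_SIZE + PKT_CTR_SIZE
                        + pkt_len_field_size + payload_size;
    }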
diff --git a/fs/ecryptfs/mmap.c b/fs/ecryptfs/mmap.c index 6a44148c5fb9..a46b3a8fee1e 100644 --- a/fs/ecryptfs/mmap.c +++ b/fs/ecryptfs/mmap.c | |||
@@ -57,6 +57,10 @@ struct page *ecryptfs_get_locked_page(struct inode *inode, loff_t index) | |||
57 | * @page: Page that is locked before this call is made | 57 | * @page: Page that is locked before this call is made |
58 | * | 58 | * |
59 | * Returns zero on success; non-zero otherwise | 59 | * Returns zero on success; non-zero otherwise |
60 | * | ||
61 | * This is where we encrypt the data and pass the encrypted data to | ||
62 | * the lower filesystem. In OpenPGP-compatible mode, we operate on | ||
63 | * entire underlying packets. | ||
60 | */ | 64 | */ |
61 | static int ecryptfs_writepage(struct page *page, struct writeback_control *wbc) | 65 | static int ecryptfs_writepage(struct page *page, struct writeback_control *wbc) |
62 | { | 66 | { |
@@ -146,7 +150,7 @@ ecryptfs_copy_up_encrypted_with_header(struct page *page, | |||
146 | /* This is a header extent */ | 150 | /* This is a header extent */ |
147 | char *page_virt; | 151 | char *page_virt; |
148 | 152 | ||
149 | page_virt = kmap_atomic(page, KM_USER0); | 153 | page_virt = kmap_atomic(page); |
150 | memset(page_virt, 0, PAGE_CACHE_SIZE); | 154 | memset(page_virt, 0, PAGE_CACHE_SIZE); |
151 | /* TODO: Support more than one header extent */ | 155 | /* TODO: Support more than one header extent */ |
152 | if (view_extent_num == 0) { | 156 | if (view_extent_num == 0) { |
@@ -159,7 +163,7 @@ ecryptfs_copy_up_encrypted_with_header(struct page *page, | |||
159 | crypt_stat, | 163 | crypt_stat, |
160 | &written); | 164 | &written); |
161 | } | 165 | } |
162 | kunmap_atomic(page_virt, KM_USER0); | 166 | kunmap_atomic(page_virt); |
163 | flush_dcache_page(page); | 167 | flush_dcache_page(page); |
164 | if (rc) { | 168 | if (rc) { |
165 | printk(KERN_ERR "%s: Error reading xattr " | 169 | printk(KERN_ERR "%s: Error reading xattr " |
@@ -481,10 +485,6 @@ int ecryptfs_write_inode_size_to_metadata(struct inode *ecryptfs_inode) | |||
481 | * @copied: The amount of data copied | 485 | * @copied: The amount of data copied |
482 | * @page: The eCryptfs page | 486 | * @page: The eCryptfs page |
483 | * @fsdata: The fsdata (unused) | 487 | * @fsdata: The fsdata (unused) |
484 | * | ||
485 | * This is where we encrypt the data and pass the encrypted data to | ||
486 | * the lower filesystem. In OpenPGP-compatible mode, we operate on | ||
487 | * entire underlying packets. | ||
488 | */ | 488 | */ |
489 | static int ecryptfs_write_end(struct file *file, | 489 | static int ecryptfs_write_end(struct file *file, |
490 | struct address_space *mapping, | 490 | struct address_space *mapping, |
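As an aside, a minimal sketch of the stack-based atomic kmap API these mmap.c hunks switch to: after the KM_USER0-style slots were removed, kmap_atomic() takes only the page and kunmap_atomic() only the virtual address. Generic example, not the eCryptfs code.

        #include <linux/highmem.h>
        #include <linux/string.h>

        static void zero_page_contents(struct page *page, size_t len)
        {
                char *vaddr;

                vaddr = kmap_atomic(page);      /* no KM_* slot argument any more */
                memset(vaddr, 0, len);
                kunmap_atomic(vaddr);           /* unmap by address; no sleeping in between */
        }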
diff --git a/fs/ecryptfs/read_write.c b/fs/ecryptfs/read_write.c index 3745f7c2b9c2..b2a34a192f4f 100644 --- a/fs/ecryptfs/read_write.c +++ b/fs/ecryptfs/read_write.c | |||
@@ -130,13 +130,18 @@ int ecryptfs_write(struct inode *ecryptfs_inode, char *data, loff_t offset, | |||
130 | pgoff_t ecryptfs_page_idx = (pos >> PAGE_CACHE_SHIFT); | 130 | pgoff_t ecryptfs_page_idx = (pos >> PAGE_CACHE_SHIFT); |
131 | size_t start_offset_in_page = (pos & ~PAGE_CACHE_MASK); | 131 | size_t start_offset_in_page = (pos & ~PAGE_CACHE_MASK); |
132 | size_t num_bytes = (PAGE_CACHE_SIZE - start_offset_in_page); | 132 | size_t num_bytes = (PAGE_CACHE_SIZE - start_offset_in_page); |
133 | size_t total_remaining_bytes = ((offset + size) - pos); | 133 | loff_t total_remaining_bytes = ((offset + size) - pos); |
134 | |||
135 | if (fatal_signal_pending(current)) { | ||
136 | rc = -EINTR; | ||
137 | break; | ||
138 | } | ||
134 | 139 | ||
135 | if (num_bytes > total_remaining_bytes) | 140 | if (num_bytes > total_remaining_bytes) |
136 | num_bytes = total_remaining_bytes; | 141 | num_bytes = total_remaining_bytes; |
137 | if (pos < offset) { | 142 | if (pos < offset) { |
138 | /* remaining zeros to write, up to destination offset */ | 143 | /* remaining zeros to write, up to destination offset */ |
139 | size_t total_remaining_zeros = (offset - pos); | 144 | loff_t total_remaining_zeros = (offset - pos); |
140 | 145 | ||
141 | if (num_bytes > total_remaining_zeros) | 146 | if (num_bytes > total_remaining_zeros) |
142 | num_bytes = total_remaining_zeros; | 147 | num_bytes = total_remaining_zeros; |
@@ -151,7 +156,7 @@ int ecryptfs_write(struct inode *ecryptfs_inode, char *data, loff_t offset, | |||
151 | ecryptfs_page_idx, rc); | 156 | ecryptfs_page_idx, rc); |
152 | goto out; | 157 | goto out; |
153 | } | 158 | } |
154 | ecryptfs_page_virt = kmap_atomic(ecryptfs_page, KM_USER0); | 159 | ecryptfs_page_virt = kmap_atomic(ecryptfs_page); |
155 | 160 | ||
156 | /* | 161 | /* |
157 | * pos: where we're now writing, offset: where the request was | 162 | * pos: where we're now writing, offset: where the request was |
@@ -174,7 +179,7 @@ int ecryptfs_write(struct inode *ecryptfs_inode, char *data, loff_t offset, | |||
174 | (data + data_offset), num_bytes); | 179 | (data + data_offset), num_bytes); |
175 | data_offset += num_bytes; | 180 | data_offset += num_bytes; |
176 | } | 181 | } |
177 | kunmap_atomic(ecryptfs_page_virt, KM_USER0); | 182 | kunmap_atomic(ecryptfs_page_virt); |
178 | flush_dcache_page(ecryptfs_page); | 183 | flush_dcache_page(ecryptfs_page); |
179 | SetPageUptodate(ecryptfs_page); | 184 | SetPageUptodate(ecryptfs_page); |
180 | unlock_page(ecryptfs_page); | 185 | unlock_page(ecryptfs_page); |
@@ -193,15 +198,19 @@ int ecryptfs_write(struct inode *ecryptfs_inode, char *data, loff_t offset, | |||
193 | } | 198 | } |
194 | pos += num_bytes; | 199 | pos += num_bytes; |
195 | } | 200 | } |
196 | if ((offset + size) > ecryptfs_file_size) { | 201 | if (pos > ecryptfs_file_size) { |
197 | i_size_write(ecryptfs_inode, (offset + size)); | 202 | i_size_write(ecryptfs_inode, pos); |
198 | if (crypt_stat->flags & ECRYPTFS_ENCRYPTED) { | 203 | if (crypt_stat->flags & ECRYPTFS_ENCRYPTED) { |
199 | rc = ecryptfs_write_inode_size_to_metadata( | 204 | int rc2; |
205 | |||
206 | rc2 = ecryptfs_write_inode_size_to_metadata( | ||
200 | ecryptfs_inode); | 207 | ecryptfs_inode); |
201 | if (rc) { | 208 | if (rc2) { |
202 | printk(KERN_ERR "Problem with " | 209 | printk(KERN_ERR "Problem with " |
203 | "ecryptfs_write_inode_size_to_metadata; " | 210 | "ecryptfs_write_inode_size_to_metadata; " |
204 | "rc = [%d]\n", rc); | 211 | "rc = [%d]\n", rc2); |
212 | if (!rc) | ||
213 | rc = rc2; | ||
205 | goto out; | 214 | goto out; |
206 | } | 215 | } |
207 | } | 216 | } |
@@ -273,76 +282,3 @@ int ecryptfs_read_lower_page_segment(struct page *page_for_ecryptfs, | |||
273 | flush_dcache_page(page_for_ecryptfs); | 282 | flush_dcache_page(page_for_ecryptfs); |
274 | return rc; | 283 | return rc; |
275 | } | 284 | } |
276 | |||
277 | #if 0 | ||
278 | /** | ||
279 | * ecryptfs_read | ||
280 | * @data: The virtual address into which to write the data read (and | ||
281 | * possibly decrypted) from the lower file | ||
282 | * @offset: The offset in the decrypted view of the file from which to | ||
283 | * read into @data | ||
284 | * @size: The number of bytes to read into @data | ||
285 | * @ecryptfs_file: The eCryptfs file from which to read | ||
286 | * | ||
287 | * Read an arbitrary amount of data from an arbitrary location in the | ||
288 | * eCryptfs page cache. This is done on an extent-by-extent basis; | ||
289 | * individual extents are decrypted and read from the lower page | ||
290 | * cache (via VFS reads). This function takes care of all the | ||
291 | * address translation to locations in the lower filesystem. | ||
292 | * | ||
293 | * Returns zero on success; non-zero otherwise | ||
294 | */ | ||
295 | int ecryptfs_read(char *data, loff_t offset, size_t size, | ||
296 | struct file *ecryptfs_file) | ||
297 | { | ||
298 | struct inode *ecryptfs_inode = ecryptfs_file->f_dentry->d_inode; | ||
299 | struct page *ecryptfs_page; | ||
300 | char *ecryptfs_page_virt; | ||
301 | loff_t ecryptfs_file_size = i_size_read(ecryptfs_inode); | ||
302 | loff_t data_offset = 0; | ||
303 | loff_t pos; | ||
304 | int rc = 0; | ||
305 | |||
306 | if ((offset + size) > ecryptfs_file_size) { | ||
307 | rc = -EINVAL; | ||
308 | printk(KERN_ERR "%s: Attempt to read data past the end of the " | ||
309 | "file; offset = [%lld]; size = [%td]; " | ||
310 | "ecryptfs_file_size = [%lld]\n", | ||
311 | __func__, offset, size, ecryptfs_file_size); | ||
312 | goto out; | ||
313 | } | ||
314 | pos = offset; | ||
315 | while (pos < (offset + size)) { | ||
316 | pgoff_t ecryptfs_page_idx = (pos >> PAGE_CACHE_SHIFT); | ||
317 | size_t start_offset_in_page = (pos & ~PAGE_CACHE_MASK); | ||
318 | size_t num_bytes = (PAGE_CACHE_SIZE - start_offset_in_page); | ||
319 | size_t total_remaining_bytes = ((offset + size) - pos); | ||
320 | |||
321 | if (num_bytes > total_remaining_bytes) | ||
322 | num_bytes = total_remaining_bytes; | ||
323 | ecryptfs_page = ecryptfs_get_locked_page(ecryptfs_inode, | ||
324 | ecryptfs_page_idx); | ||
325 | if (IS_ERR(ecryptfs_page)) { | ||
326 | rc = PTR_ERR(ecryptfs_page); | ||
327 | printk(KERN_ERR "%s: Error getting page at " | ||
328 | "index [%ld] from eCryptfs inode " | ||
329 | "mapping; rc = [%d]\n", __func__, | ||
330 | ecryptfs_page_idx, rc); | ||
331 | goto out; | ||
332 | } | ||
333 | ecryptfs_page_virt = kmap_atomic(ecryptfs_page, KM_USER0); | ||
334 | memcpy((data + data_offset), | ||
335 | ((char *)ecryptfs_page_virt + start_offset_in_page), | ||
336 | num_bytes); | ||
337 | kunmap_atomic(ecryptfs_page_virt, KM_USER0); | ||
338 | flush_dcache_page(ecryptfs_page); | ||
339 | SetPageUptodate(ecryptfs_page); | ||
340 | unlock_page(ecryptfs_page); | ||
341 | page_cache_release(ecryptfs_page); | ||
342 | pos += num_bytes; | ||
343 | data_offset += num_bytes; | ||
344 | } | ||
345 | out: | ||
346 | return rc; | ||
347 | } | ||
348 | #endif /* 0 */ | ||
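A small sketch of the per-page write-loop shape the read_write.c hunks fix up: the chunk size is clamped against a 64-bit (loff_t) remainder so it is not truncated on 32-bit builds, and the loop bails out early when a fatal signal is pending. Illustrative skeleton only, with the actual page mapping and copy elided.

        #include <linux/mm.h>
        #include <linux/sched.h>

        static int example_write_loop(loff_t offset, size_t size)
        {
                loff_t pos = offset;

                while (pos < offset + (loff_t)size) {
                        size_t in_page = PAGE_SIZE - (pos & (PAGE_SIZE - 1));
                        loff_t remaining = (offset + (loff_t)size) - pos;

                        if (fatal_signal_pending(current))
                                return -EINTR;          /* don't keep looping for a dying task */
                        if (in_page > remaining)
                                in_page = remaining;    /* clamp to what is actually left */

                        /* ... map the page and copy in_page bytes at pos ... */

                        pos += in_page;
                }
                return 0;
        }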
diff --git a/fs/ecryptfs/super.c b/fs/ecryptfs/super.c index 9df7fd6e0c39..cf152823bbf4 100644 --- a/fs/ecryptfs/super.c +++ b/fs/ecryptfs/super.c | |||
@@ -30,6 +30,8 @@ | |||
30 | #include <linux/seq_file.h> | 30 | #include <linux/seq_file.h> |
31 | #include <linux/file.h> | 31 | #include <linux/file.h> |
32 | #include <linux/crypto.h> | 32 | #include <linux/crypto.h> |
33 | #include <linux/statfs.h> | ||
34 | #include <linux/magic.h> | ||
33 | #include "ecryptfs_kernel.h" | 35 | #include "ecryptfs_kernel.h" |
34 | 36 | ||
35 | struct kmem_cache *ecryptfs_inode_info_cache; | 37 | struct kmem_cache *ecryptfs_inode_info_cache; |
@@ -102,10 +104,20 @@ static void ecryptfs_destroy_inode(struct inode *inode) | |||
102 | static int ecryptfs_statfs(struct dentry *dentry, struct kstatfs *buf) | 104 | static int ecryptfs_statfs(struct dentry *dentry, struct kstatfs *buf) |
103 | { | 105 | { |
104 | struct dentry *lower_dentry = ecryptfs_dentry_to_lower(dentry); | 106 | struct dentry *lower_dentry = ecryptfs_dentry_to_lower(dentry); |
107 | int rc; | ||
105 | 108 | ||
106 | if (!lower_dentry->d_sb->s_op->statfs) | 109 | if (!lower_dentry->d_sb->s_op->statfs) |
107 | return -ENOSYS; | 110 | return -ENOSYS; |
108 | return lower_dentry->d_sb->s_op->statfs(lower_dentry, buf); | 111 | |
112 | rc = lower_dentry->d_sb->s_op->statfs(lower_dentry, buf); | ||
113 | if (rc) | ||
114 | return rc; | ||
115 | |||
116 | buf->f_type = ECRYPTFS_SUPER_MAGIC; | ||
117 | rc = ecryptfs_set_f_namelen(&buf->f_namelen, buf->f_namelen, | ||
118 | &ecryptfs_superblock_to_private(dentry->d_sb)->mount_crypt_stat); | ||
119 | |||
120 | return rc; | ||
109 | } | 121 | } |
110 | 122 | ||
111 | /** | 123 | /** |
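A hedged sketch of the stacked-filesystem statfs pattern this super.c hunk adopts: delegate to the lower superblock, then report the stacking layer's own magic number and shrink the advertised name length. The magic value and the caller-supplied limit below are placeholders, not the eCryptfs constants.

        #include <linux/fs.h>
        #include <linux/statfs.h>

        static int example_statfs_passthrough(struct dentry *lower, struct kstatfs *buf,
                                              unsigned long our_magic, long our_namelen)
        {
                int rc;

                if (!lower->d_sb->s_op->statfs)
                        return -ENOSYS;

                rc = lower->d_sb->s_op->statfs(lower, buf);
                if (rc)
                        return rc;

                buf->f_type = our_magic;                /* report the stacking layer's type */
                if (buf->f_namelen > our_namelen)
                        buf->f_namelen = our_namelen;   /* leave room for name-encoding overhead */
                return 0;
        }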
diff --git a/fs/eventpoll.c b/fs/eventpoll.c index aabdfc38cf24..4d9d3a45e356 100644 --- a/fs/eventpoll.c +++ b/fs/eventpoll.c | |||
@@ -320,6 +320,11 @@ static inline int ep_is_linked(struct list_head *p) | |||
320 | return !list_empty(p); | 320 | return !list_empty(p); |
321 | } | 321 | } |
322 | 322 | ||
323 | static inline struct eppoll_entry *ep_pwq_from_wait(wait_queue_t *p) | ||
324 | { | ||
325 | return container_of(p, struct eppoll_entry, wait); | ||
326 | } | ||
327 | |||
323 | /* Get the "struct epitem" from a wait queue pointer */ | 328 | /* Get the "struct epitem" from a wait queue pointer */ |
324 | static inline struct epitem *ep_item_from_wait(wait_queue_t *p) | 329 | static inline struct epitem *ep_item_from_wait(wait_queue_t *p) |
325 | { | 330 | { |
@@ -467,6 +472,18 @@ static void ep_poll_safewake(wait_queue_head_t *wq) | |||
467 | put_cpu(); | 472 | put_cpu(); |
468 | } | 473 | } |
469 | 474 | ||
475 | static void ep_remove_wait_queue(struct eppoll_entry *pwq) | ||
476 | { | ||
477 | wait_queue_head_t *whead; | ||
478 | |||
479 | rcu_read_lock(); | ||
480 | /* If it is cleared by POLLFREE, it should be rcu-safe */ | ||
481 | whead = rcu_dereference(pwq->whead); | ||
482 | if (whead) | ||
483 | remove_wait_queue(whead, &pwq->wait); | ||
484 | rcu_read_unlock(); | ||
485 | } | ||
486 | |||
470 | /* | 487 | /* |
471 | * This function unregisters poll callbacks from the associated file | 488 | * This function unregisters poll callbacks from the associated file |
472 | * descriptor. Must be called with "mtx" held (or "epmutex" if called from | 489 | * descriptor. Must be called with "mtx" held (or "epmutex" if called from |
@@ -481,7 +498,7 @@ static void ep_unregister_pollwait(struct eventpoll *ep, struct epitem *epi) | |||
481 | pwq = list_first_entry(lsthead, struct eppoll_entry, llink); | 498 | pwq = list_first_entry(lsthead, struct eppoll_entry, llink); |
482 | 499 | ||
483 | list_del(&pwq->llink); | 500 | list_del(&pwq->llink); |
484 | remove_wait_queue(pwq->whead, &pwq->wait); | 501 | ep_remove_wait_queue(pwq); |
485 | kmem_cache_free(pwq_cache, pwq); | 502 | kmem_cache_free(pwq_cache, pwq); |
486 | } | 503 | } |
487 | } | 504 | } |
@@ -842,6 +859,17 @@ static int ep_poll_callback(wait_queue_t *wait, unsigned mode, int sync, void *k | |||
842 | struct epitem *epi = ep_item_from_wait(wait); | 859 | struct epitem *epi = ep_item_from_wait(wait); |
843 | struct eventpoll *ep = epi->ep; | 860 | struct eventpoll *ep = epi->ep; |
844 | 861 | ||
862 | if ((unsigned long)key & POLLFREE) { | ||
863 | ep_pwq_from_wait(wait)->whead = NULL; | ||
864 | /* | ||
865 | * whead = NULL above can race with ep_remove_wait_queue() | ||
866 | * which can do another remove_wait_queue() after us, so we | ||
867 | * can't use __remove_wait_queue(). whead->lock is held by | ||
868 | * the caller. | ||
869 | */ | ||
870 | list_del_init(&wait->task_list); | ||
871 | } | ||
872 | |||
845 | spin_lock_irqsave(&ep->lock, flags); | 873 | spin_lock_irqsave(&ep->lock, flags); |
846 | 874 | ||
847 | /* | 875 | /* |
@@ -960,6 +988,10 @@ static int path_count[PATH_ARR_SIZE]; | |||
960 | 988 | ||
961 | static int path_count_inc(int nests) | 989 | static int path_count_inc(int nests) |
962 | { | 990 | { |
991 | /* Allow an arbitrary number of depth 1 paths */ | ||
992 | if (nests == 0) | ||
993 | return 0; | ||
994 | |||
963 | if (++path_count[nests] > path_limits[nests]) | 995 | if (++path_count[nests] > path_limits[nests]) |
964 | return -1; | 996 | return -1; |
965 | return 0; | 997 | return 0; |
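As a generic aside, a sketch of the retractable-pointer pattern the POLLFREE handling above depends on: one side may clear the wait-queue-head pointer, so the teardown side only dereferences it under rcu_read_lock() and tolerates seeing NULL. Illustrative structure, not the eventpoll code itself.

        #include <linux/rcupdate.h>
        #include <linux/wait.h>

        struct watcher {
                wait_queue_head_t __rcu *whead;         /* may be cleared on POLLFREE */
                wait_queue_t wait;
        };

        static void watcher_detach(struct watcher *w)
        {
                wait_queue_head_t *whead;

                rcu_read_lock();
                whead = rcu_dereference(w->whead);
                if (whead)                              /* skip if the waker already tore it down */
                        remove_wait_queue(whead, &w->wait);
                rcu_read_unlock();
        }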
diff --git a/fs/exec.c b/fs/exec.c --- a/fs/exec.c +++ b/fs/exec.c | |||
@@ -1071,6 +1071,21 @@ void set_task_comm(struct task_struct *tsk, char *buf) | |||
1071 | perf_event_comm(tsk); | 1071 | perf_event_comm(tsk); |
1072 | } | 1072 | } |
1073 | 1073 | ||
1074 | static void filename_to_taskname(char *tcomm, const char *fn, unsigned int len) | ||
1075 | { | ||
1076 | int i, ch; | ||
1077 | |||
1078 | /* Copies the binary name from after last slash */ | ||
1079 | for (i = 0; (ch = *(fn++)) != '\0';) { | ||
1080 | if (ch == '/') | ||
1081 | i = 0; /* overwrite what we wrote */ | ||
1082 | else | ||
1083 | if (i < len - 1) | ||
1084 | tcomm[i++] = ch; | ||
1085 | } | ||
1086 | tcomm[i] = '\0'; | ||
1087 | } | ||
1088 | |||
1074 | int flush_old_exec(struct linux_binprm * bprm) | 1089 | int flush_old_exec(struct linux_binprm * bprm) |
1075 | { | 1090 | { |
1076 | int retval; | 1091 | int retval; |
@@ -1085,6 +1100,7 @@ int flush_old_exec(struct linux_binprm * bprm) | |||
1085 | 1100 | ||
1086 | set_mm_exe_file(bprm->mm, bprm->file); | 1101 | set_mm_exe_file(bprm->mm, bprm->file); |
1087 | 1102 | ||
1103 | filename_to_taskname(bprm->tcomm, bprm->filename, sizeof(bprm->tcomm)); | ||
1088 | /* | 1104 | /* |
1089 | * Release all of the old mmap stuff | 1105 | * Release all of the old mmap stuff |
1090 | */ | 1106 | */ |
@@ -1116,10 +1132,6 @@ EXPORT_SYMBOL(would_dump); | |||
1116 | 1132 | ||
1117 | void setup_new_exec(struct linux_binprm * bprm) | 1133 | void setup_new_exec(struct linux_binprm * bprm) |
1118 | { | 1134 | { |
1119 | int i, ch; | ||
1120 | const char *name; | ||
1121 | char tcomm[sizeof(current->comm)]; | ||
1122 | |||
1123 | arch_pick_mmap_layout(current->mm); | 1135 | arch_pick_mmap_layout(current->mm); |
1124 | 1136 | ||
1125 | /* This is the point of no return */ | 1137 | /* This is the point of no return */ |
@@ -1130,18 +1142,7 @@ void setup_new_exec(struct linux_binprm * bprm) | |||
1130 | else | 1142 | else |
1131 | set_dumpable(current->mm, suid_dumpable); | 1143 | set_dumpable(current->mm, suid_dumpable); |
1132 | 1144 | ||
1133 | name = bprm->filename; | 1145 | set_task_comm(current, bprm->tcomm); |
1134 | |||
1135 | /* Copies the binary name from after last slash */ | ||
1136 | for (i=0; (ch = *(name++)) != '\0';) { | ||
1137 | if (ch == '/') | ||
1138 | i = 0; /* overwrite what we wrote */ | ||
1139 | else | ||
1140 | if (i < (sizeof(tcomm) - 1)) | ||
1141 | tcomm[i++] = ch; | ||
1142 | } | ||
1143 | tcomm[i] = '\0'; | ||
1144 | set_task_comm(current, tcomm); | ||
1145 | 1146 | ||
1146 | /* Set the new mm task size. We have to do that late because it may | 1147 | /* Set the new mm task size. We have to do that late because it may |
1147 | * depend on TIF_32BIT which is only updated in flush_thread() on | 1148 | * depend on TIF_32BIT which is only updated in flush_thread() on |
@@ -1914,7 +1915,6 @@ static int coredump_wait(int exit_code, struct core_state *core_state) | |||
1914 | { | 1915 | { |
1915 | struct task_struct *tsk = current; | 1916 | struct task_struct *tsk = current; |
1916 | struct mm_struct *mm = tsk->mm; | 1917 | struct mm_struct *mm = tsk->mm; |
1917 | struct completion *vfork_done; | ||
1918 | int core_waiters = -EBUSY; | 1918 | int core_waiters = -EBUSY; |
1919 | 1919 | ||
1920 | init_completion(&core_state->startup); | 1920 | init_completion(&core_state->startup); |
@@ -1926,22 +1926,9 @@ static int coredump_wait(int exit_code, struct core_state *core_state) | |||
1926 | core_waiters = zap_threads(tsk, mm, core_state, exit_code); | 1926 | core_waiters = zap_threads(tsk, mm, core_state, exit_code); |
1927 | up_write(&mm->mmap_sem); | 1927 | up_write(&mm->mmap_sem); |
1928 | 1928 | ||
1929 | if (unlikely(core_waiters < 0)) | 1929 | if (core_waiters > 0) |
1930 | goto fail; | ||
1931 | |||
1932 | /* | ||
1933 | * Make sure nobody is waiting for us to release the VM, | ||
1934 | * otherwise we can deadlock when we wait on each other | ||
1935 | */ | ||
1936 | vfork_done = tsk->vfork_done; | ||
1937 | if (vfork_done) { | ||
1938 | tsk->vfork_done = NULL; | ||
1939 | complete(vfork_done); | ||
1940 | } | ||
1941 | |||
1942 | if (core_waiters) | ||
1943 | wait_for_completion(&core_state->startup); | 1930 | wait_for_completion(&core_state->startup); |
1944 | fail: | 1931 | |
1945 | return core_waiters; | 1932 | return core_waiters; |
1946 | } | 1933 | } |
1947 | 1934 | ||
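The basename-copy loop that the new filename_to_taskname() factors out of setup_new_exec() is simple enough to demonstrate stand-alone. This user-space version, with an assumed 16-byte buffer mirroring TASK_COMM_LEN, restarts the output index at every '/' and truncates to the buffer size.

        #include <stdio.h>

        static void filename_to_taskname(char *tcomm, const char *fn, unsigned int len)
        {
                unsigned int i;
                int ch;

                /* Copy the binary name from after the last slash */
                for (i = 0; (ch = *(fn++)) != '\0';) {
                        if (ch == '/')
                                i = 0;                  /* overwrite what we wrote */
                        else if (i < len - 1)
                                tcomm[i++] = ch;
                }
                tcomm[i] = '\0';
        }

        int main(void)
        {
                char comm[16];  /* TASK_COMM_LEN-sized buffer, as in the kernel */

                filename_to_taskname(comm, "/usr/bin/very-long-program-name", sizeof(comm));
                printf("%s\n", comm);   /* prints "very-long-progr" */
                return 0;
        }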
diff --git a/fs/ext2/ioctl.c b/fs/ext2/ioctl.c index 1089f760c847..2de655f5d625 100644 --- a/fs/ext2/ioctl.c +++ b/fs/ext2/ioctl.c | |||
@@ -77,10 +77,11 @@ long ext2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) | |||
77 | flags = flags & EXT2_FL_USER_MODIFIABLE; | 77 | flags = flags & EXT2_FL_USER_MODIFIABLE; |
78 | flags |= oldflags & ~EXT2_FL_USER_MODIFIABLE; | 78 | flags |= oldflags & ~EXT2_FL_USER_MODIFIABLE; |
79 | ei->i_flags = flags; | 79 | ei->i_flags = flags; |
80 | mutex_unlock(&inode->i_mutex); | ||
81 | 80 | ||
82 | ext2_set_inode_flags(inode); | 81 | ext2_set_inode_flags(inode); |
83 | inode->i_ctime = CURRENT_TIME_SEC; | 82 | inode->i_ctime = CURRENT_TIME_SEC; |
83 | mutex_unlock(&inode->i_mutex); | ||
84 | |||
84 | mark_inode_dirty(inode); | 85 | mark_inode_dirty(inode); |
85 | setflags_out: | 86 | setflags_out: |
86 | mnt_drop_write_file(filp); | 87 | mnt_drop_write_file(filp); |
@@ -88,20 +89,29 @@ setflags_out: | |||
88 | } | 89 | } |
89 | case EXT2_IOC_GETVERSION: | 90 | case EXT2_IOC_GETVERSION: |
90 | return put_user(inode->i_generation, (int __user *) arg); | 91 | return put_user(inode->i_generation, (int __user *) arg); |
91 | case EXT2_IOC_SETVERSION: | 92 | case EXT2_IOC_SETVERSION: { |
93 | __u32 generation; | ||
94 | |||
92 | if (!inode_owner_or_capable(inode)) | 95 | if (!inode_owner_or_capable(inode)) |
93 | return -EPERM; | 96 | return -EPERM; |
94 | ret = mnt_want_write_file(filp); | 97 | ret = mnt_want_write_file(filp); |
95 | if (ret) | 98 | if (ret) |
96 | return ret; | 99 | return ret; |
97 | if (get_user(inode->i_generation, (int __user *) arg)) { | 100 | if (get_user(generation, (int __user *) arg)) { |
98 | ret = -EFAULT; | 101 | ret = -EFAULT; |
99 | } else { | 102 | goto setversion_out; |
100 | inode->i_ctime = CURRENT_TIME_SEC; | ||
101 | mark_inode_dirty(inode); | ||
102 | } | 103 | } |
104 | |||
105 | mutex_lock(&inode->i_mutex); | ||
106 | inode->i_ctime = CURRENT_TIME_SEC; | ||
107 | inode->i_generation = generation; | ||
108 | mutex_unlock(&inode->i_mutex); | ||
109 | |||
110 | mark_inode_dirty(inode); | ||
111 | setversion_out: | ||
103 | mnt_drop_write_file(filp); | 112 | mnt_drop_write_file(filp); |
104 | return ret; | 113 | return ret; |
114 | } | ||
105 | case EXT2_IOC_GETRSVSZ: | 115 | case EXT2_IOC_GETRSVSZ: |
106 | if (test_opt(inode->i_sb, RESERVATION) | 116 | if (test_opt(inode->i_sb, RESERVATION) |
107 | && S_ISREG(inode->i_mode) | 117 | && S_ISREG(inode->i_mode) |
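A short sketch of the copy-then-lock ordering the EXT2_IOC_SETVERSION hunk introduces: get_user() can fault and sleep, so the value is fetched into a local first, and i_generation and i_ctime are then updated together under i_mutex. This is an illustrative skeleton, not the full ioctl path with mnt_want_write_file() handling.

        #include <linux/fs.h>
        #include <linux/uaccess.h>

        static int example_setversion(struct inode *inode, int __user *arg)
        {
                __u32 generation;

                if (get_user(generation, arg))
                        return -EFAULT;         /* fault handled before any locking */

                mutex_lock(&inode->i_mutex);
                inode->i_ctime = CURRENT_TIME_SEC;
                inode->i_generation = generation;
                mutex_unlock(&inode->i_mutex);

                mark_inode_dirty(inode);
                return 0;
        }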
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index f855916657ba..5b4a9362d5aa 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c | |||
@@ -53,14 +53,6 @@ struct wb_writeback_work { | |||
53 | }; | 53 | }; |
54 | 54 | ||
55 | /* | 55 | /* |
56 | * Include the creation of the trace points after defining the | ||
57 | * wb_writeback_work structure so that the definition remains local to this | ||
58 | * file. | ||
59 | */ | ||
60 | #define CREATE_TRACE_POINTS | ||
61 | #include <trace/events/writeback.h> | ||
62 | |||
63 | /* | ||
64 | * We don't actually have pdflush, but this one is exported though /proc... | 56 | * We don't actually have pdflush, but this one is exported though /proc... |
65 | */ | 57 | */ |
66 | int nr_pdflush_threads; | 58 | int nr_pdflush_threads; |
@@ -92,6 +84,14 @@ static inline struct inode *wb_inode(struct list_head *head) | |||
92 | return list_entry(head, struct inode, i_wb_list); | 84 | return list_entry(head, struct inode, i_wb_list); |
93 | } | 85 | } |
94 | 86 | ||
87 | /* | ||
88 | * Include the creation of the trace points after defining the | ||
89 | * wb_writeback_work structure and inline functions so that the definition | ||
90 | * remains local to this file. | ||
91 | */ | ||
92 | #define CREATE_TRACE_POINTS | ||
93 | #include <trace/events/writeback.h> | ||
94 | |||
95 | /* Wakeup flusher thread or forker thread to fork it. Requires bdi->wb_lock. */ | 95 | /* Wakeup flusher thread or forker thread to fork it. Requires bdi->wb_lock. */ |
96 | static void bdi_wakeup_flusher(struct backing_dev_info *bdi) | 96 | static void bdi_wakeup_flusher(struct backing_dev_info *bdi) |
97 | { | 97 | { |
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c index 376816fcd040..351a3e797789 100644 --- a/fs/gfs2/glock.c +++ b/fs/gfs2/glock.c | |||
@@ -167,14 +167,19 @@ void gfs2_glock_add_to_lru(struct gfs2_glock *gl) | |||
167 | spin_unlock(&lru_lock); | 167 | spin_unlock(&lru_lock); |
168 | } | 168 | } |
169 | 169 | ||
170 | static void gfs2_glock_remove_from_lru(struct gfs2_glock *gl) | 170 | static void __gfs2_glock_remove_from_lru(struct gfs2_glock *gl) |
171 | { | 171 | { |
172 | spin_lock(&lru_lock); | ||
173 | if (!list_empty(&gl->gl_lru)) { | 172 | if (!list_empty(&gl->gl_lru)) { |
174 | list_del_init(&gl->gl_lru); | 173 | list_del_init(&gl->gl_lru); |
175 | atomic_dec(&lru_count); | 174 | atomic_dec(&lru_count); |
176 | clear_bit(GLF_LRU, &gl->gl_flags); | 175 | clear_bit(GLF_LRU, &gl->gl_flags); |
177 | } | 176 | } |
177 | } | ||
178 | |||
179 | static void gfs2_glock_remove_from_lru(struct gfs2_glock *gl) | ||
180 | { | ||
181 | spin_lock(&lru_lock); | ||
182 | __gfs2_glock_remove_from_lru(gl); | ||
178 | spin_unlock(&lru_lock); | 183 | spin_unlock(&lru_lock); |
179 | } | 184 | } |
180 | 185 | ||
@@ -217,11 +222,12 @@ void gfs2_glock_put(struct gfs2_glock *gl) | |||
217 | struct gfs2_sbd *sdp = gl->gl_sbd; | 222 | struct gfs2_sbd *sdp = gl->gl_sbd; |
218 | struct address_space *mapping = gfs2_glock2aspace(gl); | 223 | struct address_space *mapping = gfs2_glock2aspace(gl); |
219 | 224 | ||
220 | if (atomic_dec_and_test(&gl->gl_ref)) { | 225 | if (atomic_dec_and_lock(&gl->gl_ref, &lru_lock)) { |
226 | __gfs2_glock_remove_from_lru(gl); | ||
227 | spin_unlock(&lru_lock); | ||
221 | spin_lock_bucket(gl->gl_hash); | 228 | spin_lock_bucket(gl->gl_hash); |
222 | hlist_bl_del_rcu(&gl->gl_list); | 229 | hlist_bl_del_rcu(&gl->gl_list); |
223 | spin_unlock_bucket(gl->gl_hash); | 230 | spin_unlock_bucket(gl->gl_hash); |
224 | gfs2_glock_remove_from_lru(gl); | ||
225 | GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders)); | 231 | GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders)); |
226 | GLOCK_BUG_ON(gl, mapping && mapping->nrpages); | 232 | GLOCK_BUG_ON(gl, mapping && mapping->nrpages); |
227 | trace_gfs2_glock_put(gl); | 233 | trace_gfs2_glock_put(gl); |
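A generic sketch of the atomic_dec_and_lock() idiom the glock.c hunk switches to: only the thread that drops the final reference acquires the list lock, and it already holds that lock when it starts unlinking the object. Illustrative object type, not a GFS2 glock.

        #include <linux/atomic.h>
        #include <linux/list.h>
        #include <linux/slab.h>
        #include <linux/spinlock.h>

        static DEFINE_SPINLOCK(obj_lru_lock);

        struct obj {
                atomic_t ref;
                struct list_head lru;
        };

        static void obj_put(struct obj *o)
        {
                /* Returns true, with the lock held, only when the count hits zero */
                if (atomic_dec_and_lock(&o->ref, &obj_lru_lock)) {
                        list_del_init(&o->lru);         /* unlink under the LRU lock */
                        spin_unlock(&obj_lru_lock);
                        kfree(o);                       /* no references remain */
                }
        }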
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c index a7d611b93f0f..56987460cdae 100644 --- a/fs/gfs2/inode.c +++ b/fs/gfs2/inode.c | |||
@@ -391,10 +391,6 @@ static int alloc_dinode(struct gfs2_inode *dip, u64 *no_addr, u64 *generation) | |||
391 | int error; | 391 | int error; |
392 | int dblocks = 1; | 392 | int dblocks = 1; |
393 | 393 | ||
394 | error = gfs2_rindex_update(sdp); | ||
395 | if (error) | ||
396 | fs_warn(sdp, "rindex update returns %d\n", error); | ||
397 | |||
398 | error = gfs2_inplace_reserve(dip, RES_DINODE); | 394 | error = gfs2_inplace_reserve(dip, RES_DINODE); |
399 | if (error) | 395 | if (error) |
400 | goto out; | 396 | goto out; |
@@ -1043,6 +1039,7 @@ static int gfs2_unlink(struct inode *dir, struct dentry *dentry) | |||
1043 | rgd = gfs2_blk2rgrpd(sdp, ip->i_no_addr); | 1039 | rgd = gfs2_blk2rgrpd(sdp, ip->i_no_addr); |
1044 | if (!rgd) | 1040 | if (!rgd) |
1045 | goto out_inodes; | 1041 | goto out_inodes; |
1042 | |||
1046 | gfs2_holder_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, ghs + 2); | 1043 | gfs2_holder_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, ghs + 2); |
1047 | 1044 | ||
1048 | 1045 | ||
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c index 6aacf3f230a2..24f609c9ef91 100644 --- a/fs/gfs2/ops_fstype.c +++ b/fs/gfs2/ops_fstype.c | |||
@@ -800,6 +800,11 @@ static int init_inodes(struct gfs2_sbd *sdp, int undo) | |||
800 | fs_err(sdp, "can't get quota file inode: %d\n", error); | 800 | fs_err(sdp, "can't get quota file inode: %d\n", error); |
801 | goto fail_rindex; | 801 | goto fail_rindex; |
802 | } | 802 | } |
803 | |||
804 | error = gfs2_rindex_update(sdp); | ||
805 | if (error) | ||
806 | goto fail_qinode; | ||
807 | |||
803 | return 0; | 808 | return 0; |
804 | 809 | ||
805 | fail_qinode: | 810 | fail_qinode: |
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c index 981bfa32121a..49ada95209d0 100644 --- a/fs/gfs2/rgrp.c +++ b/fs/gfs2/rgrp.c | |||
@@ -683,16 +683,21 @@ int gfs2_rindex_update(struct gfs2_sbd *sdp) | |||
683 | struct gfs2_glock *gl = ip->i_gl; | 683 | struct gfs2_glock *gl = ip->i_gl; |
684 | struct gfs2_holder ri_gh; | 684 | struct gfs2_holder ri_gh; |
685 | int error = 0; | 685 | int error = 0; |
686 | int unlock_required = 0; | ||
686 | 687 | ||
687 | /* Read new copy from disk if we don't have the latest */ | 688 | /* Read new copy from disk if we don't have the latest */ |
688 | if (!sdp->sd_rindex_uptodate) { | 689 | if (!sdp->sd_rindex_uptodate) { |
689 | mutex_lock(&sdp->sd_rindex_mutex); | 690 | mutex_lock(&sdp->sd_rindex_mutex); |
690 | error = gfs2_glock_nq_init(gl, LM_ST_SHARED, 0, &ri_gh); | 691 | if (!gfs2_glock_is_locked_by_me(gl)) { |
691 | if (error) | 692 | error = gfs2_glock_nq_init(gl, LM_ST_SHARED, 0, &ri_gh); |
692 | return error; | 693 | if (error) |
694 | return error; | ||
695 | unlock_required = 1; | ||
696 | } | ||
693 | if (!sdp->sd_rindex_uptodate) | 697 | if (!sdp->sd_rindex_uptodate) |
694 | error = gfs2_ri_update(ip); | 698 | error = gfs2_ri_update(ip); |
695 | gfs2_glock_dq_uninit(&ri_gh); | 699 | if (unlock_required) |
700 | gfs2_glock_dq_uninit(&ri_gh); | ||
696 | mutex_unlock(&sdp->sd_rindex_mutex); | 701 | mutex_unlock(&sdp->sd_rindex_mutex); |
697 | } | 702 | } |
698 | 703 | ||
diff --git a/fs/inode.c b/fs/inode.c index fb10d86ffad7..83ab215baab1 100644 --- a/fs/inode.c +++ b/fs/inode.c | |||
@@ -938,8 +938,7 @@ void lockdep_annotate_inode_mutex_key(struct inode *inode) | |||
938 | struct file_system_type *type = inode->i_sb->s_type; | 938 | struct file_system_type *type = inode->i_sb->s_type; |
939 | 939 | ||
940 | /* Set new key only if filesystem hasn't already changed it */ | 940 | /* Set new key only if filesystem hasn't already changed it */ |
941 | if (!lockdep_match_class(&inode->i_mutex, | 941 | if (lockdep_match_class(&inode->i_mutex, &type->i_mutex_key)) { |
942 | &type->i_mutex_key)) { | ||
943 | /* | 942 | /* |
944 | * ensure nobody is actually holding i_mutex | 943 | * ensure nobody is actually holding i_mutex |
945 | */ | 944 | */ |
@@ -966,6 +965,7 @@ void unlock_new_inode(struct inode *inode) | |||
966 | spin_lock(&inode->i_lock); | 965 | spin_lock(&inode->i_lock); |
967 | WARN_ON(!(inode->i_state & I_NEW)); | 966 | WARN_ON(!(inode->i_state & I_NEW)); |
968 | inode->i_state &= ~I_NEW; | 967 | inode->i_state &= ~I_NEW; |
968 | smp_mb(); | ||
969 | wake_up_bit(&inode->i_state, __I_NEW); | 969 | wake_up_bit(&inode->i_state, __I_NEW); |
970 | spin_unlock(&inode->i_lock); | 970 | spin_unlock(&inode->i_lock); |
971 | } | 971 | } |
@@ -1651,7 +1651,7 @@ __setup("ihash_entries=", set_ihash_entries); | |||
1651 | */ | 1651 | */ |
1652 | void __init inode_init_early(void) | 1652 | void __init inode_init_early(void) |
1653 | { | 1653 | { |
1654 | int loop; | 1654 | unsigned int loop; |
1655 | 1655 | ||
1656 | /* If hashes are distributed across NUMA nodes, defer | 1656 | /* If hashes are distributed across NUMA nodes, defer |
1657 | * hash allocation until vmalloc space is available. | 1657 | * hash allocation until vmalloc space is available. |
@@ -1669,13 +1669,13 @@ void __init inode_init_early(void) | |||
1669 | &i_hash_mask, | 1669 | &i_hash_mask, |
1670 | 0); | 1670 | 0); |
1671 | 1671 | ||
1672 | for (loop = 0; loop < (1 << i_hash_shift); loop++) | 1672 | for (loop = 0; loop < (1U << i_hash_shift); loop++) |
1673 | INIT_HLIST_HEAD(&inode_hashtable[loop]); | 1673 | INIT_HLIST_HEAD(&inode_hashtable[loop]); |
1674 | } | 1674 | } |
1675 | 1675 | ||
1676 | void __init inode_init(void) | 1676 | void __init inode_init(void) |
1677 | { | 1677 | { |
1678 | int loop; | 1678 | unsigned int loop; |
1679 | 1679 | ||
1680 | /* inode slab cache */ | 1680 | /* inode slab cache */ |
1681 | inode_cachep = kmem_cache_create("inode_cache", | 1681 | inode_cachep = kmem_cache_create("inode_cache", |
@@ -1699,7 +1699,7 @@ void __init inode_init(void) | |||
1699 | &i_hash_mask, | 1699 | &i_hash_mask, |
1700 | 0); | 1700 | 0); |
1701 | 1701 | ||
1702 | for (loop = 0; loop < (1 << i_hash_shift); loop++) | 1702 | for (loop = 0; loop < (1U << i_hash_shift); loop++) |
1703 | INIT_HLIST_HEAD(&inode_hashtable[loop]); | 1703 | INIT_HLIST_HEAD(&inode_hashtable[loop]); |
1704 | } | 1704 | } |
1705 | 1705 | ||
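A brief sketch of the barrier/wakeup pairing behind the unlock_new_inode() change above: the state update must be ordered before wake_up_bit() scans for waiters, otherwise a sleeper in wait_on_bit() can sample the bit just before the store and miss the wakeup. Generic illustration with an assumed state word, not fs/inode.c itself.

        #include <linux/bitops.h>
        #include <linux/wait.h>

        #define EXAMPLE_NEW_BIT 0       /* bit number within the state word */

        static void example_mark_ready(unsigned long *state)
        {
                clear_bit(EXAMPLE_NEW_BIT, state);
                smp_mb();               /* order the clear before the waiter check */
                wake_up_bit(state, EXAMPLE_NEW_BIT);
        }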
diff --git a/fs/ioprio.c b/fs/ioprio.c index f84b380d65e5..0f1b9515213b 100644 --- a/fs/ioprio.c +++ b/fs/ioprio.c | |||
@@ -51,7 +51,7 @@ int set_task_ioprio(struct task_struct *task, int ioprio) | |||
51 | ioc = get_task_io_context(task, GFP_ATOMIC, NUMA_NO_NODE); | 51 | ioc = get_task_io_context(task, GFP_ATOMIC, NUMA_NO_NODE); |
52 | if (ioc) { | 52 | if (ioc) { |
53 | ioc_ioprio_changed(ioc, ioprio); | 53 | ioc_ioprio_changed(ioc, ioprio); |
54 | put_io_context(ioc, NULL); | 54 | put_io_context(ioc); |
55 | } | 55 | } |
56 | 56 | ||
57 | return err; | 57 | return err; |
diff --git a/fs/jbd/checkpoint.c b/fs/jbd/checkpoint.c index 5d1a00a5041b..05f0754f2b46 100644 --- a/fs/jbd/checkpoint.c +++ b/fs/jbd/checkpoint.c | |||
@@ -453,8 +453,6 @@ out: | |||
453 | * | 453 | * |
454 | * Return <0 on error, 0 on success, 1 if there was nothing to clean up. | 454 | * Return <0 on error, 0 on success, 1 if there was nothing to clean up. |
455 | * | 455 | * |
456 | * Called with the journal lock held. | ||
457 | * | ||
458 | * This is the only part of the journaling code which really needs to be | 456 | * This is the only part of the journaling code which really needs to be |
459 | * aware of transaction aborts. Checkpointing involves writing to the | 457 | * aware of transaction aborts. Checkpointing involves writing to the |
460 | * main filesystem area rather than to the journal, so it can proceed | 458 | * main filesystem area rather than to the journal, so it can proceed |
@@ -472,13 +470,14 @@ int cleanup_journal_tail(journal_t *journal) | |||
472 | if (is_journal_aborted(journal)) | 470 | if (is_journal_aborted(journal)) |
473 | return 1; | 471 | return 1; |
474 | 472 | ||
475 | /* OK, work out the oldest transaction remaining in the log, and | 473 | /* |
474 | * OK, work out the oldest transaction remaining in the log, and | ||
476 | * the log block it starts at. | 475 | * the log block it starts at. |
477 | * | 476 | * |
478 | * If the log is now empty, we need to work out which is the | 477 | * If the log is now empty, we need to work out which is the |
479 | * next transaction ID we will write, and where it will | 478 | * next transaction ID we will write, and where it will |
480 | * start. */ | 479 | * start. |
481 | 480 | */ | |
482 | spin_lock(&journal->j_state_lock); | 481 | spin_lock(&journal->j_state_lock); |
483 | spin_lock(&journal->j_list_lock); | 482 | spin_lock(&journal->j_list_lock); |
484 | transaction = journal->j_checkpoint_transactions; | 483 | transaction = journal->j_checkpoint_transactions; |
@@ -504,7 +503,25 @@ int cleanup_journal_tail(journal_t *journal) | |||
504 | spin_unlock(&journal->j_state_lock); | 503 | spin_unlock(&journal->j_state_lock); |
505 | return 1; | 504 | return 1; |
506 | } | 505 | } |
506 | spin_unlock(&journal->j_state_lock); | ||
507 | |||
508 | /* | ||
509 | * We need to make sure that any blocks that were recently written out | ||
510 | * --- perhaps by log_do_checkpoint() --- are flushed out before we | ||
511 | * drop the transactions from the journal. It's unlikely this will be | ||
512 | * necessary, especially with an appropriately sized journal, but we | ||
513 | * need this to guarantee correctness. Fortunately | ||
514 | * cleanup_journal_tail() doesn't get called all that often. | ||
515 | */ | ||
516 | if (journal->j_flags & JFS_BARRIER) | ||
517 | blkdev_issue_flush(journal->j_fs_dev, GFP_KERNEL, NULL); | ||
507 | 518 | ||
519 | spin_lock(&journal->j_state_lock); | ||
520 | if (!tid_gt(first_tid, journal->j_tail_sequence)) { | ||
521 | spin_unlock(&journal->j_state_lock); | ||
522 | /* Someone else cleaned up journal so return 0 */ | ||
523 | return 0; | ||
524 | } | ||
508 | /* OK, update the superblock to recover the freed space. | 525 | /* OK, update the superblock to recover the freed space. |
509 | * Physical blocks come first: have we wrapped beyond the end of | 526 | * Physical blocks come first: have we wrapped beyond the end of |
510 | * the log? */ | 527 | * the log? */ |
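A compact sketch of the drop-lock, flush, retake-and-recheck shape this cleanup_journal_tail() hunk adds: the cache flush may sleep, so after reacquiring the state lock the code must confirm that nobody advanced the tail in the meantime. The structure and field names are illustrative, not jbd's journal_t, and the plain comparison stands in for jbd's wrap-safe tid_gt().

        #include <linux/blkdev.h>
        #include <linux/spinlock.h>

        struct example_journal {
                spinlock_t state_lock;
                unsigned int tail_sequence;
                struct block_device *fs_dev;
                int barrier_enabled;
        };

        /* Called with j->state_lock held; returns 0 if someone else already moved the tail */
        static int example_update_tail(struct example_journal *j, unsigned int first_tid)
        {
                spin_unlock(&j->state_lock);

                if (j->barrier_enabled)
                        blkdev_issue_flush(j->fs_dev, GFP_KERNEL, NULL);        /* may sleep */

                spin_lock(&j->state_lock);
                if (first_tid <= j->tail_sequence) {
                        spin_unlock(&j->state_lock);
                        return 0;               /* tail already advanced past us */
                }
                /* ... update the superblock with the new tail here ... */
                spin_unlock(&j->state_lock);
                return 1;
        }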
diff --git a/fs/jbd/recovery.c b/fs/jbd/recovery.c index 5b43e96788e6..008bf062fd26 100644 --- a/fs/jbd/recovery.c +++ b/fs/jbd/recovery.c | |||
@@ -20,6 +20,7 @@ | |||
20 | #include <linux/fs.h> | 20 | #include <linux/fs.h> |
21 | #include <linux/jbd.h> | 21 | #include <linux/jbd.h> |
22 | #include <linux/errno.h> | 22 | #include <linux/errno.h> |
23 | #include <linux/blkdev.h> | ||
23 | #endif | 24 | #endif |
24 | 25 | ||
25 | /* | 26 | /* |
@@ -263,6 +264,9 @@ int journal_recover(journal_t *journal) | |||
263 | err2 = sync_blockdev(journal->j_fs_dev); | 264 | err2 = sync_blockdev(journal->j_fs_dev); |
264 | if (!err) | 265 | if (!err) |
265 | err = err2; | 266 | err = err2; |
267 | /* Flush disk caches to get replayed data on the permanent storage */ | ||
268 | if (journal->j_flags & JFS_BARRIER) | ||
269 | blkdev_issue_flush(journal->j_fs_dev, GFP_KERNEL, NULL); | ||
266 | 270 | ||
267 | return err; | 271 | return err; |
268 | } | 272 | } |
diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c index a01cdad6aad1..eafb8d37a6fb 100644 --- a/fs/jffs2/erase.c +++ b/fs/jffs2/erase.c | |||
@@ -335,7 +335,7 @@ static int jffs2_block_check_erase(struct jffs2_sb_info *c, struct jffs2_erasebl | |||
335 | void *ebuf; | 335 | void *ebuf; |
336 | uint32_t ofs; | 336 | uint32_t ofs; |
337 | size_t retlen; | 337 | size_t retlen; |
338 | int ret = -EIO; | 338 | int ret; |
339 | unsigned long *wordebuf; | 339 | unsigned long *wordebuf; |
340 | 340 | ||
341 | ret = mtd_point(c->mtd, jeb->offset, c->sector_size, &retlen, | 341 | ret = mtd_point(c->mtd, jeb->offset, c->sector_size, &retlen, |
diff --git a/fs/logfs/dev_mtd.c b/fs/logfs/dev_mtd.c index e97404d611e0..9c501449450d 100644 --- a/fs/logfs/dev_mtd.c +++ b/fs/logfs/dev_mtd.c | |||
@@ -152,9 +152,6 @@ static struct page *logfs_mtd_find_first_sb(struct super_block *sb, u64 *ofs) | |||
152 | filler_t *filler = logfs_mtd_readpage; | 152 | filler_t *filler = logfs_mtd_readpage; |
153 | struct mtd_info *mtd = super->s_mtd; | 153 | struct mtd_info *mtd = super->s_mtd; |
154 | 154 | ||
155 | if (!mtd_can_have_bb(mtd)) | ||
156 | return NULL; | ||
157 | |||
158 | *ofs = 0; | 155 | *ofs = 0; |
159 | while (mtd_block_isbad(mtd, *ofs)) { | 156 | while (mtd_block_isbad(mtd, *ofs)) { |
160 | *ofs += mtd->erasesize; | 157 | *ofs += mtd->erasesize; |
@@ -172,9 +169,6 @@ static struct page *logfs_mtd_find_last_sb(struct super_block *sb, u64 *ofs) | |||
172 | filler_t *filler = logfs_mtd_readpage; | 169 | filler_t *filler = logfs_mtd_readpage; |
173 | struct mtd_info *mtd = super->s_mtd; | 170 | struct mtd_info *mtd = super->s_mtd; |
174 | 171 | ||
175 | if (!mtd_can_have_bb(mtd)) | ||
176 | return NULL; | ||
177 | |||
178 | *ofs = mtd->size - mtd->erasesize; | 172 | *ofs = mtd->size - mtd->erasesize; |
179 | while (mtd_block_isbad(mtd, *ofs)) { | 173 | while (mtd_block_isbad(mtd, *ofs)) { |
180 | *ofs -= mtd->erasesize; | 174 | *ofs -= mtd->erasesize; |
diff --git a/fs/logfs/dir.c b/fs/logfs/dir.c index 501043e8966c..3de7a32cadbe 100644 --- a/fs/logfs/dir.c +++ b/fs/logfs/dir.c | |||
@@ -71,7 +71,7 @@ static int write_dir(struct inode *dir, struct logfs_disk_dentry *dd, | |||
71 | 71 | ||
72 | static int write_inode(struct inode *inode) | 72 | static int write_inode(struct inode *inode) |
73 | { | 73 | { |
74 | return __logfs_write_inode(inode, WF_LOCK); | 74 | return __logfs_write_inode(inode, NULL, WF_LOCK); |
75 | } | 75 | } |
76 | 76 | ||
77 | static s64 dir_seek_data(struct inode *inode, s64 pos) | 77 | static s64 dir_seek_data(struct inode *inode, s64 pos) |
diff --git a/fs/logfs/file.c b/fs/logfs/file.c index b548c87a86f1..3886cded283c 100644 --- a/fs/logfs/file.c +++ b/fs/logfs/file.c | |||
@@ -230,7 +230,9 @@ int logfs_fsync(struct file *file, loff_t start, loff_t end, int datasync) | |||
230 | return ret; | 230 | return ret; |
231 | 231 | ||
232 | mutex_lock(&inode->i_mutex); | 232 | mutex_lock(&inode->i_mutex); |
233 | logfs_get_wblocks(sb, NULL, WF_LOCK); | ||
233 | logfs_write_anchor(sb); | 234 | logfs_write_anchor(sb); |
235 | logfs_put_wblocks(sb, NULL, WF_LOCK); | ||
234 | mutex_unlock(&inode->i_mutex); | 236 | mutex_unlock(&inode->i_mutex); |
235 | 237 | ||
236 | return 0; | 238 | return 0; |
diff --git a/fs/logfs/gc.c b/fs/logfs/gc.c index caa4419285dc..d4efb061bdc5 100644 --- a/fs/logfs/gc.c +++ b/fs/logfs/gc.c | |||
@@ -367,7 +367,7 @@ static struct gc_candidate *get_candidate(struct super_block *sb) | |||
367 | int i, max_dist; | 367 | int i, max_dist; |
368 | struct gc_candidate *cand = NULL, *this; | 368 | struct gc_candidate *cand = NULL, *this; |
369 | 369 | ||
370 | max_dist = min(no_free_segments(sb), LOGFS_NO_AREAS); | 370 | max_dist = min(no_free_segments(sb), LOGFS_NO_AREAS - 1); |
371 | 371 | ||
372 | for (i = max_dist; i >= 0; i--) { | 372 | for (i = max_dist; i >= 0; i--) { |
373 | this = first_in_list(&super->s_low_list[i]); | 373 | this = first_in_list(&super->s_low_list[i]); |
diff --git a/fs/logfs/inode.c b/fs/logfs/inode.c index 388df1aa35e5..a422f42238b2 100644 --- a/fs/logfs/inode.c +++ b/fs/logfs/inode.c | |||
@@ -286,7 +286,7 @@ static int logfs_write_inode(struct inode *inode, struct writeback_control *wbc) | |||
286 | if (logfs_inode(inode)->li_flags & LOGFS_IF_STILLBORN) | 286 | if (logfs_inode(inode)->li_flags & LOGFS_IF_STILLBORN) |
287 | return 0; | 287 | return 0; |
288 | 288 | ||
289 | ret = __logfs_write_inode(inode, flags); | 289 | ret = __logfs_write_inode(inode, NULL, flags); |
290 | LOGFS_BUG_ON(ret, inode->i_sb); | 290 | LOGFS_BUG_ON(ret, inode->i_sb); |
291 | return ret; | 291 | return ret; |
292 | } | 292 | } |
@@ -363,7 +363,9 @@ static void logfs_init_once(void *_li) | |||
363 | 363 | ||
364 | static int logfs_sync_fs(struct super_block *sb, int wait) | 364 | static int logfs_sync_fs(struct super_block *sb, int wait) |
365 | { | 365 | { |
366 | logfs_get_wblocks(sb, NULL, WF_LOCK); | ||
366 | logfs_write_anchor(sb); | 367 | logfs_write_anchor(sb); |
368 | logfs_put_wblocks(sb, NULL, WF_LOCK); | ||
367 | return 0; | 369 | return 0; |
368 | } | 370 | } |
369 | 371 | ||
diff --git a/fs/logfs/journal.c b/fs/logfs/journal.c index 9da29706f91c..1e1c369df22b 100644 --- a/fs/logfs/journal.c +++ b/fs/logfs/journal.c | |||
@@ -612,7 +612,6 @@ static size_t __logfs_write_je(struct super_block *sb, void *buf, u16 type, | |||
612 | if (len == 0) | 612 | if (len == 0) |
613 | return logfs_write_header(super, header, 0, type); | 613 | return logfs_write_header(super, header, 0, type); |
614 | 614 | ||
615 | BUG_ON(len > sb->s_blocksize); | ||
616 | compr_len = logfs_compress(buf, data, len, sb->s_blocksize); | 615 | compr_len = logfs_compress(buf, data, len, sb->s_blocksize); |
617 | if (compr_len < 0 || type == JE_ANCHOR) { | 616 | if (compr_len < 0 || type == JE_ANCHOR) { |
618 | memcpy(data, buf, len); | 617 | memcpy(data, buf, len); |
diff --git a/fs/logfs/logfs.h b/fs/logfs/logfs.h index 926373866a55..5f0937609465 100644 --- a/fs/logfs/logfs.h +++ b/fs/logfs/logfs.h | |||
@@ -528,7 +528,7 @@ void logfs_destroy_inode_cache(void); | |||
528 | void logfs_set_blocks(struct inode *inode, u64 no); | 528 | void logfs_set_blocks(struct inode *inode, u64 no); |
529 | /* these logically belong into inode.c but actually reside in readwrite.c */ | 529 | /* these logically belong into inode.c but actually reside in readwrite.c */ |
530 | int logfs_read_inode(struct inode *inode); | 530 | int logfs_read_inode(struct inode *inode); |
531 | int __logfs_write_inode(struct inode *inode, long flags); | 531 | int __logfs_write_inode(struct inode *inode, struct page *, long flags); |
532 | void logfs_evict_inode(struct inode *inode); | 532 | void logfs_evict_inode(struct inode *inode); |
533 | 533 | ||
534 | /* journal.c */ | 534 | /* journal.c */ |
@@ -577,6 +577,8 @@ void initialize_block_counters(struct page *page, struct logfs_block *block, | |||
577 | __be64 *array, int page_is_empty); | 577 | __be64 *array, int page_is_empty); |
578 | int logfs_exist_block(struct inode *inode, u64 bix); | 578 | int logfs_exist_block(struct inode *inode, u64 bix); |
579 | int get_page_reserve(struct inode *inode, struct page *page); | 579 | int get_page_reserve(struct inode *inode, struct page *page); |
580 | void logfs_get_wblocks(struct super_block *sb, struct page *page, int lock); | ||
581 | void logfs_put_wblocks(struct super_block *sb, struct page *page, int lock); | ||
580 | extern struct logfs_block_ops indirect_block_ops; | 582 | extern struct logfs_block_ops indirect_block_ops; |
581 | 583 | ||
582 | /* segment.c */ | 584 | /* segment.c */ |
@@ -594,6 +596,7 @@ int logfs_init_mapping(struct super_block *sb); | |||
594 | void logfs_sync_area(struct logfs_area *area); | 596 | void logfs_sync_area(struct logfs_area *area); |
595 | void logfs_sync_segments(struct super_block *sb); | 597 | void logfs_sync_segments(struct super_block *sb); |
596 | void freeseg(struct super_block *sb, u32 segno); | 598 | void freeseg(struct super_block *sb, u32 segno); |
599 | void free_areas(struct super_block *sb); | ||
597 | 600 | ||
598 | /* area handling */ | 601 | /* area handling */ |
599 | int logfs_init_areas(struct super_block *sb); | 602 | int logfs_init_areas(struct super_block *sb); |
diff --git a/fs/logfs/readwrite.c b/fs/logfs/readwrite.c index 2ac4217b7901..4153e65b0148 100644 --- a/fs/logfs/readwrite.c +++ b/fs/logfs/readwrite.c | |||
@@ -244,8 +244,7 @@ static void preunlock_page(struct super_block *sb, struct page *page, int lock) | |||
244 | * is waiting for s_write_mutex. We annotate this fact by setting PG_pre_locked | 244 | * is waiting for s_write_mutex. We annotate this fact by setting PG_pre_locked |
245 | * in addition to PG_locked. | 245 | * in addition to PG_locked. |
246 | */ | 246 | */ |
247 | static void logfs_get_wblocks(struct super_block *sb, struct page *page, | 247 | void logfs_get_wblocks(struct super_block *sb, struct page *page, int lock) |
248 | int lock) | ||
249 | { | 248 | { |
250 | struct logfs_super *super = logfs_super(sb); | 249 | struct logfs_super *super = logfs_super(sb); |
251 | 250 | ||
@@ -260,8 +259,7 @@ static void logfs_get_wblocks(struct super_block *sb, struct page *page, | |||
260 | } | 259 | } |
261 | } | 260 | } |
262 | 261 | ||
263 | static void logfs_put_wblocks(struct super_block *sb, struct page *page, | 262 | void logfs_put_wblocks(struct super_block *sb, struct page *page, int lock) |
264 | int lock) | ||
265 | { | 263 | { |
266 | struct logfs_super *super = logfs_super(sb); | 264 | struct logfs_super *super = logfs_super(sb); |
267 | 265 | ||
@@ -424,7 +422,7 @@ static void inode_write_block(struct logfs_block *block) | |||
424 | if (inode->i_ino == LOGFS_INO_MASTER) | 422 | if (inode->i_ino == LOGFS_INO_MASTER) |
425 | logfs_write_anchor(inode->i_sb); | 423 | logfs_write_anchor(inode->i_sb); |
426 | else { | 424 | else { |
427 | ret = __logfs_write_inode(inode, 0); | 425 | ret = __logfs_write_inode(inode, NULL, 0); |
428 | /* see indirect_write_block comment */ | 426 | /* see indirect_write_block comment */ |
429 | BUG_ON(ret); | 427 | BUG_ON(ret); |
430 | } | 428 | } |
@@ -560,8 +558,13 @@ static void inode_free_block(struct super_block *sb, struct logfs_block *block) | |||
560 | static void indirect_free_block(struct super_block *sb, | 558 | static void indirect_free_block(struct super_block *sb, |
561 | struct logfs_block *block) | 559 | struct logfs_block *block) |
562 | { | 560 | { |
563 | ClearPagePrivate(block->page); | 561 | struct page *page = block->page; |
564 | block->page->private = 0; | 562 | |
563 | if (PagePrivate(page)) { | ||
564 | ClearPagePrivate(page); | ||
565 | page_cache_release(page); | ||
566 | set_page_private(page, 0); | ||
567 | } | ||
565 | __free_block(sb, block); | 568 | __free_block(sb, block); |
566 | } | 569 | } |
567 | 570 | ||
@@ -650,8 +653,11 @@ static void alloc_data_block(struct inode *inode, struct page *page) | |||
650 | logfs_unpack_index(page->index, &bix, &level); | 653 | logfs_unpack_index(page->index, &bix, &level); |
651 | block = __alloc_block(inode->i_sb, inode->i_ino, bix, level); | 654 | block = __alloc_block(inode->i_sb, inode->i_ino, bix, level); |
652 | block->page = page; | 655 | block->page = page; |
656 | |||
653 | SetPagePrivate(page); | 657 | SetPagePrivate(page); |
654 | page->private = (unsigned long)block; | 658 | page_cache_get(page); |
659 | set_page_private(page, (unsigned long) block); | ||
660 | |||
655 | block->ops = &indirect_block_ops; | 661 | block->ops = &indirect_block_ops; |
656 | } | 662 | } |
657 | 663 | ||
@@ -1570,11 +1576,15 @@ int logfs_write_buf(struct inode *inode, struct page *page, long flags) | |||
1570 | static int __logfs_delete(struct inode *inode, struct page *page) | 1576 | static int __logfs_delete(struct inode *inode, struct page *page) |
1571 | { | 1577 | { |
1572 | long flags = WF_DELETE; | 1578 | long flags = WF_DELETE; |
1579 | int err; | ||
1573 | 1580 | ||
1574 | inode->i_ctime = inode->i_mtime = CURRENT_TIME; | 1581 | inode->i_ctime = inode->i_mtime = CURRENT_TIME; |
1575 | 1582 | ||
1576 | if (page->index < I0_BLOCKS) | 1583 | if (page->index < I0_BLOCKS) |
1577 | return logfs_write_direct(inode, page, flags); | 1584 | return logfs_write_direct(inode, page, flags); |
1585 | err = grow_inode(inode, page->index, 0); | ||
1586 | if (err) | ||
1587 | return err; | ||
1578 | return logfs_write_rec(inode, page, page->index, 0, flags); | 1588 | return logfs_write_rec(inode, page, page->index, 0, flags); |
1579 | } | 1589 | } |
1580 | 1590 | ||
@@ -1623,7 +1633,7 @@ int logfs_rewrite_block(struct inode *inode, u64 bix, u64 ofs, | |||
1623 | if (inode->i_ino == LOGFS_INO_MASTER) | 1633 | if (inode->i_ino == LOGFS_INO_MASTER) |
1624 | logfs_write_anchor(inode->i_sb); | 1634 | logfs_write_anchor(inode->i_sb); |
1625 | else { | 1635 | else { |
1626 | err = __logfs_write_inode(inode, flags); | 1636 | err = __logfs_write_inode(inode, page, flags); |
1627 | } | 1637 | } |
1628 | } | 1638 | } |
1629 | } | 1639 | } |
@@ -1873,7 +1883,7 @@ int logfs_truncate(struct inode *inode, u64 target) | |||
1873 | logfs_get_wblocks(sb, NULL, 1); | 1883 | logfs_get_wblocks(sb, NULL, 1); |
1874 | err = __logfs_truncate(inode, size); | 1884 | err = __logfs_truncate(inode, size); |
1875 | if (!err) | 1885 | if (!err) |
1876 | err = __logfs_write_inode(inode, 0); | 1886 | err = __logfs_write_inode(inode, NULL, 0); |
1877 | logfs_put_wblocks(sb, NULL, 1); | 1887 | logfs_put_wblocks(sb, NULL, 1); |
1878 | } | 1888 | } |
1879 | 1889 | ||
@@ -1901,8 +1911,11 @@ static void move_page_to_inode(struct inode *inode, struct page *page) | |||
1901 | li->li_block = block; | 1911 | li->li_block = block; |
1902 | 1912 | ||
1903 | block->page = NULL; | 1913 | block->page = NULL; |
1904 | page->private = 0; | 1914 | if (PagePrivate(page)) { |
1905 | ClearPagePrivate(page); | 1915 | ClearPagePrivate(page); |
1916 | page_cache_release(page); | ||
1917 | set_page_private(page, 0); | ||
1918 | } | ||
1906 | } | 1919 | } |
1907 | 1920 | ||
1908 | static void move_inode_to_page(struct page *page, struct inode *inode) | 1921 | static void move_inode_to_page(struct page *page, struct inode *inode) |
@@ -1918,8 +1931,12 @@ static void move_inode_to_page(struct page *page, struct inode *inode) | |||
1918 | BUG_ON(PagePrivate(page)); | 1931 | BUG_ON(PagePrivate(page)); |
1919 | block->ops = &indirect_block_ops; | 1932 | block->ops = &indirect_block_ops; |
1920 | block->page = page; | 1933 | block->page = page; |
1921 | page->private = (unsigned long)block; | 1934 | |
1922 | SetPagePrivate(page); | 1935 | if (!PagePrivate(page)) { |
1936 | SetPagePrivate(page); | ||
1937 | page_cache_get(page); | ||
1938 | set_page_private(page, (unsigned long) block); | ||
1939 | } | ||
1923 | 1940 | ||
1924 | block->inode = NULL; | 1941 | block->inode = NULL; |
1925 | li->li_block = NULL; | 1942 | li->li_block = NULL; |
@@ -2106,14 +2123,14 @@ void logfs_set_segment_unreserved(struct super_block *sb, u32 segno, u32 ec) | |||
2106 | ec_level); | 2123 | ec_level); |
2107 | } | 2124 | } |
2108 | 2125 | ||
2109 | int __logfs_write_inode(struct inode *inode, long flags) | 2126 | int __logfs_write_inode(struct inode *inode, struct page *page, long flags) |
2110 | { | 2127 | { |
2111 | struct super_block *sb = inode->i_sb; | 2128 | struct super_block *sb = inode->i_sb; |
2112 | int ret; | 2129 | int ret; |
2113 | 2130 | ||
2114 | logfs_get_wblocks(sb, NULL, flags & WF_LOCK); | 2131 | logfs_get_wblocks(sb, page, flags & WF_LOCK); |
2115 | ret = do_write_inode(inode); | 2132 | ret = do_write_inode(inode); |
2116 | logfs_put_wblocks(sb, NULL, flags & WF_LOCK); | 2133 | logfs_put_wblocks(sb, page, flags & WF_LOCK); |
2117 | return ret; | 2134 | return ret; |
2118 | } | 2135 | } |
2119 | 2136 | ||
diff --git a/fs/logfs/segment.c b/fs/logfs/segment.c index 9d5187353255..ab798ed1cc88 100644 --- a/fs/logfs/segment.c +++ b/fs/logfs/segment.c | |||
@@ -86,7 +86,11 @@ int __logfs_buf_write(struct logfs_area *area, u64 ofs, void *buf, size_t len, | |||
86 | BUG_ON(!page); /* FIXME: reserve a pool */ | 86 | BUG_ON(!page); /* FIXME: reserve a pool */ |
87 | SetPageUptodate(page); | 87 | SetPageUptodate(page); |
88 | memcpy(page_address(page) + offset, buf, copylen); | 88 | memcpy(page_address(page) + offset, buf, copylen); |
89 | SetPagePrivate(page); | 89 | |
90 | if (!PagePrivate(page)) { | ||
91 | SetPagePrivate(page); | ||
92 | page_cache_get(page); | ||
93 | } | ||
90 | page_cache_release(page); | 94 | page_cache_release(page); |
91 | 95 | ||
92 | buf += copylen; | 96 | buf += copylen; |
@@ -110,7 +114,10 @@ static void pad_partial_page(struct logfs_area *area) | |||
110 | page = get_mapping_page(sb, index, 0); | 114 | page = get_mapping_page(sb, index, 0); |
111 | BUG_ON(!page); /* FIXME: reserve a pool */ | 115 | BUG_ON(!page); /* FIXME: reserve a pool */ |
112 | memset(page_address(page) + offset, 0xff, len); | 116 | memset(page_address(page) + offset, 0xff, len); |
113 | SetPagePrivate(page); | 117 | if (!PagePrivate(page)) { |
118 | SetPagePrivate(page); | ||
119 | page_cache_get(page); | ||
120 | } | ||
114 | page_cache_release(page); | 121 | page_cache_release(page); |
115 | } | 122 | } |
116 | } | 123 | } |
@@ -130,7 +137,10 @@ static void pad_full_pages(struct logfs_area *area) | |||
130 | BUG_ON(!page); /* FIXME: reserve a pool */ | 137 | BUG_ON(!page); /* FIXME: reserve a pool */ |
131 | SetPageUptodate(page); | 138 | SetPageUptodate(page); |
132 | memset(page_address(page), 0xff, PAGE_CACHE_SIZE); | 139 | memset(page_address(page), 0xff, PAGE_CACHE_SIZE); |
133 | SetPagePrivate(page); | 140 | if (!PagePrivate(page)) { |
141 | SetPagePrivate(page); | ||
142 | page_cache_get(page); | ||
143 | } | ||
134 | page_cache_release(page); | 144 | page_cache_release(page); |
135 | index++; | 145 | index++; |
136 | no_indizes--; | 146 | no_indizes--; |
@@ -485,8 +495,12 @@ static void move_btree_to_page(struct inode *inode, struct page *page, | |||
485 | mempool_free(item, super->s_alias_pool); | 495 | mempool_free(item, super->s_alias_pool); |
486 | } | 496 | } |
487 | block->page = page; | 497 | block->page = page; |
488 | SetPagePrivate(page); | 498 | |
489 | page->private = (unsigned long)block; | 499 | if (!PagePrivate(page)) { |
500 | SetPagePrivate(page); | ||
501 | page_cache_get(page); | ||
502 | set_page_private(page, (unsigned long) block); | ||
503 | } | ||
490 | block->ops = &indirect_block_ops; | 504 | block->ops = &indirect_block_ops; |
491 | initialize_block_counters(page, block, data, 0); | 505 | initialize_block_counters(page, block, data, 0); |
492 | } | 506 | } |
@@ -536,8 +550,12 @@ void move_page_to_btree(struct page *page) | |||
536 | list_add(&item->list, &block->item_list); | 550 | list_add(&item->list, &block->item_list); |
537 | } | 551 | } |
538 | block->page = NULL; | 552 | block->page = NULL; |
539 | ClearPagePrivate(page); | 553 | |
540 | page->private = 0; | 554 | if (PagePrivate(page)) { |
555 | ClearPagePrivate(page); | ||
556 | page_cache_release(page); | ||
557 | set_page_private(page, 0); | ||
558 | } | ||
541 | block->ops = &btree_block_ops; | 559 | block->ops = &btree_block_ops; |
542 | err = alias_tree_insert(block->sb, block->ino, block->bix, block->level, | 560 | err = alias_tree_insert(block->sb, block->ino, block->bix, block->level, |
543 | block); | 561 | block); |
@@ -702,7 +720,10 @@ void freeseg(struct super_block *sb, u32 segno) | |||
702 | page = find_get_page(mapping, ofs >> PAGE_SHIFT); | 720 | page = find_get_page(mapping, ofs >> PAGE_SHIFT); |
703 | if (!page) | 721 | if (!page) |
704 | continue; | 722 | continue; |
705 | ClearPagePrivate(page); | 723 | if (PagePrivate(page)) { |
724 | ClearPagePrivate(page); | ||
725 | page_cache_release(page); | ||
726 | } | ||
706 | page_cache_release(page); | 727 | page_cache_release(page); |
707 | } | 728 | } |
708 | } | 729 | } |
@@ -841,6 +862,16 @@ static void free_area(struct logfs_area *area) | |||
841 | kfree(area); | 862 | kfree(area); |
842 | } | 863 | } |
843 | 864 | ||
865 | void free_areas(struct super_block *sb) | ||
866 | { | ||
867 | struct logfs_super *super = logfs_super(sb); | ||
868 | int i; | ||
869 | |||
870 | for_each_area(i) | ||
871 | free_area(super->s_area[i]); | ||
872 | free_area(super->s_journal_area); | ||
873 | } | ||
874 | |||
844 | static struct logfs_area *alloc_area(struct super_block *sb) | 875 | static struct logfs_area *alloc_area(struct super_block *sb) |
845 | { | 876 | { |
846 | struct logfs_area *area; | 877 | struct logfs_area *area; |
@@ -923,10 +954,6 @@ err: | |||
923 | void logfs_cleanup_areas(struct super_block *sb) | 954 | void logfs_cleanup_areas(struct super_block *sb) |
924 | { | 955 | { |
925 | struct logfs_super *super = logfs_super(sb); | 956 | struct logfs_super *super = logfs_super(sb); |
926 | int i; | ||
927 | 957 | ||
928 | btree_grim_visitor128(&super->s_object_alias_tree, 0, kill_alias); | 958 | btree_grim_visitor128(&super->s_object_alias_tree, 0, kill_alias); |
929 | for_each_area(i) | ||
930 | free_area(super->s_area[i]); | ||
931 | free_area(super->s_journal_area); | ||
932 | } | 959 | } |
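Finally, a sketch of the PG_private/reference pairing the logfs hunks enforce throughout readwrite.c and segment.c: setting PG_private pins the page with one extra reference, and clearing it releases exactly that reference. Helper names are illustrative, not logfs functions.

        #include <linux/mm.h>
        #include <linux/pagemap.h>

        static void example_attach_private(struct page *page, unsigned long priv)
        {
                if (!PagePrivate(page)) {
                        SetPagePrivate(page);
                        page_cache_get(page);           /* pin while ->private is in use */
                        set_page_private(page, priv);
                }
        }

        static void example_detach_private(struct page *page)
        {
                if (PagePrivate(page)) {
                        ClearPagePrivate(page);
                        page_cache_release(page);       /* drop the PG_private pin */
                        set_page_private(page, 0);
                }
        }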
diff --git a/fs/logfs/super.c b/fs/logfs/super.c index e795c234ea33..c9ee7f5d1caf 100644 --- a/fs/logfs/super.c +++ b/fs/logfs/super.c | |||
@@ -486,14 +486,15 @@ static void logfs_kill_sb(struct super_block *sb) | |||
486 | /* Alias entries slow down mount, so evict as many as possible */ | 486 | /* Alias entries slow down mount, so evict as many as possible */ |
487 | sync_filesystem(sb); | 487 | sync_filesystem(sb); |
488 | logfs_write_anchor(sb); | 488 | logfs_write_anchor(sb); |
489 | free_areas(sb); | ||
489 | 490 | ||
490 | /* | 491 | /* |
491 | * From this point on alias entries are simply dropped - and any | 492 | * From this point on alias entries are simply dropped - and any |
492 | * writes to the object store are considered bugs. | 493 | * writes to the object store are considered bugs. |
493 | */ | 494 | */ |
494 | super->s_flags |= LOGFS_SB_FLAG_SHUTDOWN; | ||
495 | log_super("LogFS: Now in shutdown\n"); | 495 | log_super("LogFS: Now in shutdown\n"); |
496 | generic_shutdown_super(sb); | 496 | generic_shutdown_super(sb); |
497 | super->s_flags |= LOGFS_SB_FLAG_SHUTDOWN; | ||
497 | 498 | ||
498 | BUG_ON(super->s_dirty_used_bytes || super->s_dirty_free_bytes); | 499 | BUG_ON(super->s_dirty_used_bytes || super->s_dirty_free_bytes); |
499 | 500 | ||
diff --git a/fs/namei.c b/fs/namei.c index 208c6aa4a989..46ea9cc16647 100644 --- a/fs/namei.c +++ b/fs/namei.c | |||
@@ -1095,8 +1095,10 @@ static struct dentry *d_inode_lookup(struct dentry *parent, struct dentry *dentr | |||
1095 | struct dentry *old; | 1095 | struct dentry *old; |
1096 | 1096 | ||
1097 | /* Don't create child dentry for a dead directory. */ | 1097 | /* Don't create child dentry for a dead directory. */ |
1098 | if (unlikely(IS_DEADDIR(inode))) | 1098 | if (unlikely(IS_DEADDIR(inode))) { |
1099 | dput(dentry); | ||
1099 | return ERR_PTR(-ENOENT); | 1100 | return ERR_PTR(-ENOENT); |
1101 | } | ||
1100 | 1102 | ||
1101 | old = inode->i_op->lookup(inode, dentry, nd); | 1103 | old = inode->i_op->lookup(inode, dentry, nd); |
1102 | if (unlikely(old)) { | 1104 | if (unlikely(old)) { |
@@ -1372,6 +1374,34 @@ static inline int can_lookup(struct inode *inode) | |||
1372 | return 1; | 1374 | return 1; |
1373 | } | 1375 | } |
1374 | 1376 | ||
1377 | unsigned int full_name_hash(const unsigned char *name, unsigned int len) | ||
1378 | { | ||
1379 | unsigned long hash = init_name_hash(); | ||
1380 | while (len--) | ||
1381 | hash = partial_name_hash(*name++, hash); | ||
1382 | return end_name_hash(hash); | ||
1383 | } | ||
1384 | EXPORT_SYMBOL(full_name_hash); | ||
1385 | |||
1386 | /* | ||
1387 | * We know there's a real path component here of at least | ||
1388 | * one character. | ||
1389 | */ | ||
1390 | static inline unsigned long hash_name(const char *name, unsigned int *hashp) | ||
1391 | { | ||
1392 | unsigned long hash = init_name_hash(); | ||
1393 | unsigned long len = 0, c; | ||
1394 | |||
1395 | c = (unsigned char)*name; | ||
1396 | do { | ||
1397 | len++; | ||
1398 | hash = partial_name_hash(c, hash); | ||
1399 | c = (unsigned char)name[len]; | ||
1400 | } while (c && c != '/'); | ||
1401 | *hashp = end_name_hash(hash); | ||
1402 | return len; | ||
1403 | } | ||
1404 | |||
1375 | /* | 1405 | /* |
1376 | * Name resolution. | 1406 | * Name resolution. |
1377 | * This is the basic name resolution function, turning a pathname into | 1407 | * This is the basic name resolution function, turning a pathname into |
@@ -1392,31 +1422,22 @@ static int link_path_walk(const char *name, struct nameidata *nd) | |||
1392 | 1422 | ||
1393 | /* At this point we know we have a real path component. */ | 1423 | /* At this point we know we have a real path component. */ |
1394 | for(;;) { | 1424 | for(;;) { |
1395 | unsigned long hash; | ||
1396 | struct qstr this; | 1425 | struct qstr this; |
1397 | unsigned int c; | 1426 | long len; |
1398 | int type; | 1427 | int type; |
1399 | 1428 | ||
1400 | err = may_lookup(nd); | 1429 | err = may_lookup(nd); |
1401 | if (err) | 1430 | if (err) |
1402 | break; | 1431 | break; |
1403 | 1432 | ||
1433 | len = hash_name(name, &this.hash); | ||
1404 | this.name = name; | 1434 | this.name = name; |
1405 | c = *(const unsigned char *)name; | 1435 | this.len = len; |
1406 | |||
1407 | hash = init_name_hash(); | ||
1408 | do { | ||
1409 | name++; | ||
1410 | hash = partial_name_hash(c, hash); | ||
1411 | c = *(const unsigned char *)name; | ||
1412 | } while (c && (c != '/')); | ||
1413 | this.len = name - (const char *) this.name; | ||
1414 | this.hash = end_name_hash(hash); | ||
1415 | 1436 | ||
1416 | type = LAST_NORM; | 1437 | type = LAST_NORM; |
1417 | if (this.name[0] == '.') switch (this.len) { | 1438 | if (name[0] == '.') switch (len) { |
1418 | case 2: | 1439 | case 2: |
1419 | if (this.name[1] == '.') { | 1440 | if (name[1] == '.') { |
1420 | type = LAST_DOTDOT; | 1441 | type = LAST_DOTDOT; |
1421 | nd->flags |= LOOKUP_JUMPED; | 1442 | nd->flags |= LOOKUP_JUMPED; |
1422 | } | 1443 | } |
@@ -1435,12 +1456,18 @@ static int link_path_walk(const char *name, struct nameidata *nd) | |||
1435 | } | 1456 | } |
1436 | } | 1457 | } |
1437 | 1458 | ||
1438 | /* remove trailing slashes? */ | 1459 | if (!name[len]) |
1439 | if (!c) | ||
1440 | goto last_component; | 1460 | goto last_component; |
1441 | while (*++name == '/'); | 1461 | /* |
1442 | if (!*name) | 1462 | * If it wasn't NUL, we know it was '/'. Skip that |
1463 | * slash, and continue until no more slashes. | ||
1464 | */ | ||
1465 | do { | ||
1466 | len++; | ||
1467 | } while (unlikely(name[len] == '/')); | ||
1468 | if (!name[len]) | ||
1443 | goto last_component; | 1469 | goto last_component; |
1470 | name += len; | ||
1444 | 1471 | ||
1445 | err = walk_component(nd, &next, &this, type, LOOKUP_FOLLOW); | 1472 | err = walk_component(nd, &next, &this, type, LOOKUP_FOLLOW); |
1446 | if (err < 0) | 1473 | if (err < 0) |
@@ -1773,24 +1800,21 @@ static struct dentry *lookup_hash(struct nameidata *nd) | |||
1773 | struct dentry *lookup_one_len(const char *name, struct dentry *base, int len) | 1800 | struct dentry *lookup_one_len(const char *name, struct dentry *base, int len) |
1774 | { | 1801 | { |
1775 | struct qstr this; | 1802 | struct qstr this; |
1776 | unsigned long hash; | ||
1777 | unsigned int c; | 1803 | unsigned int c; |
1778 | 1804 | ||
1779 | WARN_ON_ONCE(!mutex_is_locked(&base->d_inode->i_mutex)); | 1805 | WARN_ON_ONCE(!mutex_is_locked(&base->d_inode->i_mutex)); |
1780 | 1806 | ||
1781 | this.name = name; | 1807 | this.name = name; |
1782 | this.len = len; | 1808 | this.len = len; |
1809 | this.hash = full_name_hash(name, len); | ||
1783 | if (!len) | 1810 | if (!len) |
1784 | return ERR_PTR(-EACCES); | 1811 | return ERR_PTR(-EACCES); |
1785 | 1812 | ||
1786 | hash = init_name_hash(); | ||
1787 | while (len--) { | 1813 | while (len--) { |
1788 | c = *(const unsigned char *)name++; | 1814 | c = *(const unsigned char *)name++; |
1789 | if (c == '/' || c == '\0') | 1815 | if (c == '/' || c == '\0') |
1790 | return ERR_PTR(-EACCES); | 1816 | return ERR_PTR(-EACCES); |
1791 | hash = partial_name_hash(c, hash); | ||
1792 | } | 1817 | } |
1793 | this.hash = end_name_hash(hash); | ||
1794 | /* | 1818 | /* |
1795 | * See if the low-level filesystem might want | 1819 | * See if the low-level filesystem might want |
1796 | * to use its own hash.. | 1820 | * to use its own hash.. |
@@ -2138,7 +2162,7 @@ static struct file *do_last(struct nameidata *nd, struct path *path, | |||
2138 | /* sayonara */ | 2162 | /* sayonara */ |
2139 | error = complete_walk(nd); | 2163 | error = complete_walk(nd); |
2140 | if (error) | 2164 | if (error) |
2141 | return ERR_PTR(-ECHILD); | 2165 | return ERR_PTR(error); |
2142 | 2166 | ||
2143 | error = -ENOTDIR; | 2167 | error = -ENOTDIR; |
2144 | if (nd->flags & LOOKUP_DIRECTORY) { | 2168 | if (nd->flags & LOOKUP_DIRECTORY) { |
@@ -2237,7 +2261,7 @@ static struct file *do_last(struct nameidata *nd, struct path *path, | |||
2237 | /* Why this, you ask? _Now_ we might have grown LOOKUP_JUMPED... */ | 2261 | /* Why this, you ask? _Now_ we might have grown LOOKUP_JUMPED... */ |
2238 | error = complete_walk(nd); | 2262 | error = complete_walk(nd); |
2239 | if (error) | 2263 | if (error) |
2240 | goto exit; | 2264 | return ERR_PTR(error); |
2241 | error = -EISDIR; | 2265 | error = -EISDIR; |
2242 | if (S_ISDIR(nd->inode->i_mode)) | 2266 | if (S_ISDIR(nd->inode->i_mode)) |
2243 | goto exit; | 2267 | goto exit; |
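
The namei.c changes factor component hashing out of link_path_walk() into an exported full_name_hash() and a local hash_name() that also measures the component length, besides fixing the dentry leak on dead directories in d_inode_lookup() and propagating the real error from complete_walk() in do_last(). A rough userspace rendering of the new split; the mixing function below is a stand-in, not the kernel's:

#include <stdio.h>

static unsigned long mix(unsigned long c, unsigned long h)
{
    return (h + (c << 4) + (c >> 4)) * 11;   /* illustrative only */
}

static unsigned int full_name_hash_sketch(const unsigned char *name,
                                          unsigned int len)
{
    unsigned long hash = 0;
    while (len--)
        hash = mix(*name++, hash);
    return (unsigned int)hash;
}

/* hash the component and return its length, stopping at '/' or NUL */
static unsigned long hash_name_sketch(const char *name, unsigned int *hashp)
{
    unsigned long hash = 0, len = 0;
    unsigned long c = (unsigned char)*name;

    do {
        len++;
        hash = mix(c, hash);
        c = (unsigned char)name[len];
    } while (c && c != '/');
    *hashp = (unsigned int)hash;
    return len;
}

int main(void)
{
    unsigned int h;
    unsigned long len = hash_name_sketch("usr/share", &h);

    /* hashing the measured prefix directly must give the same value */
    printf("len=%lu same=%d\n", len,
           h == full_name_hash_sketch((const unsigned char *)"usr", 3));
    return 0;
}
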
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index f0c849c98fe4..ec9f6ef6c5dd 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c | |||
@@ -3575,8 +3575,8 @@ static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t bu | |||
3575 | } | 3575 | } |
3576 | if (npages > 1) { | 3576 | if (npages > 1) { |
3577 | /* for decoding across pages */ | 3577 | /* for decoding across pages */ |
3578 | args.acl_scratch = alloc_page(GFP_KERNEL); | 3578 | res.acl_scratch = alloc_page(GFP_KERNEL); |
3579 | if (!args.acl_scratch) | 3579 | if (!res.acl_scratch) |
3580 | goto out_free; | 3580 | goto out_free; |
3581 | } | 3581 | } |
3582 | args.acl_len = npages * PAGE_SIZE; | 3582 | args.acl_len = npages * PAGE_SIZE; |
@@ -3612,8 +3612,8 @@ out_free: | |||
3612 | for (i = 0; i < npages; i++) | 3612 | for (i = 0; i < npages; i++) |
3613 | if (pages[i]) | 3613 | if (pages[i]) |
3614 | __free_page(pages[i]); | 3614 | __free_page(pages[i]); |
3615 | if (args.acl_scratch) | 3615 | if (res.acl_scratch) |
3616 | __free_page(args.acl_scratch); | 3616 | __free_page(res.acl_scratch); |
3617 | return ret; | 3617 | return ret; |
3618 | } | 3618 | } |
3619 | 3619 | ||
@@ -4883,8 +4883,10 @@ int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred) | |||
4883 | clp->cl_rpcclient->cl_auth->au_flavor); | 4883 | clp->cl_rpcclient->cl_auth->au_flavor); |
4884 | 4884 | ||
4885 | res.server_scope = kzalloc(sizeof(struct server_scope), GFP_KERNEL); | 4885 | res.server_scope = kzalloc(sizeof(struct server_scope), GFP_KERNEL); |
4886 | if (unlikely(!res.server_scope)) | 4886 | if (unlikely(!res.server_scope)) { |
4887 | return -ENOMEM; | 4887 | status = -ENOMEM; |
4888 | goto out; | ||
4889 | } | ||
4888 | 4890 | ||
4889 | status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); | 4891 | status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); |
4890 | if (!status) | 4892 | if (!status) |
@@ -4901,12 +4903,13 @@ int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred) | |||
4901 | clp->server_scope = NULL; | 4903 | clp->server_scope = NULL; |
4902 | } | 4904 | } |
4903 | 4905 | ||
4904 | if (!clp->server_scope) | 4906 | if (!clp->server_scope) { |
4905 | clp->server_scope = res.server_scope; | 4907 | clp->server_scope = res.server_scope; |
4906 | else | 4908 | goto out; |
4907 | kfree(res.server_scope); | 4909 | } |
4908 | } | 4910 | } |
4909 | 4911 | kfree(res.server_scope); | |
4912 | out: | ||
4910 | dprintk("<-- %s status= %d\n", __func__, status); | 4913 | dprintk("<-- %s status= %d\n", __func__, status); |
4911 | return status; | 4914 | return status; |
4912 | } | 4915 | } |
@@ -5008,37 +5011,53 @@ int nfs4_proc_get_lease_time(struct nfs_client *clp, struct nfs_fsinfo *fsinfo) | |||
5008 | return status; | 5011 | return status; |
5009 | } | 5012 | } |
5010 | 5013 | ||
5014 | static struct nfs4_slot *nfs4_alloc_slots(u32 max_slots, gfp_t gfp_flags) | ||
5015 | { | ||
5016 | return kcalloc(max_slots, sizeof(struct nfs4_slot), gfp_flags); | ||
5017 | } | ||
5018 | |||
5019 | static void nfs4_add_and_init_slots(struct nfs4_slot_table *tbl, | ||
5020 | struct nfs4_slot *new, | ||
5021 | u32 max_slots, | ||
5022 | u32 ivalue) | ||
5023 | { | ||
5024 | struct nfs4_slot *old = NULL; | ||
5025 | u32 i; | ||
5026 | |||
5027 | spin_lock(&tbl->slot_tbl_lock); | ||
5028 | if (new) { | ||
5029 | old = tbl->slots; | ||
5030 | tbl->slots = new; | ||
5031 | tbl->max_slots = max_slots; | ||
5032 | } | ||
5033 | tbl->highest_used_slotid = -1; /* no slot is currently used */ | ||
5034 | for (i = 0; i < tbl->max_slots; i++) | ||
5035 | tbl->slots[i].seq_nr = ivalue; | ||
5036 | spin_unlock(&tbl->slot_tbl_lock); | ||
5037 | kfree(old); | ||
5038 | } | ||
5039 | |||
5011 | /* | 5040 | /* |
5012 | * Reset a slot table | 5041 | * (re)Initialise a slot table |
5013 | */ | 5042 | */ |
5014 | static int nfs4_reset_slot_table(struct nfs4_slot_table *tbl, u32 max_reqs, | 5043 | static int nfs4_realloc_slot_table(struct nfs4_slot_table *tbl, u32 max_reqs, |
5015 | int ivalue) | 5044 | u32 ivalue) |
5016 | { | 5045 | { |
5017 | struct nfs4_slot *new = NULL; | 5046 | struct nfs4_slot *new = NULL; |
5018 | int i; | 5047 | int ret = -ENOMEM; |
5019 | int ret = 0; | ||
5020 | 5048 | ||
5021 | dprintk("--> %s: max_reqs=%u, tbl->max_slots %d\n", __func__, | 5049 | dprintk("--> %s: max_reqs=%u, tbl->max_slots %d\n", __func__, |
5022 | max_reqs, tbl->max_slots); | 5050 | max_reqs, tbl->max_slots); |
5023 | 5051 | ||
5024 | /* Does the newly negotiated max_reqs match the existing slot table? */ | 5052 | /* Does the newly negotiated max_reqs match the existing slot table? */ |
5025 | if (max_reqs != tbl->max_slots) { | 5053 | if (max_reqs != tbl->max_slots) { |
5026 | ret = -ENOMEM; | 5054 | new = nfs4_alloc_slots(max_reqs, GFP_NOFS); |
5027 | new = kmalloc(max_reqs * sizeof(struct nfs4_slot), | ||
5028 | GFP_NOFS); | ||
5029 | if (!new) | 5055 | if (!new) |
5030 | goto out; | 5056 | goto out; |
5031 | ret = 0; | ||
5032 | kfree(tbl->slots); | ||
5033 | } | 5057 | } |
5034 | spin_lock(&tbl->slot_tbl_lock); | 5058 | ret = 0; |
5035 | if (new) { | 5059 | |
5036 | tbl->slots = new; | 5060 | nfs4_add_and_init_slots(tbl, new, max_reqs, ivalue); |
5037 | tbl->max_slots = max_reqs; | ||
5038 | } | ||
5039 | for (i = 0; i < tbl->max_slots; ++i) | ||
5040 | tbl->slots[i].seq_nr = ivalue; | ||
5041 | spin_unlock(&tbl->slot_tbl_lock); | ||
5042 | dprintk("%s: tbl=%p slots=%p max_slots=%d\n", __func__, | 5061 | dprintk("%s: tbl=%p slots=%p max_slots=%d\n", __func__, |
5043 | tbl, tbl->slots, tbl->max_slots); | 5062 | tbl, tbl->slots, tbl->max_slots); |
5044 | out: | 5063 | out: |
@@ -5061,36 +5080,6 @@ static void nfs4_destroy_slot_tables(struct nfs4_session *session) | |||
5061 | } | 5080 | } |
5062 | 5081 | ||
5063 | /* | 5082 | /* |
5064 | * Initialize slot table | ||
5065 | */ | ||
5066 | static int nfs4_init_slot_table(struct nfs4_slot_table *tbl, | ||
5067 | int max_slots, int ivalue) | ||
5068 | { | ||
5069 | struct nfs4_slot *slot; | ||
5070 | int ret = -ENOMEM; | ||
5071 | |||
5072 | BUG_ON(max_slots > NFS4_MAX_SLOT_TABLE); | ||
5073 | |||
5074 | dprintk("--> %s: max_reqs=%u\n", __func__, max_slots); | ||
5075 | |||
5076 | slot = kcalloc(max_slots, sizeof(struct nfs4_slot), GFP_NOFS); | ||
5077 | if (!slot) | ||
5078 | goto out; | ||
5079 | ret = 0; | ||
5080 | |||
5081 | spin_lock(&tbl->slot_tbl_lock); | ||
5082 | tbl->max_slots = max_slots; | ||
5083 | tbl->slots = slot; | ||
5084 | tbl->highest_used_slotid = -1; /* no slot is currently used */ | ||
5085 | spin_unlock(&tbl->slot_tbl_lock); | ||
5086 | dprintk("%s: tbl=%p slots=%p max_slots=%d\n", __func__, | ||
5087 | tbl, tbl->slots, tbl->max_slots); | ||
5088 | out: | ||
5089 | dprintk("<-- %s: return %d\n", __func__, ret); | ||
5090 | return ret; | ||
5091 | } | ||
5092 | |||
5093 | /* | ||
5094 | * Initialize or reset the forechannel and backchannel tables | 5083 | * Initialize or reset the forechannel and backchannel tables |
5095 | */ | 5084 | */ |
5096 | static int nfs4_setup_session_slot_tables(struct nfs4_session *ses) | 5085 | static int nfs4_setup_session_slot_tables(struct nfs4_session *ses) |
@@ -5101,25 +5090,16 @@ static int nfs4_setup_session_slot_tables(struct nfs4_session *ses) | |||
5101 | dprintk("--> %s\n", __func__); | 5090 | dprintk("--> %s\n", __func__); |
5102 | /* Fore channel */ | 5091 | /* Fore channel */ |
5103 | tbl = &ses->fc_slot_table; | 5092 | tbl = &ses->fc_slot_table; |
5104 | if (tbl->slots == NULL) { | 5093 | status = nfs4_realloc_slot_table(tbl, ses->fc_attrs.max_reqs, 1); |
5105 | status = nfs4_init_slot_table(tbl, ses->fc_attrs.max_reqs, 1); | 5094 | if (status) /* -ENOMEM */ |
5106 | if (status) /* -ENOMEM */ | 5095 | return status; |
5107 | return status; | ||
5108 | } else { | ||
5109 | status = nfs4_reset_slot_table(tbl, ses->fc_attrs.max_reqs, 1); | ||
5110 | if (status) | ||
5111 | return status; | ||
5112 | } | ||
5113 | /* Back channel */ | 5096 | /* Back channel */ |
5114 | tbl = &ses->bc_slot_table; | 5097 | tbl = &ses->bc_slot_table; |
5115 | if (tbl->slots == NULL) { | 5098 | status = nfs4_realloc_slot_table(tbl, ses->bc_attrs.max_reqs, 0); |
5116 | status = nfs4_init_slot_table(tbl, ses->bc_attrs.max_reqs, 0); | 5099 | if (status && tbl->slots == NULL) |
5117 | if (status) | 5100 | /* Fore and back channel share a connection so get |
5118 | /* Fore and back channel share a connection so get | 5101 | * both slot tables or neither */ |
5119 | * both slot tables or neither */ | 5102 | nfs4_destroy_slot_tables(ses); |
5120 | nfs4_destroy_slot_tables(ses); | ||
5121 | } else | ||
5122 | status = nfs4_reset_slot_table(tbl, ses->bc_attrs.max_reqs, 0); | ||
5123 | return status; | 5103 | return status; |
5124 | } | 5104 | } |
5125 | 5105 | ||
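
The nfs4proc.c hunks move the GETACL scratch page from the argument to the result structure, plug the server_scope leak in nfs4_proc_exchange_id(), and collapse the separate init/reset slot-table paths into one nfs4_realloc_slot_table(). A compact userspace sketch of that (re)allocation pattern, with invented struct names and the locking omitted:

#include <stdio.h>
#include <stdlib.h>

struct slot { unsigned int seq_nr; };
struct slot_table { struct slot *slots; unsigned int max_slots; };

static int realloc_slot_table(struct slot_table *tbl, unsigned int max_reqs,
                              unsigned int ivalue)
{
    struct slot *new = NULL, *old = NULL;
    unsigned int i;

    if (max_reqs != tbl->max_slots) {
        new = calloc(max_reqs, sizeof(*new));
        if (!new)
            return -1;                 /* -ENOMEM in the kernel        */
    }
    /* in the kernel the swap below happens under slot_tbl_lock        */
    if (new) {
        old = tbl->slots;
        tbl->slots = new;
        tbl->max_slots = max_reqs;
    }
    for (i = 0; i < tbl->max_slots; i++)
        tbl->slots[i].seq_nr = ivalue;  /* reinitialise every slot      */
    free(old);
    return 0;
}

int main(void)
{
    struct slot_table tbl = { NULL, 0 };

    if (realloc_slot_table(&tbl, 4, 1) != 0)   /* first use: allocates   */
        return 1;
    realloc_slot_table(&tbl, 4, 1);            /* same size: reinit only */
    printf("max=%u seq0=%u\n", tbl.max_slots, tbl.slots[0].seq_nr);
    free(tbl.slots);
    return 0;
}
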
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index a53f33b4ac3a..45392032e7bd 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c | |||
@@ -1132,6 +1132,8 @@ void nfs4_schedule_stateid_recovery(const struct nfs_server *server, struct nfs4 | |||
1132 | { | 1132 | { |
1133 | struct nfs_client *clp = server->nfs_client; | 1133 | struct nfs_client *clp = server->nfs_client; |
1134 | 1134 | ||
1135 | if (test_and_clear_bit(NFS_DELEGATED_STATE, &state->flags)) | ||
1136 | nfs_async_inode_return_delegation(state->inode, &state->stateid); | ||
1135 | nfs4_state_mark_reclaim_nograce(clp, state); | 1137 | nfs4_state_mark_reclaim_nograce(clp, state); |
1136 | nfs4_schedule_state_manager(clp); | 1138 | nfs4_schedule_state_manager(clp); |
1137 | } | 1139 | } |
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index 95e92e438407..33bd8d0f745d 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c | |||
@@ -2522,7 +2522,6 @@ static void nfs4_xdr_enc_getacl(struct rpc_rqst *req, struct xdr_stream *xdr, | |||
2522 | 2522 | ||
2523 | xdr_inline_pages(&req->rq_rcv_buf, replen << 2, | 2523 | xdr_inline_pages(&req->rq_rcv_buf, replen << 2, |
2524 | args->acl_pages, args->acl_pgbase, args->acl_len); | 2524 | args->acl_pages, args->acl_pgbase, args->acl_len); |
2525 | xdr_set_scratch_buffer(xdr, page_address(args->acl_scratch), PAGE_SIZE); | ||
2526 | 2525 | ||
2527 | encode_nops(&hdr); | 2526 | encode_nops(&hdr); |
2528 | } | 2527 | } |
@@ -6032,6 +6031,10 @@ nfs4_xdr_dec_getacl(struct rpc_rqst *rqstp, struct xdr_stream *xdr, | |||
6032 | struct compound_hdr hdr; | 6031 | struct compound_hdr hdr; |
6033 | int status; | 6032 | int status; |
6034 | 6033 | ||
6034 | if (res->acl_scratch != NULL) { | ||
6035 | void *p = page_address(res->acl_scratch); | ||
6036 | xdr_set_scratch_buffer(xdr, p, PAGE_SIZE); | ||
6037 | } | ||
6035 | status = decode_compound_hdr(xdr, &hdr); | 6038 | status = decode_compound_hdr(xdr, &hdr); |
6036 | if (status) | 6039 | if (status) |
6037 | goto out; | 6040 | goto out; |
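
On the XDR side the scratch buffer is now installed at decode time, and only when the reply actually carries a scratch page. A small illustration of that guard, with hypothetical names and a fixed 4096-byte page:

#include <stdio.h>

struct getacl_res { void *acl_scratch; };

static void set_scratch_buffer(void *p, unsigned int len)
{
    printf("scratch buffer %p, %u bytes\n", p, len);
}

static void decode_getacl(struct getacl_res *res)
{
    if (res->acl_scratch != NULL)                   /* multi-page reply only */
        set_scratch_buffer(res->acl_scratch, 4096);
    /* decode_compound_hdr() and the rest of the decode would follow here    */
}

int main(void)
{
    char page[4096];
    struct getacl_res small = { NULL }, large = { page };

    decode_getacl(&small);    /* single-page reply: nothing to set up */
    decode_getacl(&large);
    return 0;
}
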
diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c index 886649627c3d..2a70fce70c65 100644 --- a/fs/nilfs2/ioctl.c +++ b/fs/nilfs2/ioctl.c | |||
@@ -603,6 +603,8 @@ static int nilfs_ioctl_clean_segments(struct inode *inode, struct file *filp, | |||
603 | nsegs = argv[4].v_nmembs; | 603 | nsegs = argv[4].v_nmembs; |
604 | if (argv[4].v_size != argsz[4]) | 604 | if (argv[4].v_size != argsz[4]) |
605 | goto out; | 605 | goto out; |
606 | if (nsegs > UINT_MAX / sizeof(__u64)) | ||
607 | goto out; | ||
606 | 608 | ||
607 | /* | 609 | /* |
608 | * argv[4] points to segment numbers this ioctl cleans. We | 610 | * argv[4] points to segment numbers this ioctl cleans. We |
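
The new nilfs2 ioctl check rejects segment counts whose byte size would overflow before any buffer is allocated. A runnable sketch of the same guard:

#include <limits.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static void *alloc_segnums(unsigned int nsegs)
{
    if (nsegs > UINT_MAX / sizeof(uint64_t))   /* nsegs * 8 would wrap */
        return NULL;
    return malloc((size_t)nsegs * sizeof(uint64_t));
}

int main(void)
{
    void *ok  = alloc_segnums(16);
    void *bad = alloc_segnums(UINT_MAX);       /* rejected, no wrap    */

    printf("ok=%d bad=%d\n", ok != NULL, bad != NULL);
    free(ok);
    return 0;
}
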
diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c index d32714094375..501b7f8b739f 100644 --- a/fs/nilfs2/the_nilfs.c +++ b/fs/nilfs2/the_nilfs.c | |||
@@ -409,6 +409,12 @@ static int nilfs_store_disk_layout(struct the_nilfs *nilfs, | |||
409 | nilfs->ns_first_data_block = le64_to_cpu(sbp->s_first_data_block); | 409 | nilfs->ns_first_data_block = le64_to_cpu(sbp->s_first_data_block); |
410 | nilfs->ns_r_segments_percentage = | 410 | nilfs->ns_r_segments_percentage = |
411 | le32_to_cpu(sbp->s_r_segments_percentage); | 411 | le32_to_cpu(sbp->s_r_segments_percentage); |
412 | if (nilfs->ns_r_segments_percentage < 1 || | ||
413 | nilfs->ns_r_segments_percentage > 99) { | ||
414 | printk(KERN_ERR "NILFS: invalid reserved segments percentage.\n"); | ||
415 | return -EINVAL; | ||
416 | } | ||
417 | |||
412 | nilfs_set_nsegments(nilfs, le64_to_cpu(sbp->s_nsegments)); | 418 | nilfs_set_nsegments(nilfs, le64_to_cpu(sbp->s_nsegments)); |
413 | nilfs->ns_crc_seed = le32_to_cpu(sbp->s_crc_seed); | 419 | nilfs->ns_crc_seed = le32_to_cpu(sbp->s_crc_seed); |
414 | return 0; | 420 | return 0; |
@@ -515,6 +521,7 @@ static int nilfs_load_super_block(struct the_nilfs *nilfs, | |||
515 | brelse(sbh[1]); | 521 | brelse(sbh[1]); |
516 | sbh[1] = NULL; | 522 | sbh[1] = NULL; |
517 | sbp[1] = NULL; | 523 | sbp[1] = NULL; |
524 | valid[1] = 0; | ||
518 | swp = 0; | 525 | swp = 0; |
519 | } | 526 | } |
520 | if (!valid[swp]) { | 527 | if (!valid[swp]) { |
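
the_nilfs.c additionally validates the reserved-segments percentage read from the superblock and clears valid[1] when the second superblock is dropped, so a later NULL dereference cannot happen. A trivial model of the range check (the error value stands in for -EINVAL):

#include <stdio.h>

static int check_r_segments_percentage(unsigned int pct)
{
    if (pct < 1 || pct > 99)
        return -22;            /* -EINVAL */
    return 0;
}

int main(void)
{
    printf("%d %d %d\n",
           check_r_segments_percentage(0),
           check_r_segments_percentage(5),
           check_r_segments_percentage(100));
    return 0;
}
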
diff --git a/fs/ntfs/attrib.c b/fs/ntfs/attrib.c index f14fde2b03d6..e0281992ddc3 100644 --- a/fs/ntfs/attrib.c +++ b/fs/ntfs/attrib.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /** | 1 | /** |
2 | * attrib.c - NTFS attribute operations. Part of the Linux-NTFS project. | 2 | * attrib.c - NTFS attribute operations. Part of the Linux-NTFS project. |
3 | * | 3 | * |
4 | * Copyright (c) 2001-2007 Anton Altaparmakov | 4 | * Copyright (c) 2001-2012 Anton Altaparmakov and Tuxera Inc. |
5 | * Copyright (c) 2002 Richard Russon | 5 | * Copyright (c) 2002 Richard Russon |
6 | * | 6 | * |
7 | * This program/include file is free software; you can redistribute it and/or | 7 | * This program/include file is free software; you can redistribute it and/or |
@@ -345,10 +345,10 @@ LCN ntfs_attr_vcn_to_lcn_nolock(ntfs_inode *ni, const VCN vcn, | |||
345 | unsigned long flags; | 345 | unsigned long flags; |
346 | bool is_retry = false; | 346 | bool is_retry = false; |
347 | 347 | ||
348 | BUG_ON(!ni); | ||
348 | ntfs_debug("Entering for i_ino 0x%lx, vcn 0x%llx, %s_locked.", | 349 | ntfs_debug("Entering for i_ino 0x%lx, vcn 0x%llx, %s_locked.", |
349 | ni->mft_no, (unsigned long long)vcn, | 350 | ni->mft_no, (unsigned long long)vcn, |
350 | write_locked ? "write" : "read"); | 351 | write_locked ? "write" : "read"); |
351 | BUG_ON(!ni); | ||
352 | BUG_ON(!NInoNonResident(ni)); | 352 | BUG_ON(!NInoNonResident(ni)); |
353 | BUG_ON(vcn < 0); | 353 | BUG_ON(vcn < 0); |
354 | if (!ni->runlist.rl) { | 354 | if (!ni->runlist.rl) { |
@@ -469,9 +469,9 @@ runlist_element *ntfs_attr_find_vcn_nolock(ntfs_inode *ni, const VCN vcn, | |||
469 | int err = 0; | 469 | int err = 0; |
470 | bool is_retry = false; | 470 | bool is_retry = false; |
471 | 471 | ||
472 | BUG_ON(!ni); | ||
472 | ntfs_debug("Entering for i_ino 0x%lx, vcn 0x%llx, with%s ctx.", | 473 | ntfs_debug("Entering for i_ino 0x%lx, vcn 0x%llx, with%s ctx.", |
473 | ni->mft_no, (unsigned long long)vcn, ctx ? "" : "out"); | 474 | ni->mft_no, (unsigned long long)vcn, ctx ? "" : "out"); |
474 | BUG_ON(!ni); | ||
475 | BUG_ON(!NInoNonResident(ni)); | 475 | BUG_ON(!NInoNonResident(ni)); |
476 | BUG_ON(vcn < 0); | 476 | BUG_ON(vcn < 0); |
477 | if (!ni->runlist.rl) { | 477 | if (!ni->runlist.rl) { |
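
In ntfs/attrib.c the BUG_ON(!ni) assertions move ahead of the ntfs_debug() calls, which already dereference ni, so the check fires before the invalid access rather than after it. A tiny illustration of checking before the first dereference, with invented names:

#include <assert.h>
#include <stdio.h>

struct node { unsigned long id; };

static void enter(const struct node *n)
{
    assert(n != NULL);                  /* check before any dereference */
    printf("entering for id 0x%lx\n", n->id);
}

int main(void)
{
    struct node n = { 42 };
    enter(&n);
    return 0;
}
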
diff --git a/fs/ntfs/mft.c b/fs/ntfs/mft.c index 382857f9c7db..3014a36a255b 100644 --- a/fs/ntfs/mft.c +++ b/fs/ntfs/mft.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /** | 1 | /** |
2 | * mft.c - NTFS kernel mft record operations. Part of the Linux-NTFS project. | 2 | * mft.c - NTFS kernel mft record operations. Part of the Linux-NTFS project. |
3 | * | 3 | * |
4 | * Copyright (c) 2001-2011 Anton Altaparmakov and Tuxera Inc. | 4 | * Copyright (c) 2001-2012 Anton Altaparmakov and Tuxera Inc. |
5 | * Copyright (c) 2002 Richard Russon | 5 | * Copyright (c) 2002 Richard Russon |
6 | * | 6 | * |
7 | * This program/include file is free software; you can redistribute it and/or | 7 | * This program/include file is free software; you can redistribute it and/or |
@@ -1367,7 +1367,7 @@ static int ntfs_mft_bitmap_extend_allocation_nolock(ntfs_volume *vol) | |||
1367 | ntfs_error(vol->sb, "Failed to merge runlists for mft " | 1367 | ntfs_error(vol->sb, "Failed to merge runlists for mft " |
1368 | "bitmap."); | 1368 | "bitmap."); |
1369 | if (ntfs_cluster_free_from_rl(vol, rl2)) { | 1369 | if (ntfs_cluster_free_from_rl(vol, rl2)) { |
1370 | ntfs_error(vol->sb, "Failed to dealocate " | 1370 | ntfs_error(vol->sb, "Failed to deallocate " |
1371 | "allocated cluster.%s", es); | 1371 | "allocated cluster.%s", es); |
1372 | NVolSetErrors(vol); | 1372 | NVolSetErrors(vol); |
1373 | } | 1373 | } |
@@ -1805,7 +1805,7 @@ static int ntfs_mft_data_extend_allocation_nolock(ntfs_volume *vol) | |||
1805 | ntfs_error(vol->sb, "Failed to merge runlists for mft data " | 1805 | ntfs_error(vol->sb, "Failed to merge runlists for mft data " |
1806 | "attribute."); | 1806 | "attribute."); |
1807 | if (ntfs_cluster_free_from_rl(vol, rl2)) { | 1807 | if (ntfs_cluster_free_from_rl(vol, rl2)) { |
1808 | ntfs_error(vol->sb, "Failed to dealocate clusters " | 1808 | ntfs_error(vol->sb, "Failed to deallocate clusters " |
1809 | "from the mft data attribute.%s", es); | 1809 | "from the mft data attribute.%s", es); |
1810 | NVolSetErrors(vol); | 1810 | NVolSetErrors(vol); |
1811 | } | 1811 | } |
diff --git a/fs/ntfs/super.c b/fs/ntfs/super.c index 5a4a8af5c406..f907611cca73 100644 --- a/fs/ntfs/super.c +++ b/fs/ntfs/super.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * super.c - NTFS kernel super block handling. Part of the Linux-NTFS project. | 2 | * super.c - NTFS kernel super block handling. Part of the Linux-NTFS project. |
3 | * | 3 | * |
4 | * Copyright (c) 2001-2011 Anton Altaparmakov and Tuxera Inc. | 4 | * Copyright (c) 2001-2012 Anton Altaparmakov and Tuxera Inc. |
5 | * Copyright (c) 2001,2002 Richard Russon | 5 | * Copyright (c) 2001,2002 Richard Russon |
6 | * | 6 | * |
7 | * This program/include file is free software; you can redistribute it and/or | 7 | * This program/include file is free software; you can redistribute it and/or |
@@ -1239,7 +1239,6 @@ static int check_windows_hibernation_status(ntfs_volume *vol) | |||
1239 | { | 1239 | { |
1240 | MFT_REF mref; | 1240 | MFT_REF mref; |
1241 | struct inode *vi; | 1241 | struct inode *vi; |
1242 | ntfs_inode *ni; | ||
1243 | struct page *page; | 1242 | struct page *page; |
1244 | u32 *kaddr, *kend; | 1243 | u32 *kaddr, *kend; |
1245 | ntfs_name *name = NULL; | 1244 | ntfs_name *name = NULL; |
@@ -1290,7 +1289,6 @@ static int check_windows_hibernation_status(ntfs_volume *vol) | |||
1290 | "is not the system volume.", i_size_read(vi)); | 1289 | "is not the system volume.", i_size_read(vi)); |
1291 | goto iput_out; | 1290 | goto iput_out; |
1292 | } | 1291 | } |
1293 | ni = NTFS_I(vi); | ||
1294 | page = ntfs_map_page(vi->i_mapping, 0); | 1292 | page = ntfs_map_page(vi->i_mapping, 0); |
1295 | if (IS_ERR(page)) { | 1293 | if (IS_ERR(page)) { |
1296 | ntfs_error(vol->sb, "Failed to read from hiberfil.sys."); | 1294 | ntfs_error(vol->sb, "Failed to read from hiberfil.sys."); |
diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c index be244692550d..a9856e3eaaf0 100644 --- a/fs/ocfs2/namei.c +++ b/fs/ocfs2/namei.c | |||
@@ -1053,7 +1053,7 @@ static int ocfs2_rename(struct inode *old_dir, | |||
1053 | handle_t *handle = NULL; | 1053 | handle_t *handle = NULL; |
1054 | struct buffer_head *old_dir_bh = NULL; | 1054 | struct buffer_head *old_dir_bh = NULL; |
1055 | struct buffer_head *new_dir_bh = NULL; | 1055 | struct buffer_head *new_dir_bh = NULL; |
1056 | nlink_t old_dir_nlink = old_dir->i_nlink; | 1056 | u32 old_dir_nlink = old_dir->i_nlink; |
1057 | struct ocfs2_dinode *old_di; | 1057 | struct ocfs2_dinode *old_di; |
1058 | struct ocfs2_dir_lookup_result old_inode_dot_dot_res = { NULL, }; | 1058 | struct ocfs2_dir_lookup_result old_inode_dot_dot_res = { NULL, }; |
1059 | struct ocfs2_dir_lookup_result target_lookup_res = { NULL, }; | 1059 | struct ocfs2_dir_lookup_result target_lookup_res = { NULL, }; |
diff --git a/fs/proc/base.c b/fs/proc/base.c index 9cde9edf9c4d..d4548dd49b02 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c | |||
@@ -198,26 +198,6 @@ static int proc_root_link(struct dentry *dentry, struct path *path) | |||
198 | return result; | 198 | return result; |
199 | } | 199 | } |
200 | 200 | ||
201 | static struct mm_struct *mm_access(struct task_struct *task, unsigned int mode) | ||
202 | { | ||
203 | struct mm_struct *mm; | ||
204 | int err; | ||
205 | |||
206 | err = mutex_lock_killable(&task->signal->cred_guard_mutex); | ||
207 | if (err) | ||
208 | return ERR_PTR(err); | ||
209 | |||
210 | mm = get_task_mm(task); | ||
211 | if (mm && mm != current->mm && | ||
212 | !ptrace_may_access(task, mode)) { | ||
213 | mmput(mm); | ||
214 | mm = ERR_PTR(-EACCES); | ||
215 | } | ||
216 | mutex_unlock(&task->signal->cred_guard_mutex); | ||
217 | |||
218 | return mm; | ||
219 | } | ||
220 | |||
221 | struct mm_struct *mm_for_maps(struct task_struct *task) | 201 | struct mm_struct *mm_for_maps(struct task_struct *task) |
222 | { | 202 | { |
223 | return mm_access(task, PTRACE_MODE_READ); | 203 | return mm_access(task, PTRACE_MODE_READ); |
@@ -711,6 +691,13 @@ static int mem_open(struct inode* inode, struct file* file) | |||
711 | if (IS_ERR(mm)) | 691 | if (IS_ERR(mm)) |
712 | return PTR_ERR(mm); | 692 | return PTR_ERR(mm); |
713 | 693 | ||
694 | if (mm) { | ||
695 | /* ensure this mm_struct can't be freed */ | ||
696 | atomic_inc(&mm->mm_count); | ||
697 | /* but do not pin its memory */ | ||
698 | mmput(mm); | ||
699 | } | ||
700 | |||
714 | /* OK to pass negative loff_t, we can catch out-of-range */ | 701 | /* OK to pass negative loff_t, we can catch out-of-range */ |
715 | file->f_mode |= FMODE_UNSIGNED_OFFSET; | 702 | file->f_mode |= FMODE_UNSIGNED_OFFSET; |
716 | file->private_data = mm; | 703 | file->private_data = mm; |
@@ -718,57 +705,13 @@ static int mem_open(struct inode* inode, struct file* file) | |||
718 | return 0; | 705 | return 0; |
719 | } | 706 | } |
720 | 707 | ||
721 | static ssize_t mem_read(struct file * file, char __user * buf, | 708 | static ssize_t mem_rw(struct file *file, char __user *buf, |
722 | size_t count, loff_t *ppos) | 709 | size_t count, loff_t *ppos, int write) |
723 | { | 710 | { |
724 | int ret; | ||
725 | char *page; | ||
726 | unsigned long src = *ppos; | ||
727 | struct mm_struct *mm = file->private_data; | 711 | struct mm_struct *mm = file->private_data; |
728 | 712 | unsigned long addr = *ppos; | |
729 | if (!mm) | 713 | ssize_t copied; |
730 | return 0; | ||
731 | |||
732 | page = (char *)__get_free_page(GFP_TEMPORARY); | ||
733 | if (!page) | ||
734 | return -ENOMEM; | ||
735 | |||
736 | ret = 0; | ||
737 | |||
738 | while (count > 0) { | ||
739 | int this_len, retval; | ||
740 | |||
741 | this_len = (count > PAGE_SIZE) ? PAGE_SIZE : count; | ||
742 | retval = access_remote_vm(mm, src, page, this_len, 0); | ||
743 | if (!retval) { | ||
744 | if (!ret) | ||
745 | ret = -EIO; | ||
746 | break; | ||
747 | } | ||
748 | |||
749 | if (copy_to_user(buf, page, retval)) { | ||
750 | ret = -EFAULT; | ||
751 | break; | ||
752 | } | ||
753 | |||
754 | ret += retval; | ||
755 | src += retval; | ||
756 | buf += retval; | ||
757 | count -= retval; | ||
758 | } | ||
759 | *ppos = src; | ||
760 | |||
761 | free_page((unsigned long) page); | ||
762 | return ret; | ||
763 | } | ||
764 | |||
765 | static ssize_t mem_write(struct file * file, const char __user *buf, | ||
766 | size_t count, loff_t *ppos) | ||
767 | { | ||
768 | int copied; | ||
769 | char *page; | 714 | char *page; |
770 | unsigned long dst = *ppos; | ||
771 | struct mm_struct *mm = file->private_data; | ||
772 | 715 | ||
773 | if (!mm) | 716 | if (!mm) |
774 | return 0; | 717 | return 0; |
@@ -778,31 +721,54 @@ static ssize_t mem_write(struct file * file, const char __user *buf, | |||
778 | return -ENOMEM; | 721 | return -ENOMEM; |
779 | 722 | ||
780 | copied = 0; | 723 | copied = 0; |
724 | if (!atomic_inc_not_zero(&mm->mm_users)) | ||
725 | goto free; | ||
726 | |||
781 | while (count > 0) { | 727 | while (count > 0) { |
782 | int this_len, retval; | 728 | int this_len = min_t(int, count, PAGE_SIZE); |
783 | 729 | ||
784 | this_len = (count > PAGE_SIZE) ? PAGE_SIZE : count; | 730 | if (write && copy_from_user(page, buf, this_len)) { |
785 | if (copy_from_user(page, buf, this_len)) { | ||
786 | copied = -EFAULT; | 731 | copied = -EFAULT; |
787 | break; | 732 | break; |
788 | } | 733 | } |
789 | retval = access_remote_vm(mm, dst, page, this_len, 1); | 734 | |
790 | if (!retval) { | 735 | this_len = access_remote_vm(mm, addr, page, this_len, write); |
736 | if (!this_len) { | ||
791 | if (!copied) | 737 | if (!copied) |
792 | copied = -EIO; | 738 | copied = -EIO; |
793 | break; | 739 | break; |
794 | } | 740 | } |
795 | copied += retval; | 741 | |
796 | buf += retval; | 742 | if (!write && copy_to_user(buf, page, this_len)) { |
797 | dst += retval; | 743 | copied = -EFAULT; |
798 | count -= retval; | 744 | break; |
745 | } | ||
746 | |||
747 | buf += this_len; | ||
748 | addr += this_len; | ||
749 | copied += this_len; | ||
750 | count -= this_len; | ||
799 | } | 751 | } |
800 | *ppos = dst; | 752 | *ppos = addr; |
801 | 753 | ||
754 | mmput(mm); | ||
755 | free: | ||
802 | free_page((unsigned long) page); | 756 | free_page((unsigned long) page); |
803 | return copied; | 757 | return copied; |
804 | } | 758 | } |
805 | 759 | ||
760 | static ssize_t mem_read(struct file *file, char __user *buf, | ||
761 | size_t count, loff_t *ppos) | ||
762 | { | ||
763 | return mem_rw(file, buf, count, ppos, 0); | ||
764 | } | ||
765 | |||
766 | static ssize_t mem_write(struct file *file, const char __user *buf, | ||
767 | size_t count, loff_t *ppos) | ||
768 | { | ||
769 | return mem_rw(file, (char __user*)buf, count, ppos, 1); | ||
770 | } | ||
771 | |||
806 | loff_t mem_lseek(struct file *file, loff_t offset, int orig) | 772 | loff_t mem_lseek(struct file *file, loff_t offset, int orig) |
807 | { | 773 | { |
808 | switch (orig) { | 774 | switch (orig) { |
@@ -822,8 +788,8 @@ loff_t mem_lseek(struct file *file, loff_t offset, int orig) | |||
822 | static int mem_release(struct inode *inode, struct file *file) | 788 | static int mem_release(struct inode *inode, struct file *file) |
823 | { | 789 | { |
824 | struct mm_struct *mm = file->private_data; | 790 | struct mm_struct *mm = file->private_data; |
825 | 791 | if (mm) | |
826 | mmput(mm); | 792 | mmdrop(mm); |
827 | return 0; | 793 | return 0; |
828 | } | 794 | } |
829 | 795 | ||
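
The /proc/<pid>/mem rework pins only the mm_struct at open time (mm_count), takes the address-space reference (mm_users) with an inc-not-zero around each transfer, merges mem_read()/mem_write() into a single mem_rw(), and releases with mmdrop(). A userspace sketch of that two-level reference pattern; every name below is invented and the counters are not atomic:

#include <stdbool.h>
#include <stdio.h>

struct fake_mm { int count; int users; };

static bool get_users_not_zero(struct fake_mm *mm)
{
    if (mm->users == 0)               /* address space already torn down */
        return false;
    mm->users++;
    return true;
}

static void open_handle(struct fake_mm *mm)
{
    mm->count++;                      /* pin the struct, not its memory  */
}

static int do_io(struct fake_mm *mm)
{
    if (!get_users_not_zero(mm))
        return 0;                     /* behaves like a zero-sized read  */
    /* access_remote_vm()-style copying would happen here                */
    mm->users--;                      /* mmput()                         */
    return 1;
}

static void release_handle(struct fake_mm *mm)
{
    mm->count--;                      /* mmdrop()                        */
}

int main(void)
{
    struct fake_mm mm = { .count = 1, .users = 1 };

    open_handle(&mm);
    mm.users--;                       /* task exits, address space goes  */
    printf("io=%d\n", do_io(&mm));    /* io=0: no use of freed memory    */
    release_handle(&mm);
    return 0;
}
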
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index e418c5abdb0e..7dcd2a250495 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c | |||
@@ -518,6 +518,9 @@ static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr, | |||
518 | if (!page) | 518 | if (!page) |
519 | continue; | 519 | continue; |
520 | 520 | ||
521 | if (PageReserved(page)) | ||
522 | continue; | ||
523 | |||
521 | /* Clear accessed and referenced bits. */ | 524 | /* Clear accessed and referenced bits. */ |
522 | ptep_test_and_clear_young(vma, addr, pte); | 525 | ptep_test_and_clear_young(vma, addr, pte); |
523 | ClearPageReferenced(page); | 526 | ClearPageReferenced(page); |
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c index 5ec59b20cf76..46741970371b 100644 --- a/fs/quota/dquot.c +++ b/fs/quota/dquot.c | |||
@@ -2125,6 +2125,8 @@ static int vfs_load_quota_inode(struct inode *inode, int type, int format_id, | |||
2125 | mutex_unlock(&dqopt->dqio_mutex); | 2125 | mutex_unlock(&dqopt->dqio_mutex); |
2126 | goto out_file_init; | 2126 | goto out_file_init; |
2127 | } | 2127 | } |
2128 | if (dqopt->flags & DQUOT_QUOTA_SYS_FILE) | ||
2129 | dqopt->info[type].dqi_flags |= DQF_SYS_FILE; | ||
2128 | mutex_unlock(&dqopt->dqio_mutex); | 2130 | mutex_unlock(&dqopt->dqio_mutex); |
2129 | spin_lock(&dq_state_lock); | 2131 | spin_lock(&dq_state_lock); |
2130 | dqopt->flags |= dquot_state_flag(flags, type); | 2132 | dqopt->flags |= dquot_state_flag(flags, type); |
@@ -2464,7 +2466,7 @@ int dquot_get_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii) | |||
2464 | spin_lock(&dq_data_lock); | 2466 | spin_lock(&dq_data_lock); |
2465 | ii->dqi_bgrace = mi->dqi_bgrace; | 2467 | ii->dqi_bgrace = mi->dqi_bgrace; |
2466 | ii->dqi_igrace = mi->dqi_igrace; | 2468 | ii->dqi_igrace = mi->dqi_igrace; |
2467 | ii->dqi_flags = mi->dqi_flags & DQF_MASK; | 2469 | ii->dqi_flags = mi->dqi_flags & DQF_GETINFO_MASK; |
2468 | ii->dqi_valid = IIF_ALL; | 2470 | ii->dqi_valid = IIF_ALL; |
2469 | spin_unlock(&dq_data_lock); | 2471 | spin_unlock(&dq_data_lock); |
2470 | mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex); | 2472 | mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex); |
@@ -2490,8 +2492,8 @@ int dquot_set_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii) | |||
2490 | if (ii->dqi_valid & IIF_IGRACE) | 2492 | if (ii->dqi_valid & IIF_IGRACE) |
2491 | mi->dqi_igrace = ii->dqi_igrace; | 2493 | mi->dqi_igrace = ii->dqi_igrace; |
2492 | if (ii->dqi_valid & IIF_FLAGS) | 2494 | if (ii->dqi_valid & IIF_FLAGS) |
2493 | mi->dqi_flags = (mi->dqi_flags & ~DQF_MASK) | | 2495 | mi->dqi_flags = (mi->dqi_flags & ~DQF_SETINFO_MASK) | |
2494 | (ii->dqi_flags & DQF_MASK); | 2496 | (ii->dqi_flags & DQF_SETINFO_MASK); |
2495 | spin_unlock(&dq_data_lock); | 2497 | spin_unlock(&dq_data_lock); |
2496 | mark_info_dirty(sb, type); | 2498 | mark_info_dirty(sb, type); |
2497 | /* Force write to disk */ | 2499 | /* Force write to disk */ |
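
dquot.c now tags quota files that live inside the filesystem with DQF_SYS_FILE and splits the old DQF_MASK so userspace can see that flag through Q_GETINFO but cannot flip it through Q_SETINFO. A sketch of the get/set mask split; the flag values here are made up:

#include <stdio.h>

#define DQF_ROOT_SQUASH  0x0001u   /* settable by userspace    */
#define DQF_SYS_FILE     0x0002u   /* informational only       */
#define GETINFO_MASK     (DQF_ROOT_SQUASH | DQF_SYS_FILE)
#define SETINFO_MASK     (DQF_ROOT_SQUASH)

int main(void)
{
    unsigned int kernel_flags = DQF_SYS_FILE;
    unsigned int user_request = DQF_SYS_FILE | DQF_ROOT_SQUASH;
    unsigned int user_view;

    user_view = kernel_flags & GETINFO_MASK;             /* flag is visible   */
    kernel_flags = (kernel_flags & ~SETINFO_MASK) |
                   (user_request & SETINFO_MASK);        /* but not settable  */

    printf("view=%#x flags=%#x\n", user_view, kernel_flags);
    return 0;
}
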
diff --git a/fs/quota/quota.c b/fs/quota/quota.c index 7898cd688a00..fc2c4388d126 100644 --- a/fs/quota/quota.c +++ b/fs/quota/quota.c | |||
@@ -292,11 +292,26 @@ static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id, | |||
292 | } | 292 | } |
293 | } | 293 | } |
294 | 294 | ||
295 | /* Return 1 if 'cmd' will block on frozen filesystem */ | ||
296 | static int quotactl_cmd_write(int cmd) | ||
297 | { | ||
298 | switch (cmd) { | ||
299 | case Q_GETFMT: | ||
300 | case Q_GETINFO: | ||
301 | case Q_SYNC: | ||
302 | case Q_XGETQSTAT: | ||
303 | case Q_XGETQUOTA: | ||
304 | case Q_XQUOTASYNC: | ||
305 | return 0; | ||
306 | } | ||
307 | return 1; | ||
308 | } | ||
309 | |||
295 | /* | 310 | /* |
296 | * look up a superblock on which quota ops will be performed | 311 | * look up a superblock on which quota ops will be performed |
297 | * - use the name of a block device to find the superblock thereon | 312 | * - use the name of a block device to find the superblock thereon |
298 | */ | 313 | */ |
299 | static struct super_block *quotactl_block(const char __user *special) | 314 | static struct super_block *quotactl_block(const char __user *special, int cmd) |
300 | { | 315 | { |
301 | #ifdef CONFIG_BLOCK | 316 | #ifdef CONFIG_BLOCK |
302 | struct block_device *bdev; | 317 | struct block_device *bdev; |
@@ -309,7 +324,10 @@ static struct super_block *quotactl_block(const char __user *special) | |||
309 | putname(tmp); | 324 | putname(tmp); |
310 | if (IS_ERR(bdev)) | 325 | if (IS_ERR(bdev)) |
311 | return ERR_CAST(bdev); | 326 | return ERR_CAST(bdev); |
312 | sb = get_super(bdev); | 327 | if (quotactl_cmd_write(cmd)) |
328 | sb = get_super_thawed(bdev); | ||
329 | else | ||
330 | sb = get_super(bdev); | ||
313 | bdput(bdev); | 331 | bdput(bdev); |
314 | if (!sb) | 332 | if (!sb) |
315 | return ERR_PTR(-ENODEV); | 333 | return ERR_PTR(-ENODEV); |
@@ -361,7 +379,7 @@ SYSCALL_DEFINE4(quotactl, unsigned int, cmd, const char __user *, special, | |||
361 | pathp = &path; | 379 | pathp = &path; |
362 | } | 380 | } |
363 | 381 | ||
364 | sb = quotactl_block(special); | 382 | sb = quotactl_block(special, cmds); |
365 | if (IS_ERR(sb)) { | 383 | if (IS_ERR(sb)) { |
366 | ret = PTR_ERR(sb); | 384 | ret = PTR_ERR(sb); |
367 | goto out; | 385 | goto out; |
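
quotactl now classifies commands by whether they write, and only the writing ones go through the new get_super_thawed() so they block instead of deadlocking on a frozen filesystem. A minimal rendering of that classification; the command values are local to the sketch:

#include <stdio.h>

enum { Q_GETFMT, Q_GETINFO, Q_SYNC, Q_SETINFO, Q_SETQUOTA };

static int cmd_writes(int cmd)
{
    switch (cmd) {
    case Q_GETFMT:
    case Q_GETINFO:
    case Q_SYNC:
        return 0;          /* safe on a frozen filesystem */
    }
    return 1;              /* must wait until thawed      */
}

int main(void)
{
    printf("GETINFO writes? %d  SETQUOTA writes? %d\n",
           cmd_writes(Q_GETINFO), cmd_writes(Q_SETQUOTA));
    return 0;
}
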
diff --git a/fs/select.c b/fs/select.c index d33418fdc858..e782258d0de3 100644 --- a/fs/select.c +++ b/fs/select.c | |||
@@ -912,7 +912,7 @@ static long do_restart_poll(struct restart_block *restart_block) | |||
912 | } | 912 | } |
913 | 913 | ||
914 | SYSCALL_DEFINE3(poll, struct pollfd __user *, ufds, unsigned int, nfds, | 914 | SYSCALL_DEFINE3(poll, struct pollfd __user *, ufds, unsigned int, nfds, |
915 | long, timeout_msecs) | 915 | int, timeout_msecs) |
916 | { | 916 | { |
917 | struct timespec end_time, *to = NULL; | 917 | struct timespec end_time, *to = NULL; |
918 | int ret; | 918 | int ret; |
diff --git a/fs/signalfd.c b/fs/signalfd.c index 492465b451dd..7ae2a574cb25 100644 --- a/fs/signalfd.c +++ b/fs/signalfd.c | |||
@@ -30,6 +30,21 @@ | |||
30 | #include <linux/signalfd.h> | 30 | #include <linux/signalfd.h> |
31 | #include <linux/syscalls.h> | 31 | #include <linux/syscalls.h> |
32 | 32 | ||
33 | void signalfd_cleanup(struct sighand_struct *sighand) | ||
34 | { | ||
35 | wait_queue_head_t *wqh = &sighand->signalfd_wqh; | ||
36 | /* | ||
37 | * The lockless check can race with remove_wait_queue() in progress, | ||
38 | * but in this case its caller should run under rcu_read_lock() and | ||
39 | * sighand_cachep is SLAB_DESTROY_BY_RCU, we can safely return. | ||
40 | */ | ||
41 | if (likely(!waitqueue_active(wqh))) | ||
42 | return; | ||
43 | |||
44 | /* wait_queue_t->func(POLLFREE) should do remove_wait_queue() */ | ||
45 | wake_up_poll(wqh, POLLHUP | POLLFREE); | ||
46 | } | ||
47 | |||
33 | struct signalfd_ctx { | 48 | struct signalfd_ctx { |
34 | sigset_t sigmask; | 49 | sigset_t sigmask; |
35 | }; | 50 | }; |
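
signalfd_cleanup() gives sighand teardown a way to kick any remaining signalfd waiters: a lockless waitqueue_active() fast path when nobody waits, otherwise a wake-up that tells waiters to detach before the structure is freed. A simplified single-threaded model with invented names:

#include <stdio.h>

struct waitq { int nr_waiters; };

static void wake_all(struct waitq *wq)
{
    wq->nr_waiters = 0;        /* waiters remove themselves on wake-up  */
}

static void cleanup(struct waitq *wq)
{
    if (wq->nr_waiters == 0)   /* likely case: plain lockless check     */
        return;
    wake_all(wq);              /* POLLHUP | POLLFREE in the real code   */
}

int main(void)
{
    struct waitq idle = { 0 }, busy = { 3 };

    cleanup(&idle);
    cleanup(&busy);
    printf("%d %d\n", idle.nr_waiters, busy.nr_waiters);
    return 0;
}
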
diff --git a/fs/super.c b/fs/super.c index 6015c02296b7..6277ec6cb60a 100644 --- a/fs/super.c +++ b/fs/super.c | |||
@@ -634,6 +634,28 @@ rescan: | |||
634 | EXPORT_SYMBOL(get_super); | 634 | EXPORT_SYMBOL(get_super); |
635 | 635 | ||
636 | /** | 636 | /** |
637 | * get_super_thawed - get thawed superblock of a device | ||
638 | * @bdev: device to get the superblock for | ||
639 | * | ||
640 | * Scans the superblock list and finds the superblock of the file system | ||
641 | * mounted on the device. The superblock is returned once it is thawed | ||
642 | * (or immediately if it was not frozen). %NULL is returned if no match | ||
643 | * is found. | ||
644 | */ | ||
645 | struct super_block *get_super_thawed(struct block_device *bdev) | ||
646 | { | ||
647 | while (1) { | ||
648 | struct super_block *s = get_super(bdev); | ||
649 | if (!s || s->s_frozen == SB_UNFROZEN) | ||
650 | return s; | ||
651 | up_read(&s->s_umount); | ||
652 | vfs_check_frozen(s, SB_FREEZE_WRITE); | ||
653 | put_super(s); | ||
654 | } | ||
655 | } | ||
656 | EXPORT_SYMBOL(get_super_thawed); | ||
657 | |||
658 | /** | ||
637 | * get_active_super - get an active reference to the superblock of a device | 659 | * get_active_super - get an active reference to the superblock of a device |
638 | * @bdev: device to get the superblock for | 660 | * @bdev: device to get the superblock for |
639 | * | 661 | * |
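
get_super_thawed() itself is a retry loop: take the superblock, and if it is frozen, wait for the thaw, drop the reference, and look it up again. A userspace sketch with a simulated filesystem that thaws after two polls:

#include <stdio.h>

struct fake_sb { int frozen; };

static struct fake_sb fs = { .frozen = 2 };     /* thaws after two polls */

static struct fake_sb *get_super_sketch(void)   { return &fs; }
static void put_super_sketch(struct fake_sb *s) { (void)s; }
static void wait_for_thaw(struct fake_sb *s)    { if (s->frozen) s->frozen--; }

static struct fake_sb *get_super_thawed_sketch(void)
{
    for (;;) {
        struct fake_sb *s = get_super_sketch();
        if (!s || !s->frozen)
            return s;
        wait_for_thaw(s);          /* vfs_check_frozen() in the real code */
        put_super_sketch(s);       /* drop the reference and retry        */
    }
}

int main(void)
{
    struct fake_sb *s = get_super_thawed_sketch();
    printf("frozen=%d\n", s->frozen);
    return 0;
}
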
diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c index 62f4fb37789e..00012e31829d 100644 --- a/fs/sysfs/file.c +++ b/fs/sysfs/file.c | |||
@@ -493,6 +493,12 @@ int sysfs_attr_ns(struct kobject *kobj, const struct attribute *attr, | |||
493 | const void *ns = NULL; | 493 | const void *ns = NULL; |
494 | int err; | 494 | int err; |
495 | 495 | ||
496 | if (!dir_sd) { | ||
497 | WARN(1, KERN_ERR "sysfs: kobject %s without dirent\n", | ||
498 | kobject_name(kobj)); | ||
499 | return -ENOENT; | ||
500 | } | ||
501 | |||
496 | err = 0; | 502 | err = 0; |
497 | if (!sysfs_ns_type(dir_sd)) | 503 | if (!sysfs_ns_type(dir_sd)) |
498 | goto out; | 504 | goto out; |
diff --git a/fs/sysfs/inode.c b/fs/sysfs/inode.c index 4a802b4a9056..85eb81683a29 100644 --- a/fs/sysfs/inode.c +++ b/fs/sysfs/inode.c | |||
@@ -318,8 +318,11 @@ int sysfs_hash_and_remove(struct sysfs_dirent *dir_sd, const void *ns, const cha | |||
318 | struct sysfs_addrm_cxt acxt; | 318 | struct sysfs_addrm_cxt acxt; |
319 | struct sysfs_dirent *sd; | 319 | struct sysfs_dirent *sd; |
320 | 320 | ||
321 | if (!dir_sd) | 321 | if (!dir_sd) { |
322 | WARN(1, KERN_WARNING "sysfs: can not remove '%s', no directory\n", | ||
323 | name); | ||
322 | return -ENOENT; | 324 | return -ENOENT; |
325 | } | ||
323 | 326 | ||
324 | sysfs_addrm_start(&acxt, dir_sd); | 327 | sysfs_addrm_start(&acxt, dir_sd); |
325 | 328 | ||
diff --git a/fs/udf/file.c b/fs/udf/file.c index dca0c3881e82..d567b8448dfc 100644 --- a/fs/udf/file.c +++ b/fs/udf/file.c | |||
@@ -201,12 +201,10 @@ out: | |||
201 | static int udf_release_file(struct inode *inode, struct file *filp) | 201 | static int udf_release_file(struct inode *inode, struct file *filp) |
202 | { | 202 | { |
203 | if (filp->f_mode & FMODE_WRITE) { | 203 | if (filp->f_mode & FMODE_WRITE) { |
204 | mutex_lock(&inode->i_mutex); | ||
205 | down_write(&UDF_I(inode)->i_data_sem); | 204 | down_write(&UDF_I(inode)->i_data_sem); |
206 | udf_discard_prealloc(inode); | 205 | udf_discard_prealloc(inode); |
207 | udf_truncate_tail_extent(inode); | 206 | udf_truncate_tail_extent(inode); |
208 | up_write(&UDF_I(inode)->i_data_sem); | 207 | up_write(&UDF_I(inode)->i_data_sem); |
209 | mutex_unlock(&inode->i_mutex); | ||
210 | } | 208 | } |
211 | return 0; | 209 | return 0; |
212 | } | 210 | } |
diff --git a/fs/xfs/kmem.h b/fs/xfs/kmem.h index 292eff198030..ab7c53fe346e 100644 --- a/fs/xfs/kmem.h +++ b/fs/xfs/kmem.h | |||
@@ -110,10 +110,4 @@ kmem_zone_destroy(kmem_zone_t *zone) | |||
110 | extern void *kmem_zone_alloc(kmem_zone_t *, unsigned int __nocast); | 110 | extern void *kmem_zone_alloc(kmem_zone_t *, unsigned int __nocast); |
111 | extern void *kmem_zone_zalloc(kmem_zone_t *, unsigned int __nocast); | 111 | extern void *kmem_zone_zalloc(kmem_zone_t *, unsigned int __nocast); |
112 | 112 | ||
113 | static inline int | ||
114 | kmem_shake_allow(gfp_t gfp_mask) | ||
115 | { | ||
116 | return ((gfp_mask & __GFP_WAIT) && (gfp_mask & __GFP_FS)); | ||
117 | } | ||
118 | |||
119 | #endif /* __XFS_SUPPORT_KMEM_H__ */ | 113 | #endif /* __XFS_SUPPORT_KMEM_H__ */ |
diff --git a/fs/xfs/xfs_dquot.c b/fs/xfs/xfs_dquot.c index b4ff40b5f918..53db20ee3e77 100644 --- a/fs/xfs/xfs_dquot.c +++ b/fs/xfs/xfs_dquot.c | |||
@@ -63,82 +63,6 @@ int xfs_dqerror_mod = 33; | |||
63 | static struct lock_class_key xfs_dquot_other_class; | 63 | static struct lock_class_key xfs_dquot_other_class; |
64 | 64 | ||
65 | /* | 65 | /* |
66 | * Allocate and initialize a dquot. We don't always allocate fresh memory; | ||
67 | * we try to reclaim a free dquot if the number of incore dquots are above | ||
68 | * a threshold. | ||
69 | * The only field inside the core that gets initialized at this point | ||
70 | * is the d_id field. The idea is to fill in the entire q_core | ||
71 | * when we read in the on disk dquot. | ||
72 | */ | ||
73 | STATIC xfs_dquot_t * | ||
74 | xfs_qm_dqinit( | ||
75 | xfs_mount_t *mp, | ||
76 | xfs_dqid_t id, | ||
77 | uint type) | ||
78 | { | ||
79 | xfs_dquot_t *dqp; | ||
80 | boolean_t brandnewdquot; | ||
81 | |||
82 | brandnewdquot = xfs_qm_dqalloc_incore(&dqp); | ||
83 | dqp->dq_flags = type; | ||
84 | dqp->q_core.d_id = cpu_to_be32(id); | ||
85 | dqp->q_mount = mp; | ||
86 | |||
87 | /* | ||
88 | * No need to re-initialize these if this is a reclaimed dquot. | ||
89 | */ | ||
90 | if (brandnewdquot) { | ||
91 | INIT_LIST_HEAD(&dqp->q_freelist); | ||
92 | mutex_init(&dqp->q_qlock); | ||
93 | init_waitqueue_head(&dqp->q_pinwait); | ||
94 | |||
95 | /* | ||
96 | * Because we want to use a counting completion, complete | ||
97 | * the flush completion once to allow a single access to | ||
98 | * the flush completion without blocking. | ||
99 | */ | ||
100 | init_completion(&dqp->q_flush); | ||
101 | complete(&dqp->q_flush); | ||
102 | |||
103 | trace_xfs_dqinit(dqp); | ||
104 | } else { | ||
105 | /* | ||
106 | * Only the q_core portion was zeroed in dqreclaim_one(). | ||
107 | * So, we need to reset others. | ||
108 | */ | ||
109 | dqp->q_nrefs = 0; | ||
110 | dqp->q_blkno = 0; | ||
111 | INIT_LIST_HEAD(&dqp->q_mplist); | ||
112 | INIT_LIST_HEAD(&dqp->q_hashlist); | ||
113 | dqp->q_bufoffset = 0; | ||
114 | dqp->q_fileoffset = 0; | ||
115 | dqp->q_transp = NULL; | ||
116 | dqp->q_gdquot = NULL; | ||
117 | dqp->q_res_bcount = 0; | ||
118 | dqp->q_res_icount = 0; | ||
119 | dqp->q_res_rtbcount = 0; | ||
120 | atomic_set(&dqp->q_pincount, 0); | ||
121 | dqp->q_hash = NULL; | ||
122 | ASSERT(list_empty(&dqp->q_freelist)); | ||
123 | |||
124 | trace_xfs_dqreuse(dqp); | ||
125 | } | ||
126 | |||
127 | /* | ||
128 | * In either case we need to make sure group quotas have a different | ||
129 | * lock class than user quotas, to make sure lockdep knows we can | ||
130 | * locks of one of each at the same time. | ||
131 | */ | ||
132 | if (!(type & XFS_DQ_USER)) | ||
133 | lockdep_set_class(&dqp->q_qlock, &xfs_dquot_other_class); | ||
134 | |||
135 | /* | ||
136 | * log item gets initialized later | ||
137 | */ | ||
138 | return (dqp); | ||
139 | } | ||
140 | |||
141 | /* | ||
142 | * This is called to free all the memory associated with a dquot | 66 | * This is called to free all the memory associated with a dquot |
143 | */ | 67 | */ |
144 | void | 68 | void |
@@ -215,10 +139,10 @@ xfs_qm_adjust_dqtimers( | |||
215 | 139 | ||
216 | if (!d->d_btimer) { | 140 | if (!d->d_btimer) { |
217 | if ((d->d_blk_softlimit && | 141 | if ((d->d_blk_softlimit && |
218 | (be64_to_cpu(d->d_bcount) >= | 142 | (be64_to_cpu(d->d_bcount) > |
219 | be64_to_cpu(d->d_blk_softlimit))) || | 143 | be64_to_cpu(d->d_blk_softlimit))) || |
220 | (d->d_blk_hardlimit && | 144 | (d->d_blk_hardlimit && |
221 | (be64_to_cpu(d->d_bcount) >= | 145 | (be64_to_cpu(d->d_bcount) > |
222 | be64_to_cpu(d->d_blk_hardlimit)))) { | 146 | be64_to_cpu(d->d_blk_hardlimit)))) { |
223 | d->d_btimer = cpu_to_be32(get_seconds() + | 147 | d->d_btimer = cpu_to_be32(get_seconds() + |
224 | mp->m_quotainfo->qi_btimelimit); | 148 | mp->m_quotainfo->qi_btimelimit); |
@@ -227,10 +151,10 @@ xfs_qm_adjust_dqtimers( | |||
227 | } | 151 | } |
228 | } else { | 152 | } else { |
229 | if ((!d->d_blk_softlimit || | 153 | if ((!d->d_blk_softlimit || |
230 | (be64_to_cpu(d->d_bcount) < | 154 | (be64_to_cpu(d->d_bcount) <= |
231 | be64_to_cpu(d->d_blk_softlimit))) && | 155 | be64_to_cpu(d->d_blk_softlimit))) && |
232 | (!d->d_blk_hardlimit || | 156 | (!d->d_blk_hardlimit || |
233 | (be64_to_cpu(d->d_bcount) < | 157 | (be64_to_cpu(d->d_bcount) <= |
234 | be64_to_cpu(d->d_blk_hardlimit)))) { | 158 | be64_to_cpu(d->d_blk_hardlimit)))) { |
235 | d->d_btimer = 0; | 159 | d->d_btimer = 0; |
236 | } | 160 | } |
@@ -238,10 +162,10 @@ xfs_qm_adjust_dqtimers( | |||
238 | 162 | ||
239 | if (!d->d_itimer) { | 163 | if (!d->d_itimer) { |
240 | if ((d->d_ino_softlimit && | 164 | if ((d->d_ino_softlimit && |
241 | (be64_to_cpu(d->d_icount) >= | 165 | (be64_to_cpu(d->d_icount) > |
242 | be64_to_cpu(d->d_ino_softlimit))) || | 166 | be64_to_cpu(d->d_ino_softlimit))) || |
243 | (d->d_ino_hardlimit && | 167 | (d->d_ino_hardlimit && |
244 | (be64_to_cpu(d->d_icount) >= | 168 | (be64_to_cpu(d->d_icount) > |
245 | be64_to_cpu(d->d_ino_hardlimit)))) { | 169 | be64_to_cpu(d->d_ino_hardlimit)))) { |
246 | d->d_itimer = cpu_to_be32(get_seconds() + | 170 | d->d_itimer = cpu_to_be32(get_seconds() + |
247 | mp->m_quotainfo->qi_itimelimit); | 171 | mp->m_quotainfo->qi_itimelimit); |
@@ -250,10 +174,10 @@ xfs_qm_adjust_dqtimers( | |||
250 | } | 174 | } |
251 | } else { | 175 | } else { |
252 | if ((!d->d_ino_softlimit || | 176 | if ((!d->d_ino_softlimit || |
253 | (be64_to_cpu(d->d_icount) < | 177 | (be64_to_cpu(d->d_icount) <= |
254 | be64_to_cpu(d->d_ino_softlimit))) && | 178 | be64_to_cpu(d->d_ino_softlimit))) && |
255 | (!d->d_ino_hardlimit || | 179 | (!d->d_ino_hardlimit || |
256 | (be64_to_cpu(d->d_icount) < | 180 | (be64_to_cpu(d->d_icount) <= |
257 | be64_to_cpu(d->d_ino_hardlimit)))) { | 181 | be64_to_cpu(d->d_ino_hardlimit)))) { |
258 | d->d_itimer = 0; | 182 | d->d_itimer = 0; |
259 | } | 183 | } |
@@ -261,10 +185,10 @@ xfs_qm_adjust_dqtimers( | |||
261 | 185 | ||
262 | if (!d->d_rtbtimer) { | 186 | if (!d->d_rtbtimer) { |
263 | if ((d->d_rtb_softlimit && | 187 | if ((d->d_rtb_softlimit && |
264 | (be64_to_cpu(d->d_rtbcount) >= | 188 | (be64_to_cpu(d->d_rtbcount) > |
265 | be64_to_cpu(d->d_rtb_softlimit))) || | 189 | be64_to_cpu(d->d_rtb_softlimit))) || |
266 | (d->d_rtb_hardlimit && | 190 | (d->d_rtb_hardlimit && |
267 | (be64_to_cpu(d->d_rtbcount) >= | 191 | (be64_to_cpu(d->d_rtbcount) > |
268 | be64_to_cpu(d->d_rtb_hardlimit)))) { | 192 | be64_to_cpu(d->d_rtb_hardlimit)))) { |
269 | d->d_rtbtimer = cpu_to_be32(get_seconds() + | 193 | d->d_rtbtimer = cpu_to_be32(get_seconds() + |
270 | mp->m_quotainfo->qi_rtbtimelimit); | 194 | mp->m_quotainfo->qi_rtbtimelimit); |
@@ -273,10 +197,10 @@ xfs_qm_adjust_dqtimers( | |||
273 | } | 197 | } |
274 | } else { | 198 | } else { |
275 | if ((!d->d_rtb_softlimit || | 199 | if ((!d->d_rtb_softlimit || |
276 | (be64_to_cpu(d->d_rtbcount) < | 200 | (be64_to_cpu(d->d_rtbcount) <= |
277 | be64_to_cpu(d->d_rtb_softlimit))) && | 201 | be64_to_cpu(d->d_rtb_softlimit))) && |
278 | (!d->d_rtb_hardlimit || | 202 | (!d->d_rtb_hardlimit || |
279 | (be64_to_cpu(d->d_rtbcount) < | 203 | (be64_to_cpu(d->d_rtbcount) <= |
280 | be64_to_cpu(d->d_rtb_hardlimit)))) { | 204 | be64_to_cpu(d->d_rtb_hardlimit)))) { |
281 | d->d_rtbtimer = 0; | 205 | d->d_rtbtimer = 0; |
282 | } | 206 | } |
@@ -567,7 +491,32 @@ xfs_qm_dqread( | |||
567 | int error; | 491 | int error; |
568 | int cancelflags = 0; | 492 | int cancelflags = 0; |
569 | 493 | ||
570 | dqp = xfs_qm_dqinit(mp, id, type); | 494 | |
495 | dqp = kmem_zone_zalloc(xfs_Gqm->qm_dqzone, KM_SLEEP); | ||
496 | |||
497 | dqp->dq_flags = type; | ||
498 | dqp->q_core.d_id = cpu_to_be32(id); | ||
499 | dqp->q_mount = mp; | ||
500 | INIT_LIST_HEAD(&dqp->q_freelist); | ||
501 | mutex_init(&dqp->q_qlock); | ||
502 | init_waitqueue_head(&dqp->q_pinwait); | ||
503 | |||
504 | /* | ||
505 | * Because we want to use a counting completion, complete | ||
506 | * the flush completion once to allow a single access to | ||
507 | * the flush completion without blocking. | ||
508 | */ | ||
509 | init_completion(&dqp->q_flush); | ||
510 | complete(&dqp->q_flush); | ||
511 | |||
512 | /* | ||
513 | * Make sure group quotas have a different lock class than user | ||
514 | * quotas. | ||
515 | */ | ||
516 | if (!(type & XFS_DQ_USER)) | ||
517 | lockdep_set_class(&dqp->q_qlock, &xfs_dquot_other_class); | ||
518 | |||
519 | atomic_inc(&xfs_Gqm->qm_totaldquots); | ||
571 | 520 | ||
572 | trace_xfs_dqread(dqp); | 521 | trace_xfs_dqread(dqp); |
573 | 522 | ||
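
The xfs_dquot.c changes drop the old recycle-from-freelist path, so xfs_qm_dqread() now always zeroes a fresh dquot from the zone, and they relax the limit checks from >= to > so usage exactly at the soft limit no longer starts the grace timer (xfs_log_recover.c gets the matching change). The comparison change in isolation, with plain integers standing in for the on-disk big-endian fields:

#include <stdbool.h>
#include <stdio.h>

static bool over_soft_limit(unsigned long long count, unsigned long long soft)
{
    return soft && count > soft;     /* was: count >= soft */
}

int main(void)
{
    printf("at-limit=%d over-limit=%d\n",
           over_soft_limit(100, 100), over_soft_limit(101, 100));
    return 0;
}
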
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c index 541a508adea1..0ed9ee77937c 100644 --- a/fs/xfs/xfs_log_recover.c +++ b/fs/xfs/xfs_log_recover.c | |||
@@ -1489,7 +1489,7 @@ xlog_recover_add_to_cont_trans( | |||
1489 | old_ptr = item->ri_buf[item->ri_cnt-1].i_addr; | 1489 | old_ptr = item->ri_buf[item->ri_cnt-1].i_addr; |
1490 | old_len = item->ri_buf[item->ri_cnt-1].i_len; | 1490 | old_len = item->ri_buf[item->ri_cnt-1].i_len; |
1491 | 1491 | ||
1492 | ptr = kmem_realloc(old_ptr, len+old_len, old_len, 0u); | 1492 | ptr = kmem_realloc(old_ptr, len+old_len, old_len, KM_SLEEP); |
1493 | memcpy(&ptr[old_len], dp, len); /* d, s, l */ | 1493 | memcpy(&ptr[old_len], dp, len); /* d, s, l */ |
1494 | item->ri_buf[item->ri_cnt-1].i_len += len; | 1494 | item->ri_buf[item->ri_cnt-1].i_len += len; |
1495 | item->ri_buf[item->ri_cnt-1].i_addr = ptr; | 1495 | item->ri_buf[item->ri_cnt-1].i_addr = ptr; |
@@ -1981,7 +1981,7 @@ xfs_qm_dqcheck( | |||
1981 | 1981 | ||
1982 | if (!errs && ddq->d_id) { | 1982 | if (!errs && ddq->d_id) { |
1983 | if (ddq->d_blk_softlimit && | 1983 | if (ddq->d_blk_softlimit && |
1984 | be64_to_cpu(ddq->d_bcount) >= | 1984 | be64_to_cpu(ddq->d_bcount) > |
1985 | be64_to_cpu(ddq->d_blk_softlimit)) { | 1985 | be64_to_cpu(ddq->d_blk_softlimit)) { |
1986 | if (!ddq->d_btimer) { | 1986 | if (!ddq->d_btimer) { |
1987 | if (flags & XFS_QMOPT_DOWARN) | 1987 | if (flags & XFS_QMOPT_DOWARN) |
@@ -1992,7 +1992,7 @@ xfs_qm_dqcheck( | |||
1992 | } | 1992 | } |
1993 | } | 1993 | } |
1994 | if (ddq->d_ino_softlimit && | 1994 | if (ddq->d_ino_softlimit && |
1995 | be64_to_cpu(ddq->d_icount) >= | 1995 | be64_to_cpu(ddq->d_icount) > |
1996 | be64_to_cpu(ddq->d_ino_softlimit)) { | 1996 | be64_to_cpu(ddq->d_ino_softlimit)) { |
1997 | if (!ddq->d_itimer) { | 1997 | if (!ddq->d_itimer) { |
1998 | if (flags & XFS_QMOPT_DOWARN) | 1998 | if (flags & XFS_QMOPT_DOWARN) |
@@ -2003,7 +2003,7 @@ xfs_qm_dqcheck( | |||
2003 | } | 2003 | } |
2004 | } | 2004 | } |
2005 | if (ddq->d_rtb_softlimit && | 2005 | if (ddq->d_rtb_softlimit && |
2006 | be64_to_cpu(ddq->d_rtbcount) >= | 2006 | be64_to_cpu(ddq->d_rtbcount) > |
2007 | be64_to_cpu(ddq->d_rtb_softlimit)) { | 2007 | be64_to_cpu(ddq->d_rtb_softlimit)) { |
2008 | if (!ddq->d_rtbtimer) { | 2008 | if (!ddq->d_rtbtimer) { |
2009 | if (flags & XFS_QMOPT_DOWARN) | 2009 | if (flags & XFS_QMOPT_DOWARN) |
diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c index 671f37eae1c7..c436def733bf 100644 --- a/fs/xfs/xfs_qm.c +++ b/fs/xfs/xfs_qm.c | |||
@@ -50,7 +50,6 @@ | |||
50 | */ | 50 | */ |
51 | struct mutex xfs_Gqm_lock; | 51 | struct mutex xfs_Gqm_lock; |
52 | struct xfs_qm *xfs_Gqm; | 52 | struct xfs_qm *xfs_Gqm; |
53 | uint ndquot; | ||
54 | 53 | ||
55 | kmem_zone_t *qm_dqzone; | 54 | kmem_zone_t *qm_dqzone; |
56 | kmem_zone_t *qm_dqtrxzone; | 55 | kmem_zone_t *qm_dqtrxzone; |
@@ -93,7 +92,6 @@ xfs_Gqm_init(void) | |||
93 | goto out_free_udqhash; | 92 | goto out_free_udqhash; |
94 | 93 | ||
95 | hsize /= sizeof(xfs_dqhash_t); | 94 | hsize /= sizeof(xfs_dqhash_t); |
96 | ndquot = hsize << 8; | ||
97 | 95 | ||
98 | xqm = kmem_zalloc(sizeof(xfs_qm_t), KM_SLEEP); | 96 | xqm = kmem_zalloc(sizeof(xfs_qm_t), KM_SLEEP); |
99 | xqm->qm_dqhashmask = hsize - 1; | 97 | xqm->qm_dqhashmask = hsize - 1; |
@@ -137,7 +135,6 @@ xfs_Gqm_init(void) | |||
137 | xqm->qm_dqtrxzone = qm_dqtrxzone; | 135 | xqm->qm_dqtrxzone = qm_dqtrxzone; |
138 | 136 | ||
139 | atomic_set(&xqm->qm_totaldquots, 0); | 137 | atomic_set(&xqm->qm_totaldquots, 0); |
140 | xqm->qm_dqfree_ratio = XFS_QM_DQFREE_RATIO; | ||
141 | xqm->qm_nrefs = 0; | 138 | xqm->qm_nrefs = 0; |
142 | return xqm; | 139 | return xqm; |
143 | 140 | ||
@@ -1600,216 +1597,150 @@ xfs_qm_init_quotainos( | |||
1600 | return 0; | 1597 | return 0; |
1601 | } | 1598 | } |
1602 | 1599 | ||
1600 | STATIC void | ||
1601 | xfs_qm_dqfree_one( | ||
1602 | struct xfs_dquot *dqp) | ||
1603 | { | ||
1604 | struct xfs_mount *mp = dqp->q_mount; | ||
1605 | struct xfs_quotainfo *qi = mp->m_quotainfo; | ||
1603 | 1606 | ||
1607 | mutex_lock(&dqp->q_hash->qh_lock); | ||
1608 | list_del_init(&dqp->q_hashlist); | ||
1609 | dqp->q_hash->qh_version++; | ||
1610 | mutex_unlock(&dqp->q_hash->qh_lock); | ||
1604 | 1611 | ||
1605 | /* | 1612 | mutex_lock(&qi->qi_dqlist_lock); |
1606 | * Pop the least recently used dquot off the freelist and recycle it. | 1613 | list_del_init(&dqp->q_mplist); |
1607 | */ | 1614 | qi->qi_dquots--; |
1608 | STATIC struct xfs_dquot * | 1615 | qi->qi_dqreclaims++; |
1609 | xfs_qm_dqreclaim_one(void) | 1616 | mutex_unlock(&qi->qi_dqlist_lock); |
1617 | |||
1618 | xfs_qm_dqdestroy(dqp); | ||
1619 | } | ||
1620 | |||
1621 | STATIC void | ||
1622 | xfs_qm_dqreclaim_one( | ||
1623 | struct xfs_dquot *dqp, | ||
1624 | struct list_head *dispose_list) | ||
1610 | { | 1625 | { |
1611 | struct xfs_dquot *dqp; | 1626 | struct xfs_mount *mp = dqp->q_mount; |
1612 | int restarts = 0; | 1627 | int error; |
1613 | 1628 | ||
1614 | mutex_lock(&xfs_Gqm->qm_dqfrlist_lock); | 1629 | if (!xfs_dqlock_nowait(dqp)) |
1615 | restart: | 1630 | goto out_busy; |
1616 | list_for_each_entry(dqp, &xfs_Gqm->qm_dqfrlist, q_freelist) { | ||
1617 | struct xfs_mount *mp = dqp->q_mount; | ||
1618 | 1631 | ||
1619 | if (!xfs_dqlock_nowait(dqp)) | 1632 | /* |
1620 | continue; | 1633 | * This dquot has acquired a reference in the meantime remove it from |
1634 | * the freelist and try again. | ||
1635 | */ | ||
1636 | if (dqp->q_nrefs) { | ||
1637 | xfs_dqunlock(dqp); | ||
1621 | 1638 | ||
1622 | /* | 1639 | trace_xfs_dqreclaim_want(dqp); |
1623 | * This dquot has already been grabbed by dqlookup. | 1640 | XQM_STATS_INC(xqmstats.xs_qm_dqwants); |
1624 | * Remove it from the freelist and try again. | ||
1625 | */ | ||
1626 | if (dqp->q_nrefs) { | ||
1627 | trace_xfs_dqreclaim_want(dqp); | ||
1628 | XQM_STATS_INC(xqmstats.xs_qm_dqwants); | ||
1629 | |||
1630 | list_del_init(&dqp->q_freelist); | ||
1631 | xfs_Gqm->qm_dqfrlist_cnt--; | ||
1632 | restarts++; | ||
1633 | goto dqunlock; | ||
1634 | } | ||
1635 | 1641 | ||
1636 | ASSERT(dqp->q_hash); | 1642 | list_del_init(&dqp->q_freelist); |
1637 | ASSERT(!list_empty(&dqp->q_mplist)); | 1643 | xfs_Gqm->qm_dqfrlist_cnt--; |
1644 | return; | ||
1645 | } | ||
1638 | 1646 | ||
1639 | /* | 1647 | ASSERT(dqp->q_hash); |
1640 | * Try to grab the flush lock. If this dquot is in the process | 1648 | ASSERT(!list_empty(&dqp->q_mplist)); |
1641 | * of getting flushed to disk, we don't want to reclaim it. | ||
1642 | */ | ||
1643 | if (!xfs_dqflock_nowait(dqp)) | ||
1644 | goto dqunlock; | ||
1645 | 1649 | ||
1646 | /* | 1650 | /* |
1647 | * We have the flush lock so we know that this is not in the | 1651 | * Try to grab the flush lock. If this dquot is in the process of |
1648 | * process of being flushed. So, if this is dirty, flush it | 1652 | * getting flushed to disk, we don't want to reclaim it. |
1649 | * DELWRI so that we don't get a freelist infested with | 1653 | */ |
1650 | * dirty dquots. | 1654 | if (!xfs_dqflock_nowait(dqp)) |
1651 | */ | 1655 | goto out_busy; |
1652 | if (XFS_DQ_IS_DIRTY(dqp)) { | ||
1653 | int error; | ||
1654 | 1656 | ||
1655 | trace_xfs_dqreclaim_dirty(dqp); | 1657 | /* |
1658 | * We have the flush lock so we know that this is not in the | ||
1659 | * process of being flushed. So, if this is dirty, flush it | ||
1660 | * DELWRI so that we don't get a freelist infested with | ||
1661 | * dirty dquots. | ||
1662 | */ | ||
1663 | if (XFS_DQ_IS_DIRTY(dqp)) { | ||
1664 | trace_xfs_dqreclaim_dirty(dqp); | ||
1656 | 1665 | ||
1657 | /* | 1666 | /* |
1658 | * We flush it delayed write, so don't bother | 1667 | * We flush it delayed write, so don't bother releasing the |
1659 | * releasing the freelist lock. | 1668 | * freelist lock. |
1660 | */ | 1669 | */ |
1661 | error = xfs_qm_dqflush(dqp, SYNC_TRYLOCK); | 1670 | error = xfs_qm_dqflush(dqp, 0); |
1662 | if (error) { | 1671 | if (error) { |
1663 | xfs_warn(mp, "%s: dquot %p flush failed", | 1672 | xfs_warn(mp, "%s: dquot %p flush failed", |
1664 | __func__, dqp); | 1673 | __func__, dqp); |
1665 | } | ||
1666 | goto dqunlock; | ||
1667 | } | 1674 | } |
1668 | xfs_dqfunlock(dqp); | ||
1669 | 1675 | ||
1670 | /* | 1676 | /* |
1671 | * Prevent lookup now that we are going to reclaim the dquot. | 1677 | * Give the dquot another try on the freelist, as the |
1672 | * Once XFS_DQ_FREEING is set lookup won't touch the dquot, | 1678 | * flushing will take some time. |
1673 | * thus we can drop the lock now. | ||
1674 | */ | 1679 | */ |
1675 | dqp->dq_flags |= XFS_DQ_FREEING; | 1680 | goto out_busy; |
1676 | xfs_dqunlock(dqp); | 1681 | } |
1677 | 1682 | xfs_dqfunlock(dqp); | |
1678 | mutex_lock(&dqp->q_hash->qh_lock); | ||
1679 | list_del_init(&dqp->q_hashlist); | ||
1680 | dqp->q_hash->qh_version++; | ||
1681 | mutex_unlock(&dqp->q_hash->qh_lock); | ||
1682 | |||
1683 | mutex_lock(&mp->m_quotainfo->qi_dqlist_lock); | ||
1684 | list_del_init(&dqp->q_mplist); | ||
1685 | mp->m_quotainfo->qi_dquots--; | ||
1686 | mp->m_quotainfo->qi_dqreclaims++; | ||
1687 | mutex_unlock(&mp->m_quotainfo->qi_dqlist_lock); | ||
1688 | 1683 | ||
1689 | ASSERT(dqp->q_nrefs == 0); | 1684 | /* |
1690 | list_del_init(&dqp->q_freelist); | 1685 | * Prevent lookups now that we are past the point of no return. |
1691 | xfs_Gqm->qm_dqfrlist_cnt--; | 1686 | */ |
1687 | dqp->dq_flags |= XFS_DQ_FREEING; | ||
1688 | xfs_dqunlock(dqp); | ||
1692 | 1689 | ||
1693 | mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock); | 1690 | ASSERT(dqp->q_nrefs == 0); |
1694 | return dqp; | 1691 | list_move_tail(&dqp->q_freelist, dispose_list); |
1695 | dqunlock: | 1692 | xfs_Gqm->qm_dqfrlist_cnt--; |
1696 | xfs_dqunlock(dqp); | ||
1697 | if (restarts >= XFS_QM_RECLAIM_MAX_RESTARTS) | ||
1698 | break; | ||
1699 | goto restart; | ||
1700 | } | ||
1701 | 1693 | ||
1702 | mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock); | 1694 | trace_xfs_dqreclaim_done(dqp); |
1703 | return NULL; | 1695 | XQM_STATS_INC(xqmstats.xs_qm_dqreclaims); |
1704 | } | 1696 | return; |
1705 | 1697 | ||
1706 | /* | 1698 | out_busy: |
1707 | * Traverse the freelist of dquots and attempt to reclaim a maximum of | 1699 | xfs_dqunlock(dqp); |
1708 | * 'howmany' dquots. This operation races with dqlookup(), and attempts to | ||
1709 | * favor the lookup function ... | ||
1710 | */ | ||
1711 | STATIC int | ||
1712 | xfs_qm_shake_freelist( | ||
1713 | int howmany) | ||
1714 | { | ||
1715 | int nreclaimed = 0; | ||
1716 | xfs_dquot_t *dqp; | ||
1717 | 1700 | ||
1718 | if (howmany <= 0) | 1701 | /* |
1719 | return 0; | 1702 | * Move the dquot to the tail of the list so that we don't spin on it. |
1703 | */ | ||
1704 | list_move_tail(&dqp->q_freelist, &xfs_Gqm->qm_dqfrlist); | ||
1720 | 1705 | ||
1721 | while (nreclaimed < howmany) { | 1706 | trace_xfs_dqreclaim_busy(dqp); |
1722 | dqp = xfs_qm_dqreclaim_one(); | 1707 | XQM_STATS_INC(xqmstats.xs_qm_dqreclaim_misses); |
1723 | if (!dqp) | ||
1724 | return nreclaimed; | ||
1725 | xfs_qm_dqdestroy(dqp); | ||
1726 | nreclaimed++; | ||
1727 | } | ||
1728 | return nreclaimed; | ||
1729 | } | 1708 | } |
1730 | 1709 | ||
1731 | /* | ||
1732 | * The kmem_shake interface is invoked when memory is running low. | ||
1733 | */ | ||
1734 | /* ARGSUSED */ | ||
1735 | STATIC int | 1710 | STATIC int |
1736 | xfs_qm_shake( | 1711 | xfs_qm_shake( |
1737 | struct shrinker *shrink, | 1712 | struct shrinker *shrink, |
1738 | struct shrink_control *sc) | 1713 | struct shrink_control *sc) |
1739 | { | 1714 | { |
1740 | int ndqused, nfree, n; | 1715 | int nr_to_scan = sc->nr_to_scan; |
1741 | gfp_t gfp_mask = sc->gfp_mask; | 1716 | LIST_HEAD (dispose_list); |
1742 | 1717 | struct xfs_dquot *dqp; | |
1743 | if (!kmem_shake_allow(gfp_mask)) | ||
1744 | return 0; | ||
1745 | if (!xfs_Gqm) | ||
1746 | return 0; | ||
1747 | |||
1748 | nfree = xfs_Gqm->qm_dqfrlist_cnt; /* free dquots */ | ||
1749 | /* incore dquots in all f/s's */ | ||
1750 | ndqused = atomic_read(&xfs_Gqm->qm_totaldquots) - nfree; | ||
1751 | |||
1752 | ASSERT(ndqused >= 0); | ||
1753 | 1718 | ||
1754 | if (nfree <= ndqused && nfree < ndquot) | 1719 | if ((sc->gfp_mask & (__GFP_FS|__GFP_WAIT)) != (__GFP_FS|__GFP_WAIT)) |
1755 | return 0; | 1720 | return 0; |
1721 | if (!nr_to_scan) | ||
1722 | goto out; | ||
1756 | 1723 | ||
1757 | ndqused *= xfs_Gqm->qm_dqfree_ratio; /* target # of free dquots */ | 1724 | mutex_lock(&xfs_Gqm->qm_dqfrlist_lock); |
1758 | n = nfree - ndqused - ndquot; /* # over target */ | 1725 | while (!list_empty(&xfs_Gqm->qm_dqfrlist)) { |
1759 | 1726 | if (nr_to_scan-- <= 0) | |
1760 | return xfs_qm_shake_freelist(MAX(nfree, n)); | 1727 | break; |
1761 | } | 1728 | dqp = list_first_entry(&xfs_Gqm->qm_dqfrlist, struct xfs_dquot, |
1762 | 1729 | q_freelist); | |
1763 | 1730 | xfs_qm_dqreclaim_one(dqp, &dispose_list); | |
1764 | /*------------------------------------------------------------------*/ | ||
1765 | |||
1766 | /* | ||
1767 | * Return a new incore dquot. Depending on the number of | ||
1768 | * dquots in the system, we either allocate a new one on the kernel heap, | ||
1769 | * or reclaim a free one. | ||
1770 | * Return value is B_TRUE if we allocated a new dquot, B_FALSE if we managed | ||
1771 | * to reclaim an existing one from the freelist. | ||
1772 | */ | ||
1773 | boolean_t | ||
1774 | xfs_qm_dqalloc_incore( | ||
1775 | xfs_dquot_t **O_dqpp) | ||
1776 | { | ||
1777 | xfs_dquot_t *dqp; | ||
1778 | |||
1779 | /* | ||
1780 | * Check against high water mark to see if we want to pop | ||
1781 | * a nincompoop dquot off the freelist. | ||
1782 | */ | ||
1783 | if (atomic_read(&xfs_Gqm->qm_totaldquots) >= ndquot) { | ||
1784 | /* | ||
1785 | * Try to recycle a dquot from the freelist. | ||
1786 | */ | ||
1787 | if ((dqp = xfs_qm_dqreclaim_one())) { | ||
1788 | XQM_STATS_INC(xqmstats.xs_qm_dqreclaims); | ||
1789 | /* | ||
1790 | * Just zero the core here. The rest will get | ||
1791 | * reinitialized by caller. XXX we shouldn't even | ||
1792 | * do this zero ... | ||
1793 | */ | ||
1794 | memset(&dqp->q_core, 0, sizeof(dqp->q_core)); | ||
1795 | *O_dqpp = dqp; | ||
1796 | return B_FALSE; | ||
1797 | } | ||
1798 | XQM_STATS_INC(xqmstats.xs_qm_dqreclaim_misses); | ||
1799 | } | 1731 | } |
1732 | mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock); | ||
1800 | 1733 | ||
1801 | /* | 1734 | while (!list_empty(&dispose_list)) { |
1802 | * Allocate a brand new dquot on the kernel heap and return it | 1735 | dqp = list_first_entry(&dispose_list, struct xfs_dquot, |
1803 | * to the caller to initialize. | 1736 | q_freelist); |
1804 | */ | 1737 | list_del_init(&dqp->q_freelist); |
1805 | ASSERT(xfs_Gqm->qm_dqzone != NULL); | 1738 | xfs_qm_dqfree_one(dqp); |
1806 | *O_dqpp = kmem_zone_zalloc(xfs_Gqm->qm_dqzone, KM_SLEEP); | 1739 | } |
1807 | atomic_inc(&xfs_Gqm->qm_totaldquots); | 1740 | out: |
1808 | 1741 | return (xfs_Gqm->qm_dqfrlist_cnt / 100) * sysctl_vfs_cache_pressure; | |
1809 | return B_TRUE; | ||
1810 | } | 1742 | } |
1811 | 1743 | ||
1812 | |||
1813 | /* | 1744 | /* |
1814 | * Start a transaction and write the incore superblock changes to | 1745 | * Start a transaction and write the incore superblock changes to |
1815 | * disk. flags parameter indicates which fields have changed. | 1746 | * disk. flags parameter indicates which fields have changed. |
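The rework above replaces the restart-prone freelist scan with a two-phase scheme: xfs_qm_shake() walks the freelist under qm_dqfrlist_lock, moves reclaim candidates onto a private dispose_list, drops the lock, and only then tears the dquots down via xfs_qm_dqfree_one(); busy dquots are rotated to the tail instead of forcing a restart. A rough userspace sketch of that pattern (a pthread mutex and a hand-rolled singly linked list standing in for the kernel lock and list helpers; all names are made up):

/*
 * Two-phase reclaim: collect victims on a private list under the lock,
 * then free them with the lock dropped.  Busy entries are skipped here,
 * where the kernel code rotates them to the tail of the freelist.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct dq {
	struct dq *next;
	int nrefs;				/* >0 means busy, leave it alone */
	int id;
};

static struct dq *freelist;
static pthread_mutex_t freelist_lock = PTHREAD_MUTEX_INITIALIZER;

static int shake(int nr_to_scan)
{
	struct dq *dispose = NULL, *dqp, **pp;
	int freed = 0;

	pthread_mutex_lock(&freelist_lock);
	for (pp = &freelist; *pp && nr_to_scan-- > 0; ) {
		dqp = *pp;
		if (dqp->nrefs) {		/* busy: keep it on the freelist */
			pp = &dqp->next;
			continue;
		}
		*pp = dqp->next;		/* unlink from the shared list */
		dqp->next = dispose;		/* collect on the private list */
		dispose = dqp;
	}
	pthread_mutex_unlock(&freelist_lock);

	while ((dqp = dispose)) {		/* dispose without holding the lock */
		dispose = dqp->next;
		printf("freeing dquot %d\n", dqp->id);
		free(dqp);
		freed++;
	}
	return freed;
}

int main(void)
{
	for (int i = 0; i < 5; i++) {
		struct dq *d = calloc(1, sizeof(*d));

		d->id = i;
		d->nrefs = (i == 2);		/* one busy entry */
		d->next = freelist;
		freelist = d;
	}
	printf("freed %d\n", shake(10));	/* frees 4, keeps the busy one */
	return 0;
}

As the diff shows, the kernel version additionally bails out unless the allocation context allows blocking filesystem work (__GFP_FS|__GFP_WAIT) and reports a count scaled by sysctl_vfs_cache_pressure, which is the standard contract for a shrinker callback of this era.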
diff --git a/fs/xfs/xfs_qm.h b/fs/xfs/xfs_qm.h index 9b4f3adefbc5..9a9b997e1a0a 100644 --- a/fs/xfs/xfs_qm.h +++ b/fs/xfs/xfs_qm.h | |||
@@ -26,24 +26,12 @@ | |||
26 | struct xfs_qm; | 26 | struct xfs_qm; |
27 | struct xfs_inode; | 27 | struct xfs_inode; |
28 | 28 | ||
29 | extern uint ndquot; | ||
30 | extern struct mutex xfs_Gqm_lock; | 29 | extern struct mutex xfs_Gqm_lock; |
31 | extern struct xfs_qm *xfs_Gqm; | 30 | extern struct xfs_qm *xfs_Gqm; |
32 | extern kmem_zone_t *qm_dqzone; | 31 | extern kmem_zone_t *qm_dqzone; |
33 | extern kmem_zone_t *qm_dqtrxzone; | 32 | extern kmem_zone_t *qm_dqtrxzone; |
34 | 33 | ||
35 | /* | 34 | /* |
36 | * Ditto, for xfs_qm_dqreclaim_one. | ||
37 | */ | ||
38 | #define XFS_QM_RECLAIM_MAX_RESTARTS 4 | ||
39 | |||
40 | /* | ||
41 | * Ideal ratio of free to in use dquots. Quota manager makes an attempt | ||
42 | * to keep this balance. | ||
43 | */ | ||
44 | #define XFS_QM_DQFREE_RATIO 2 | ||
45 | |||
46 | /* | ||
47 | * Dquot hashtable constants/threshold values. | 35 | * Dquot hashtable constants/threshold values. |
48 | */ | 36 | */ |
49 | #define XFS_QM_HASHSIZE_LOW (PAGE_SIZE / sizeof(xfs_dqhash_t)) | 37 | #define XFS_QM_HASHSIZE_LOW (PAGE_SIZE / sizeof(xfs_dqhash_t)) |
@@ -74,7 +62,6 @@ typedef struct xfs_qm { | |||
74 | int qm_dqfrlist_cnt; | 62 | int qm_dqfrlist_cnt; |
75 | atomic_t qm_totaldquots; /* total incore dquots */ | 63 | atomic_t qm_totaldquots; /* total incore dquots */ |
76 | uint qm_nrefs; /* file systems with quota on */ | 64 | uint qm_nrefs; /* file systems with quota on */ |
77 | int qm_dqfree_ratio;/* ratio of free to inuse dquots */ | ||
78 | kmem_zone_t *qm_dqzone; /* dquot mem-alloc zone */ | 65 | kmem_zone_t *qm_dqzone; /* dquot mem-alloc zone */ |
79 | kmem_zone_t *qm_dqtrxzone; /* t_dqinfo of transactions */ | 66 | kmem_zone_t *qm_dqtrxzone; /* t_dqinfo of transactions */ |
80 | } xfs_qm_t; | 67 | } xfs_qm_t; |
@@ -143,7 +130,6 @@ extern int xfs_qm_quotacheck(xfs_mount_t *); | |||
143 | extern int xfs_qm_write_sb_changes(xfs_mount_t *, __int64_t); | 130 | extern int xfs_qm_write_sb_changes(xfs_mount_t *, __int64_t); |
144 | 131 | ||
145 | /* dquot stuff */ | 132 | /* dquot stuff */ |
146 | extern boolean_t xfs_qm_dqalloc_incore(xfs_dquot_t **); | ||
147 | extern int xfs_qm_dqpurge_all(xfs_mount_t *, uint); | 133 | extern int xfs_qm_dqpurge_all(xfs_mount_t *, uint); |
148 | extern void xfs_qm_dqrele_all_inodes(xfs_mount_t *, uint); | 134 | extern void xfs_qm_dqrele_all_inodes(xfs_mount_t *, uint); |
149 | 135 | ||
diff --git a/fs/xfs/xfs_qm_stats.c b/fs/xfs/xfs_qm_stats.c index 8671a0b32644..5729ba570877 100644 --- a/fs/xfs/xfs_qm_stats.c +++ b/fs/xfs/xfs_qm_stats.c | |||
@@ -42,9 +42,9 @@ static int xqm_proc_show(struct seq_file *m, void *v) | |||
42 | { | 42 | { |
43 | /* maximum; incore; ratio free to inuse; freelist */ | 43 | /* maximum; incore; ratio free to inuse; freelist */ |
44 | seq_printf(m, "%d\t%d\t%d\t%u\n", | 44 | seq_printf(m, "%d\t%d\t%d\t%u\n", |
45 | ndquot, | 45 | 0, |
46 | xfs_Gqm? atomic_read(&xfs_Gqm->qm_totaldquots) : 0, | 46 | xfs_Gqm? atomic_read(&xfs_Gqm->qm_totaldquots) : 0, |
47 | xfs_Gqm? xfs_Gqm->qm_dqfree_ratio : 0, | 47 | 0, |
48 | xfs_Gqm? xfs_Gqm->qm_dqfrlist_cnt : 0); | 48 | xfs_Gqm? xfs_Gqm->qm_dqfrlist_cnt : 0); |
49 | return 0; | 49 | return 0; |
50 | } | 50 | } |
diff --git a/fs/xfs/xfs_qm_syscalls.c b/fs/xfs/xfs_qm_syscalls.c index eafbcff81f3a..711a86e39ff0 100644 --- a/fs/xfs/xfs_qm_syscalls.c +++ b/fs/xfs/xfs_qm_syscalls.c | |||
@@ -813,11 +813,11 @@ xfs_qm_export_dquot( | |||
813 | (XFS_IS_OQUOTA_ENFORCED(mp) && | 813 | (XFS_IS_OQUOTA_ENFORCED(mp) && |
814 | (dst->d_flags & (FS_PROJ_QUOTA | FS_GROUP_QUOTA)))) && | 814 | (dst->d_flags & (FS_PROJ_QUOTA | FS_GROUP_QUOTA)))) && |
815 | dst->d_id != 0) { | 815 | dst->d_id != 0) { |
816 | if (((int) dst->d_bcount >= (int) dst->d_blk_softlimit) && | 816 | if (((int) dst->d_bcount > (int) dst->d_blk_softlimit) && |
817 | (dst->d_blk_softlimit > 0)) { | 817 | (dst->d_blk_softlimit > 0)) { |
818 | ASSERT(dst->d_btimer != 0); | 818 | ASSERT(dst->d_btimer != 0); |
819 | } | 819 | } |
820 | if (((int) dst->d_icount >= (int) dst->d_ino_softlimit) && | 820 | if (((int) dst->d_icount > (int) dst->d_ino_softlimit) && |
821 | (dst->d_ino_softlimit > 0)) { | 821 | (dst->d_ino_softlimit > 0)) { |
822 | ASSERT(dst->d_itimer != 0); | 822 | ASSERT(dst->d_itimer != 0); |
823 | } | 823 | } |
diff --git a/fs/xfs/xfs_trace.h b/fs/xfs/xfs_trace.h index 6b6df5802e95..bb134a819930 100644 --- a/fs/xfs/xfs_trace.h +++ b/fs/xfs/xfs_trace.h | |||
@@ -733,11 +733,10 @@ DEFINE_EVENT(xfs_dquot_class, name, \ | |||
733 | DEFINE_DQUOT_EVENT(xfs_dqadjust); | 733 | DEFINE_DQUOT_EVENT(xfs_dqadjust); |
734 | DEFINE_DQUOT_EVENT(xfs_dqreclaim_want); | 734 | DEFINE_DQUOT_EVENT(xfs_dqreclaim_want); |
735 | DEFINE_DQUOT_EVENT(xfs_dqreclaim_dirty); | 735 | DEFINE_DQUOT_EVENT(xfs_dqreclaim_dirty); |
736 | DEFINE_DQUOT_EVENT(xfs_dqreclaim_unlink); | 736 | DEFINE_DQUOT_EVENT(xfs_dqreclaim_busy); |
737 | DEFINE_DQUOT_EVENT(xfs_dqreclaim_done); | ||
737 | DEFINE_DQUOT_EVENT(xfs_dqattach_found); | 738 | DEFINE_DQUOT_EVENT(xfs_dqattach_found); |
738 | DEFINE_DQUOT_EVENT(xfs_dqattach_get); | 739 | DEFINE_DQUOT_EVENT(xfs_dqattach_get); |
739 | DEFINE_DQUOT_EVENT(xfs_dqinit); | ||
740 | DEFINE_DQUOT_EVENT(xfs_dqreuse); | ||
741 | DEFINE_DQUOT_EVENT(xfs_dqalloc); | 740 | DEFINE_DQUOT_EVENT(xfs_dqalloc); |
742 | DEFINE_DQUOT_EVENT(xfs_dqtobp_read); | 741 | DEFINE_DQUOT_EVENT(xfs_dqtobp_read); |
743 | DEFINE_DQUOT_EVENT(xfs_dqread); | 742 | DEFINE_DQUOT_EVENT(xfs_dqread); |
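The trace changes are one-liners because the dquot events share a single event class; each DEFINE_DQUOT_EVENT() stamps out another event from it. A sketch of that shape, with the entry fields abbreviated and chosen only for illustration (the real xfs_dquot_class in xfs_trace.h records more state):

DECLARE_EVENT_CLASS(xfs_dquot_class,
	TP_PROTO(struct xfs_dquot *dqp),
	TP_ARGS(dqp),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(u32, id)
	),
	TP_fast_assign(
		__entry->dev = dqp->q_mount->m_super->s_dev;
		__entry->id = be32_to_cpu(dqp->q_core.d_id);
	),
	TP_printk("dev %d:%d id 0x%x",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->id)
);

#define DEFINE_DQUOT_EVENT(name) \
DEFINE_EVENT(xfs_dquot_class, name, \
	TP_PROTO(struct xfs_dquot *dqp), \
	TP_ARGS(dqp))

DEFINE_DQUOT_EVENT(xfs_dqreclaim_busy);
DEFINE_DQUOT_EVENT(xfs_dqreclaim_done);

/* callers then emit the events as plain calls, e.g. trace_xfs_dqreclaim_busy(dqp); */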
diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c index 329b06aba1c2..7adcdf15ae0c 100644 --- a/fs/xfs/xfs_trans.c +++ b/fs/xfs/xfs_trans.c | |||
@@ -1151,8 +1151,8 @@ xfs_trans_add_item( | |||
1151 | { | 1151 | { |
1152 | struct xfs_log_item_desc *lidp; | 1152 | struct xfs_log_item_desc *lidp; |
1153 | 1153 | ||
1154 | ASSERT(lip->li_mountp = tp->t_mountp); | 1154 | ASSERT(lip->li_mountp == tp->t_mountp); |
1155 | ASSERT(lip->li_ailp = tp->t_mountp->m_ail); | 1155 | ASSERT(lip->li_ailp == tp->t_mountp->m_ail); |
1156 | 1156 | ||
1157 | lidp = kmem_zone_zalloc(xfs_log_item_desc_zone, KM_SLEEP | KM_NOFS); | 1157 | lidp = kmem_zone_zalloc(xfs_log_item_desc_zone, KM_SLEEP | KM_NOFS); |
1158 | 1158 | ||
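The xfs_trans_add_item() fix replaces accidental assignments inside ASSERT() with the intended comparisons. A tiny standalone demonstration (not kernel code) of why the typo goes unnoticed at runtime:

/*
 * With "=", the assert assigns instead of comparing: it silently overwrites
 * the field and evaluates true for any non-zero value, so a mismatched
 * li_mountp would never be caught.  gcc -Wall does flag this pattern.
 */
#include <assert.h>
#include <stddef.h>
#include <stdio.h>

struct item { void *mountp; };

int main(void)
{
	struct item lip = { .mountp = NULL };	/* deliberately wrong value */
	void *tp_mount = &lip;			/* what it should have been */

	assert(lip.mountp = tp_mount);		/* typo: assigns, always "passes" here */
	printf("typo'd assert let a bad mountp through (now %p)\n", lip.mountp);

	/* assert(lip.mountp == tp_mount); */	/* the corrected check actually compares */
	return 0;
}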
diff --git a/fs/xfs/xfs_trans_dquot.c b/fs/xfs/xfs_trans_dquot.c index 4d00ee67792d..c4ba366d24e6 100644 --- a/fs/xfs/xfs_trans_dquot.c +++ b/fs/xfs/xfs_trans_dquot.c | |||
@@ -649,12 +649,12 @@ xfs_trans_dqresv( | |||
649 | * nblks. | 649 | * nblks. |
650 | */ | 650 | */ |
651 | if (hardlimit > 0ULL && | 651 | if (hardlimit > 0ULL && |
652 | hardlimit <= nblks + *resbcountp) { | 652 | hardlimit < nblks + *resbcountp) { |
653 | xfs_quota_warn(mp, dqp, QUOTA_NL_BHARDWARN); | 653 | xfs_quota_warn(mp, dqp, QUOTA_NL_BHARDWARN); |
654 | goto error_return; | 654 | goto error_return; |
655 | } | 655 | } |
656 | if (softlimit > 0ULL && | 656 | if (softlimit > 0ULL && |
657 | softlimit <= nblks + *resbcountp) { | 657 | softlimit < nblks + *resbcountp) { |
658 | if ((timer != 0 && get_seconds() > timer) || | 658 | if ((timer != 0 && get_seconds() > timer) || |
659 | (warns != 0 && warns >= warnlimit)) { | 659 | (warns != 0 && warns >= warnlimit)) { |
660 | xfs_quota_warn(mp, dqp, | 660 | xfs_quota_warn(mp, dqp, |
@@ -677,11 +677,13 @@ xfs_trans_dqresv( | |||
677 | if (!softlimit) | 677 | if (!softlimit) |
678 | softlimit = q->qi_isoftlimit; | 678 | softlimit = q->qi_isoftlimit; |
679 | 679 | ||
680 | if (hardlimit > 0ULL && count >= hardlimit) { | 680 | if (hardlimit > 0ULL && |
681 | hardlimit < ninos + count) { | ||
681 | xfs_quota_warn(mp, dqp, QUOTA_NL_IHARDWARN); | 682 | xfs_quota_warn(mp, dqp, QUOTA_NL_IHARDWARN); |
682 | goto error_return; | 683 | goto error_return; |
683 | } | 684 | } |
684 | if (softlimit > 0ULL && count >= softlimit) { | 685 | if (softlimit > 0ULL && |
686 | softlimit < ninos + count) { | ||
685 | if ((timer != 0 && get_seconds() > timer) || | 687 | if ((timer != 0 && get_seconds() > timer) || |
686 | (warns != 0 && warns >= warnlimit)) { | 688 | (warns != 0 && warns >= warnlimit)) { |
687 | xfs_quota_warn(mp, dqp, | 689 | xfs_quota_warn(mp, dqp, |
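In xfs_trans_dqresv() the block and inode checks now refuse a reservation only when it would go past the limit, so filling the quota exactly is allowed, and the inode check also accounts for the inodes being reserved (ninos). A minimal standalone sketch of the block-count boundary, using a made-up over_hardlimit() helper:

/*
 * Boundary change in the reservation check: limit < new_total rather than
 * limit <= new_total, so landing exactly on the hard limit succeeds.
 */
#include <assert.h>
#include <stdint.h>

static int over_hardlimit(uint64_t hardlimit, uint64_t nblks, uint64_t resbcount)
{
	return hardlimit > 0 && hardlimit < nblks + resbcount;	/* was: <= */
}

int main(void)
{
	assert(!over_hardlimit(100, 10, 90));	/* fills the quota exactly: allowed */
	assert(over_hardlimit(100, 11, 90));	/* would exceed it: refused */
	assert(!over_hardlimit(0, 1000, 0));	/* no hard limit configured */
	return 0;
}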
diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c index 0cf52da9d246..ebdb88840a47 100644 --- a/fs/xfs/xfs_vnodeops.c +++ b/fs/xfs/xfs_vnodeops.c | |||
@@ -131,7 +131,8 @@ xfs_readlink( | |||
131 | __func__, (unsigned long long) ip->i_ino, | 131 | __func__, (unsigned long long) ip->i_ino, |
132 | (long long) pathlen); | 132 | (long long) pathlen); |
133 | ASSERT(0); | 133 | ASSERT(0); |
134 | return XFS_ERROR(EFSCORRUPTED); | 134 | error = XFS_ERROR(EFSCORRUPTED); |
135 | goto out; | ||
135 | } | 136 | } |
136 | 137 | ||
137 | 138 | ||
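The xfs_readlink() change routes the corruption error through the common exit path instead of returning directly, so the inode lock taken earlier in the function is always dropped. A userspace sketch of that goto-out pattern (a pthread mutex standing in for the XFS ILOCK, EIO standing in for EFSCORRUPTED; read_link() is a made-up stand-in, not the kernel function):

/*
 * Once a lock (or any other resource) is held, error paths jump to a single
 * "out" label so the unlock always runs; returning directly would leak it.
 */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t ilock = PTHREAD_MUTEX_INITIALIZER;

static int read_link(int pathlen)
{
	int error = 0;

	pthread_mutex_lock(&ilock);		/* xfs_ilock(ip, XFS_ILOCK_SHARED) */

	if (pathlen <= 0) {			/* the corruption check */
		error = -EIO;			/* stands in for EFSCORRUPTED */
		goto out;			/* NOT "return": the lock is held */
	}
	/* ... copy out the symlink target ... */
out:
	pthread_mutex_unlock(&ilock);		/* xfs_iunlock(ip, XFS_ILOCK_SHARED) */
	return error;
}

int main(void)
{
	printf("bad length  -> %d\n", read_link(0));
	printf("good length -> %d\n", read_link(16));
	return 0;
}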