Diffstat (limited to 'fs')
39 files changed, 1181 insertions, 450 deletions
diff --git a/fs/afs/flock.c b/fs/afs/flock.c index 210acafe4a9b..3ff8bdd18fb3 100644 --- a/fs/afs/flock.c +++ b/fs/afs/flock.c | |||
@@ -432,7 +432,6 @@ vfs_rejected_lock: | |||
432 | list_del_init(&fl->fl_u.afs.link); | 432 | list_del_init(&fl->fl_u.afs.link); |
433 | if (list_empty(&vnode->granted_locks)) | 433 | if (list_empty(&vnode->granted_locks)) |
434 | afs_defer_unlock(vnode, key); | 434 | afs_defer_unlock(vnode, key); |
435 | spin_unlock(&vnode->lock); | ||
436 | goto abort_attempt; | 435 | goto abort_attempt; |
437 | } | 436 | } |
438 | 437 | ||
diff --git a/fs/aio.c b/fs/aio.c | |||
@@ -485,6 +485,8 @@ static inline void really_put_req(struct kioctx *ctx, struct kiocb *req) | |||
485 | { | 485 | { |
486 | assert_spin_locked(&ctx->ctx_lock); | 486 | assert_spin_locked(&ctx->ctx_lock); |
487 | 487 | ||
488 | if (req->ki_eventfd != NULL) | ||
489 | eventfd_ctx_put(req->ki_eventfd); | ||
488 | if (req->ki_dtor) | 490 | if (req->ki_dtor) |
489 | req->ki_dtor(req); | 491 | req->ki_dtor(req); |
490 | if (req->ki_iovec != &req->ki_inline_vec) | 492 | if (req->ki_iovec != &req->ki_inline_vec) |
@@ -509,8 +511,6 @@ static void aio_fput_routine(struct work_struct *data) | |||
509 | /* Complete the fput(s) */ | 511 | /* Complete the fput(s) */ |
510 | if (req->ki_filp != NULL) | 512 | if (req->ki_filp != NULL) |
511 | __fput(req->ki_filp); | 513 | __fput(req->ki_filp); |
512 | if (req->ki_eventfd != NULL) | ||
513 | __fput(req->ki_eventfd); | ||
514 | 514 | ||
515 | /* Link the iocb into the context's free list */ | 515 | /* Link the iocb into the context's free list */ |
516 | spin_lock_irq(&ctx->ctx_lock); | 516 | spin_lock_irq(&ctx->ctx_lock); |
@@ -528,8 +528,6 @@ static void aio_fput_routine(struct work_struct *data) | |||
528 | */ | 528 | */ |
529 | static int __aio_put_req(struct kioctx *ctx, struct kiocb *req) | 529 | static int __aio_put_req(struct kioctx *ctx, struct kiocb *req) |
530 | { | 530 | { |
531 | int schedule_putreq = 0; | ||
532 | |||
533 | dprintk(KERN_DEBUG "aio_put(%p): f_count=%ld\n", | 531 | dprintk(KERN_DEBUG "aio_put(%p): f_count=%ld\n", |
534 | req, atomic_long_read(&req->ki_filp->f_count)); | 532 | req, atomic_long_read(&req->ki_filp->f_count)); |
535 | 533 | ||
@@ -549,24 +547,16 @@ static int __aio_put_req(struct kioctx *ctx, struct kiocb *req) | |||
549 | * we would not be holding the last reference to the file*, so | 547 | * we would not be holding the last reference to the file*, so |
550 | * this function will be executed w/out any aio kthread wakeup. | 548 | * this function will be executed w/out any aio kthread wakeup. |
551 | */ | 549 | */ |
552 | if (unlikely(atomic_long_dec_and_test(&req->ki_filp->f_count))) | 550 | if (unlikely(atomic_long_dec_and_test(&req->ki_filp->f_count))) { |
553 | schedule_putreq++; | ||
554 | else | ||
555 | req->ki_filp = NULL; | ||
556 | if (req->ki_eventfd != NULL) { | ||
557 | if (unlikely(atomic_long_dec_and_test(&req->ki_eventfd->f_count))) | ||
558 | schedule_putreq++; | ||
559 | else | ||
560 | req->ki_eventfd = NULL; | ||
561 | } | ||
562 | if (unlikely(schedule_putreq)) { | ||
563 | get_ioctx(ctx); | 551 | get_ioctx(ctx); |
564 | spin_lock(&fput_lock); | 552 | spin_lock(&fput_lock); |
565 | list_add(&req->ki_list, &fput_head); | 553 | list_add(&req->ki_list, &fput_head); |
566 | spin_unlock(&fput_lock); | 554 | spin_unlock(&fput_lock); |
567 | queue_work(aio_wq, &fput_work); | 555 | queue_work(aio_wq, &fput_work); |
568 | } else | 556 | } else { |
557 | req->ki_filp = NULL; | ||
569 | really_put_req(ctx, req); | 558 | really_put_req(ctx, req); |
559 | } | ||
570 | return 1; | 560 | return 1; |
571 | } | 561 | } |
572 | 562 | ||
@@ -1622,7 +1612,7 @@ static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb, | |||
1622 | * an eventfd() fd, and will be signaled for each completed | 1612 | * an eventfd() fd, and will be signaled for each completed |
1623 | * event using the eventfd_signal() function. | 1613 | * event using the eventfd_signal() function. |
1624 | */ | 1614 | */ |
1625 | req->ki_eventfd = eventfd_fget((int) iocb->aio_resfd); | 1615 | req->ki_eventfd = eventfd_ctx_fdget((int) iocb->aio_resfd); |
1626 | if (IS_ERR(req->ki_eventfd)) { | 1616 | if (IS_ERR(req->ki_eventfd)) { |
1627 | ret = PTR_ERR(req->ki_eventfd); | 1617 | ret = PTR_ERR(req->ki_eventfd); |
1628 | req->ki_eventfd = NULL; | 1618 | req->ki_eventfd = NULL; |
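The fs/aio.c hunks above switch the per-iocb completion eventfd from a bare struct file * (taken with eventfd_fget() and dropped through f_count/__fput() juggling in the fput path) to a struct eventfd_ctx * taken with eventfd_ctx_fdget() and released with eventfd_ctx_put() in really_put_req(). The userspace contract is unchanged: an iocb may still carry IOCB_FLAG_RESFD plus an eventfd that is signalled once per completed event. A minimal userspace sketch of that flow using the raw aio_abi syscalls; the filename, buffer size, and trimmed error handling are purely illustrative:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/eventfd.h>
#include <sys/syscall.h>
#include <linux/aio_abi.h>

int main(void)
{
	aio_context_t ctx = 0;
	struct iocb cb, *cbs[1] = { &cb };
	struct io_event ev;
	char buf[4096];
	uint64_t completions;

	int efd = eventfd(0, 0);                    /* completion counter */
	int fd = open("data.bin", O_RDONLY);        /* arbitrary test file */
	if (efd < 0 || fd < 0 || syscall(SYS_io_setup, 1, &ctx))
		return 1;

	memset(&cb, 0, sizeof(cb));
	cb.aio_lio_opcode = IOCB_CMD_PREAD;
	cb.aio_fildes    = fd;
	cb.aio_buf       = (uint64_t)(uintptr_t)buf;
	cb.aio_nbytes    = sizeof(buf);
	cb.aio_flags     = IOCB_FLAG_RESFD;         /* signal an eventfd per completion */
	cb.aio_resfd     = efd;                     /* the fd io_submit_one() resolves */

	if (syscall(SYS_io_submit, ctx, 1, cbs) != 1)
		return 1;

	read(efd, &completions, sizeof(completions));   /* blocks until signalled */
	syscall(SYS_io_getevents, ctx, 1, 1, &ev, NULL);
	printf("completions=%llu res=%lld\n",
	       (unsigned long long)completions, (long long)ev.res);

	syscall(SYS_io_destroy, ctx);
	return 0;
}

Blocking on the eventfd read is the notification path that the kernel-side eventfd_ctx reference keeps valid until the iocb is finally put.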
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c index 9fa212b014a5..b7c1603cd4bd 100644 --- a/fs/binfmt_elf.c +++ b/fs/binfmt_elf.c | |||
@@ -1522,11 +1522,11 @@ static int fill_note_info(struct elfhdr *elf, int phdrs, | |||
1522 | info->thread = NULL; | 1522 | info->thread = NULL; |
1523 | 1523 | ||
1524 | psinfo = kmalloc(sizeof(*psinfo), GFP_KERNEL); | 1524 | psinfo = kmalloc(sizeof(*psinfo), GFP_KERNEL); |
1525 | fill_note(&info->psinfo, "CORE", NT_PRPSINFO, sizeof(*psinfo), psinfo); | ||
1526 | |||
1527 | if (psinfo == NULL) | 1525 | if (psinfo == NULL) |
1528 | return 0; | 1526 | return 0; |
1529 | 1527 | ||
1528 | fill_note(&info->psinfo, "CORE", NT_PRPSINFO, sizeof(*psinfo), psinfo); | ||
1529 | |||
1530 | /* | 1530 | /* |
1531 | * Figure out how many notes we're going to need for each thread. | 1531 | * Figure out how many notes we're going to need for each thread. |
1532 | */ | 1532 | */ |
@@ -1929,7 +1929,10 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un | |||
1929 | elf = kmalloc(sizeof(*elf), GFP_KERNEL); | 1929 | elf = kmalloc(sizeof(*elf), GFP_KERNEL); |
1930 | if (!elf) | 1930 | if (!elf) |
1931 | goto out; | 1931 | goto out; |
1932 | 1932 | /* | |
1933 | * The number of segs is recorded in the ELF header as a 16-bit value. | ||
1934 | * Please check the DEFAULT_MAX_MAP_COUNT definition when you modify this. | ||
1935 | */ | ||
1933 | segs = current->mm->map_count; | 1936 | segs = current->mm->map_count; |
1934 | #ifdef ELF_CORE_EXTRA_PHDRS | 1937 | #ifdef ELF_CORE_EXTRA_PHDRS |
1935 | segs += ELF_CORE_EXTRA_PHDRS; | 1938 | segs += ELF_CORE_EXTRA_PHDRS; |
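The new comment in elf_core_dump() exists because the segment count ends up in the ELF header's e_phnum field, which is an Elf32_Half/Elf64_Half, i.e. 16 bits wide, so DEFAULT_MAX_MAP_COUNT has to stay representable in it. A tiny standalone check of that width using glibc's <elf.h> types (nothing kernel-specific is assumed):

#include <elf.h>
#include <stdio.h>

int main(void)
{
	Elf64_Ehdr eh;

	/* e_phnum is an Elf64_Half: 16 bits, so at most 65535 program
	 * headers (core dump segments) can be recorded in the header. */
	printf("sizeof(e_phnum) = %zu bytes, max representable = %u\n",
	       sizeof(eh.e_phnum), (unsigned int)(Elf64_Half)-1);
	return 0;
}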
diff --git a/fs/bio-integrity.c b/fs/bio-integrity.c index 31c46a241bac..49a34e7f7306 100644 --- a/fs/bio-integrity.c +++ b/fs/bio-integrity.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * bio-integrity.c - bio data integrity extensions | 2 | * bio-integrity.c - bio data integrity extensions |
3 | * | 3 | * |
4 | * Copyright (C) 2007, 2008 Oracle Corporation | 4 | * Copyright (C) 2007, 2008, 2009 Oracle Corporation |
5 | * Written by: Martin K. Petersen <martin.petersen@oracle.com> | 5 | * Written by: Martin K. Petersen <martin.petersen@oracle.com> |
6 | * | 6 | * |
7 | * This program is free software; you can redistribute it and/or | 7 | * This program is free software; you can redistribute it and/or |
@@ -25,63 +25,121 @@ | |||
25 | #include <linux/bio.h> | 25 | #include <linux/bio.h> |
26 | #include <linux/workqueue.h> | 26 | #include <linux/workqueue.h> |
27 | 27 | ||
28 | static struct kmem_cache *bio_integrity_slab __read_mostly; | 28 | struct integrity_slab { |
29 | static mempool_t *bio_integrity_pool; | 29 | struct kmem_cache *slab; |
30 | static struct bio_set *integrity_bio_set; | 30 | unsigned short nr_vecs; |
31 | char name[8]; | ||
32 | }; | ||
33 | |||
34 | #define IS(x) { .nr_vecs = x, .name = "bip-"__stringify(x) } | ||
35 | struct integrity_slab bip_slab[BIOVEC_NR_POOLS] __read_mostly = { | ||
36 | IS(1), IS(4), IS(16), IS(64), IS(128), IS(BIO_MAX_PAGES), | ||
37 | }; | ||
38 | #undef IS | ||
39 | |||
31 | static struct workqueue_struct *kintegrityd_wq; | 40 | static struct workqueue_struct *kintegrityd_wq; |
32 | 41 | ||
42 | static inline unsigned int vecs_to_idx(unsigned int nr) | ||
43 | { | ||
44 | switch (nr) { | ||
45 | case 1: | ||
46 | return 0; | ||
47 | case 2 ... 4: | ||
48 | return 1; | ||
49 | case 5 ... 16: | ||
50 | return 2; | ||
51 | case 17 ... 64: | ||
52 | return 3; | ||
53 | case 65 ... 128: | ||
54 | return 4; | ||
55 | case 129 ... BIO_MAX_PAGES: | ||
56 | return 5; | ||
57 | default: | ||
58 | BUG(); | ||
59 | } | ||
60 | } | ||
61 | |||
62 | static inline int use_bip_pool(unsigned int idx) | ||
63 | { | ||
64 | if (idx == BIOVEC_NR_POOLS) | ||
65 | return 1; | ||
66 | |||
67 | return 0; | ||
68 | } | ||
69 | |||
33 | /** | 70 | /** |
34 | * bio_integrity_alloc - Allocate integrity payload and attach it to bio | 71 | * bio_integrity_alloc_bioset - Allocate integrity payload and attach it to bio |
35 | * @bio: bio to attach integrity metadata to | 72 | * @bio: bio to attach integrity metadata to |
36 | * @gfp_mask: Memory allocation mask | 73 | * @gfp_mask: Memory allocation mask |
37 | * @nr_vecs: Number of integrity metadata scatter-gather elements | 74 | * @nr_vecs: Number of integrity metadata scatter-gather elements |
75 | * @bs: bio_set to allocate from | ||
38 | * | 76 | * |
39 | * Description: This function prepares a bio for attaching integrity | 77 | * Description: This function prepares a bio for attaching integrity |
40 | * metadata. nr_vecs specifies the maximum number of pages containing | 78 | * metadata. nr_vecs specifies the maximum number of pages containing |
41 | * integrity metadata that can be attached. | 79 | * integrity metadata that can be attached. |
42 | */ | 80 | */ |
43 | struct bio_integrity_payload *bio_integrity_alloc(struct bio *bio, | 81 | struct bio_integrity_payload *bio_integrity_alloc_bioset(struct bio *bio, |
44 | gfp_t gfp_mask, | 82 | gfp_t gfp_mask, |
45 | unsigned int nr_vecs) | 83 | unsigned int nr_vecs, |
84 | struct bio_set *bs) | ||
46 | { | 85 | { |
47 | struct bio_integrity_payload *bip; | 86 | struct bio_integrity_payload *bip; |
48 | struct bio_vec *iv; | 87 | unsigned int idx = vecs_to_idx(nr_vecs); |
49 | unsigned long idx; | ||
50 | 88 | ||
51 | BUG_ON(bio == NULL); | 89 | BUG_ON(bio == NULL); |
90 | bip = NULL; | ||
52 | 91 | ||
53 | bip = mempool_alloc(bio_integrity_pool, gfp_mask); | 92 | /* Lower order allocations come straight from slab */ |
54 | if (unlikely(bip == NULL)) { | 93 | if (!use_bip_pool(idx)) |
55 | printk(KERN_ERR "%s: could not alloc bip\n", __func__); | 94 | bip = kmem_cache_alloc(bip_slab[idx].slab, gfp_mask); |
56 | return NULL; | ||
57 | } | ||
58 | 95 | ||
59 | memset(bip, 0, sizeof(*bip)); | 96 | /* Use mempool if lower order alloc failed or max vecs were requested */ |
97 | if (bip == NULL) { | ||
98 | bip = mempool_alloc(bs->bio_integrity_pool, gfp_mask); | ||
60 | 99 | ||
61 | iv = bvec_alloc_bs(gfp_mask, nr_vecs, &idx, integrity_bio_set); | 100 | if (unlikely(bip == NULL)) { |
62 | if (unlikely(iv == NULL)) { | 101 | printk(KERN_ERR "%s: could not alloc bip\n", __func__); |
63 | printk(KERN_ERR "%s: could not alloc bip_vec\n", __func__); | 102 | return NULL; |
64 | mempool_free(bip, bio_integrity_pool); | 103 | } |
65 | return NULL; | ||
66 | } | 104 | } |
67 | 105 | ||
68 | bip->bip_pool = idx; | 106 | memset(bip, 0, sizeof(*bip)); |
69 | bip->bip_vec = iv; | 107 | |
108 | bip->bip_slab = idx; | ||
70 | bip->bip_bio = bio; | 109 | bip->bip_bio = bio; |
71 | bio->bi_integrity = bip; | 110 | bio->bi_integrity = bip; |
72 | 111 | ||
73 | return bip; | 112 | return bip; |
74 | } | 113 | } |
114 | EXPORT_SYMBOL(bio_integrity_alloc_bioset); | ||
115 | |||
116 | /** | ||
117 | * bio_integrity_alloc - Allocate integrity payload and attach it to bio | ||
118 | * @bio: bio to attach integrity metadata to | ||
119 | * @gfp_mask: Memory allocation mask | ||
120 | * @nr_vecs: Number of integrity metadata scatter-gather elements | ||
121 | * | ||
122 | * Description: This function prepares a bio for attaching integrity | ||
123 | * metadata. nr_vecs specifies the maximum number of pages containing | ||
124 | * integrity metadata that can be attached. | ||
125 | */ | ||
126 | struct bio_integrity_payload *bio_integrity_alloc(struct bio *bio, | ||
127 | gfp_t gfp_mask, | ||
128 | unsigned int nr_vecs) | ||
129 | { | ||
130 | return bio_integrity_alloc_bioset(bio, gfp_mask, nr_vecs, fs_bio_set); | ||
131 | } | ||
75 | EXPORT_SYMBOL(bio_integrity_alloc); | 132 | EXPORT_SYMBOL(bio_integrity_alloc); |
76 | 133 | ||
77 | /** | 134 | /** |
78 | * bio_integrity_free - Free bio integrity payload | 135 | * bio_integrity_free - Free bio integrity payload |
79 | * @bio: bio containing bip to be freed | 136 | * @bio: bio containing bip to be freed |
137 | * @bs: bio_set this bio was allocated from | ||
80 | * | 138 | * |
81 | * Description: Used to free the integrity portion of a bio. Usually | 139 | * Description: Used to free the integrity portion of a bio. Usually |
82 | * called from bio_free(). | 140 | * called from bio_free(). |
83 | */ | 141 | */ |
84 | void bio_integrity_free(struct bio *bio) | 142 | void bio_integrity_free(struct bio *bio, struct bio_set *bs) |
85 | { | 143 | { |
86 | struct bio_integrity_payload *bip = bio->bi_integrity; | 144 | struct bio_integrity_payload *bip = bio->bi_integrity; |
87 | 145 | ||
@@ -92,8 +150,10 @@ void bio_integrity_free(struct bio *bio) | |||
92 | && bip->bip_buf != NULL) | 150 | && bip->bip_buf != NULL) |
93 | kfree(bip->bip_buf); | 151 | kfree(bip->bip_buf); |
94 | 152 | ||
95 | bvec_free_bs(integrity_bio_set, bip->bip_vec, bip->bip_pool); | 153 | if (use_bip_pool(bip->bip_slab)) |
96 | mempool_free(bip, bio_integrity_pool); | 154 | mempool_free(bip, bs->bio_integrity_pool); |
155 | else | ||
156 | kmem_cache_free(bip_slab[bip->bip_slab].slab, bip); | ||
97 | 157 | ||
98 | bio->bi_integrity = NULL; | 158 | bio->bi_integrity = NULL; |
99 | } | 159 | } |
@@ -114,7 +174,7 @@ int bio_integrity_add_page(struct bio *bio, struct page *page, | |||
114 | struct bio_integrity_payload *bip = bio->bi_integrity; | 174 | struct bio_integrity_payload *bip = bio->bi_integrity; |
115 | struct bio_vec *iv; | 175 | struct bio_vec *iv; |
116 | 176 | ||
117 | if (bip->bip_vcnt >= bvec_nr_vecs(bip->bip_pool)) { | 177 | if (bip->bip_vcnt >= bvec_nr_vecs(bip->bip_slab)) { |
118 | printk(KERN_ERR "%s: bip_vec full\n", __func__); | 178 | printk(KERN_ERR "%s: bip_vec full\n", __func__); |
119 | return 0; | 179 | return 0; |
120 | } | 180 | } |
@@ -647,8 +707,8 @@ void bio_integrity_split(struct bio *bio, struct bio_pair *bp, int sectors) | |||
647 | bp->iv1 = bip->bip_vec[0]; | 707 | bp->iv1 = bip->bip_vec[0]; |
648 | bp->iv2 = bip->bip_vec[0]; | 708 | bp->iv2 = bip->bip_vec[0]; |
649 | 709 | ||
650 | bp->bip1.bip_vec = &bp->iv1; | 710 | bp->bip1.bip_vec[0] = bp->iv1; |
651 | bp->bip2.bip_vec = &bp->iv2; | 711 | bp->bip2.bip_vec[0] = bp->iv2; |
652 | 712 | ||
653 | bp->iv1.bv_len = sectors * bi->tuple_size; | 713 | bp->iv1.bv_len = sectors * bi->tuple_size; |
654 | bp->iv2.bv_offset += sectors * bi->tuple_size; | 714 | bp->iv2.bv_offset += sectors * bi->tuple_size; |
@@ -667,17 +727,19 @@ EXPORT_SYMBOL(bio_integrity_split); | |||
667 | * @bio: New bio | 727 | * @bio: New bio |
668 | * @bio_src: Original bio | 728 | * @bio_src: Original bio |
669 | * @gfp_mask: Memory allocation mask | 729 | * @gfp_mask: Memory allocation mask |
730 | * @bs: bio_set to allocate bip from | ||
670 | * | 731 | * |
671 | * Description: Called to allocate a bip when cloning a bio | 732 | * Description: Called to allocate a bip when cloning a bio |
672 | */ | 733 | */ |
673 | int bio_integrity_clone(struct bio *bio, struct bio *bio_src, gfp_t gfp_mask) | 734 | int bio_integrity_clone(struct bio *bio, struct bio *bio_src, |
735 | gfp_t gfp_mask, struct bio_set *bs) | ||
674 | { | 736 | { |
675 | struct bio_integrity_payload *bip_src = bio_src->bi_integrity; | 737 | struct bio_integrity_payload *bip_src = bio_src->bi_integrity; |
676 | struct bio_integrity_payload *bip; | 738 | struct bio_integrity_payload *bip; |
677 | 739 | ||
678 | BUG_ON(bip_src == NULL); | 740 | BUG_ON(bip_src == NULL); |
679 | 741 | ||
680 | bip = bio_integrity_alloc(bio, gfp_mask, bip_src->bip_vcnt); | 742 | bip = bio_integrity_alloc_bioset(bio, gfp_mask, bip_src->bip_vcnt, bs); |
681 | 743 | ||
682 | if (bip == NULL) | 744 | if (bip == NULL) |
683 | return -EIO; | 745 | return -EIO; |
@@ -693,25 +755,43 @@ int bio_integrity_clone(struct bio *bio, struct bio *bio_src, gfp_t gfp_mask) | |||
693 | } | 755 | } |
694 | EXPORT_SYMBOL(bio_integrity_clone); | 756 | EXPORT_SYMBOL(bio_integrity_clone); |
695 | 757 | ||
696 | static int __init bio_integrity_init(void) | 758 | int bioset_integrity_create(struct bio_set *bs, int pool_size) |
697 | { | 759 | { |
698 | kintegrityd_wq = create_workqueue("kintegrityd"); | 760 | unsigned int max_slab = vecs_to_idx(BIO_MAX_PAGES); |
761 | |||
762 | bs->bio_integrity_pool = | ||
763 | mempool_create_slab_pool(pool_size, bip_slab[max_slab].slab); | ||
699 | 764 | ||
765 | if (!bs->bio_integrity_pool) | ||
766 | return -1; | ||
767 | |||
768 | return 0; | ||
769 | } | ||
770 | EXPORT_SYMBOL(bioset_integrity_create); | ||
771 | |||
772 | void bioset_integrity_free(struct bio_set *bs) | ||
773 | { | ||
774 | if (bs->bio_integrity_pool) | ||
775 | mempool_destroy(bs->bio_integrity_pool); | ||
776 | } | ||
777 | EXPORT_SYMBOL(bioset_integrity_free); | ||
778 | |||
779 | void __init bio_integrity_init(void) | ||
780 | { | ||
781 | unsigned int i; | ||
782 | |||
783 | kintegrityd_wq = create_workqueue("kintegrityd"); | ||
700 | if (!kintegrityd_wq) | 784 | if (!kintegrityd_wq) |
701 | panic("Failed to create kintegrityd\n"); | 785 | panic("Failed to create kintegrityd\n"); |
702 | 786 | ||
703 | bio_integrity_slab = KMEM_CACHE(bio_integrity_payload, | 787 | for (i = 0 ; i < BIOVEC_NR_POOLS ; i++) { |
704 | SLAB_HWCACHE_ALIGN|SLAB_PANIC); | 788 | unsigned int size; |
705 | 789 | ||
706 | bio_integrity_pool = mempool_create_slab_pool(BIO_POOL_SIZE, | 790 | size = sizeof(struct bio_integrity_payload) |
707 | bio_integrity_slab); | 791 | + bip_slab[i].nr_vecs * sizeof(struct bio_vec); |
708 | if (!bio_integrity_pool) | ||
709 | panic("bio_integrity: can't allocate bip pool\n"); | ||
710 | 792 | ||
711 | integrity_bio_set = bioset_create(BIO_POOL_SIZE, 0); | 793 | bip_slab[i].slab = |
712 | if (!integrity_bio_set) | 794 | kmem_cache_create(bip_slab[i].name, size, 0, |
713 | panic("bio_integrity: can't allocate bio_set\n"); | 795 | SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL); |
714 | 796 | } | |
715 | return 0; | ||
716 | } | 797 | } |
717 | subsys_initcall(bio_integrity_init); | ||
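With this rewrite a bip is normally allocated straight from one of six per-size slabs, picked by rounding nr_vecs up to the pool sizes 1, 4, 16, 64, 128 and BIO_MAX_PAGES; the per-bio_set mempool, built in bioset_integrity_create() from the largest of those slabs, serves as the fallback, and each slab object is sizeof(struct bio_integrity_payload) plus room for that many inline bio_vecs. A standalone sketch of just the bucketing, with BIO_MAX_PAGES assumed to be 256 (its usual value on 4K-page kernels of this era):

#include <stdio.h>

#define BIO_MAX_PAGES 256   /* assumed value; matches 4K-page kernels of this era */

/* mirrors the vecs_to_idx() added by the patch: round nr up to a pool size */
static unsigned int vecs_to_idx(unsigned int nr)
{
	if (nr == 1)
		return 0;
	if (nr <= 4)
		return 1;
	if (nr <= 16)
		return 2;
	if (nr <= 64)
		return 3;
	if (nr <= 128)
		return 4;
	if (nr <= BIO_MAX_PAGES)
		return 5;
	return ~0u;              /* the kernel version BUG()s here */
}

int main(void)
{
	static const unsigned int pool_vecs[] = { 1, 4, 16, 64, 128, BIO_MAX_PAGES };
	unsigned int nr;

	for (nr = 1; nr <= BIO_MAX_PAGES; nr *= 2)
		printf("nr_vecs %3u -> bip-%u slab\n",
		       nr, pool_vecs[vecs_to_idx(nr)]);
	return 0;
}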
diff --git a/fs/bio.c b/fs/bio.c | |||
@@ -238,7 +238,7 @@ void bio_free(struct bio *bio, struct bio_set *bs) | |||
238 | bvec_free_bs(bs, bio->bi_io_vec, BIO_POOL_IDX(bio)); | 238 | bvec_free_bs(bs, bio->bi_io_vec, BIO_POOL_IDX(bio)); |
239 | 239 | ||
240 | if (bio_integrity(bio)) | 240 | if (bio_integrity(bio)) |
241 | bio_integrity_free(bio); | 241 | bio_integrity_free(bio, bs); |
242 | 242 | ||
243 | /* | 243 | /* |
244 | * If we have front padding, adjust the bio pointer before freeing | 244 | * If we have front padding, adjust the bio pointer before freeing |
@@ -341,7 +341,7 @@ struct bio *bio_alloc(gfp_t gfp_mask, int nr_iovecs) | |||
341 | static void bio_kmalloc_destructor(struct bio *bio) | 341 | static void bio_kmalloc_destructor(struct bio *bio) |
342 | { | 342 | { |
343 | if (bio_integrity(bio)) | 343 | if (bio_integrity(bio)) |
344 | bio_integrity_free(bio); | 344 | bio_integrity_free(bio, fs_bio_set); |
345 | kfree(bio); | 345 | kfree(bio); |
346 | } | 346 | } |
347 | 347 | ||
@@ -472,7 +472,7 @@ struct bio *bio_clone(struct bio *bio, gfp_t gfp_mask) | |||
472 | if (bio_integrity(bio)) { | 472 | if (bio_integrity(bio)) { |
473 | int ret; | 473 | int ret; |
474 | 474 | ||
475 | ret = bio_integrity_clone(b, bio, gfp_mask); | 475 | ret = bio_integrity_clone(b, bio, gfp_mask, fs_bio_set); |
476 | 476 | ||
477 | if (ret < 0) { | 477 | if (ret < 0) { |
478 | bio_put(b); | 478 | bio_put(b); |
@@ -1539,6 +1539,7 @@ void bioset_free(struct bio_set *bs) | |||
1539 | if (bs->bio_pool) | 1539 | if (bs->bio_pool) |
1540 | mempool_destroy(bs->bio_pool); | 1540 | mempool_destroy(bs->bio_pool); |
1541 | 1541 | ||
1542 | bioset_integrity_free(bs); | ||
1542 | biovec_free_pools(bs); | 1543 | biovec_free_pools(bs); |
1543 | bio_put_slab(bs); | 1544 | bio_put_slab(bs); |
1544 | 1545 | ||
@@ -1579,6 +1580,9 @@ struct bio_set *bioset_create(unsigned int pool_size, unsigned int front_pad) | |||
1579 | if (!bs->bio_pool) | 1580 | if (!bs->bio_pool) |
1580 | goto bad; | 1581 | goto bad; |
1581 | 1582 | ||
1583 | if (bioset_integrity_create(bs, pool_size)) | ||
1584 | goto bad; | ||
1585 | |||
1582 | if (!biovec_create_pools(bs, pool_size)) | 1586 | if (!biovec_create_pools(bs, pool_size)) |
1583 | return bs; | 1587 | return bs; |
1584 | 1588 | ||
@@ -1616,6 +1620,7 @@ static int __init init_bio(void) | |||
1616 | if (!bio_slabs) | 1620 | if (!bio_slabs) |
1617 | panic("bio: can't allocate bios\n"); | 1621 | panic("bio: can't allocate bios\n"); |
1618 | 1622 | ||
1623 | bio_integrity_init(); | ||
1619 | biovec_init_slabs(); | 1624 | biovec_init_slabs(); |
1620 | 1625 | ||
1621 | fs_bio_set = bioset_create(BIO_POOL_SIZE, 0); | 1626 | fs_bio_set = bioset_create(BIO_POOL_SIZE, 0); |
diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c index 7f88628a1a72..6e4f6c50a120 100644 --- a/fs/btrfs/async-thread.c +++ b/fs/btrfs/async-thread.c | |||
@@ -299,8 +299,8 @@ int btrfs_start_workers(struct btrfs_workers *workers, int num_workers) | |||
299 | "btrfs-%s-%d", workers->name, | 299 | "btrfs-%s-%d", workers->name, |
300 | workers->num_workers + i); | 300 | workers->num_workers + i); |
301 | if (IS_ERR(worker->task)) { | 301 | if (IS_ERR(worker->task)) { |
302 | kfree(worker); | ||
303 | ret = PTR_ERR(worker->task); | 302 | ret = PTR_ERR(worker->task); |
303 | kfree(worker); | ||
304 | goto fail; | 304 | goto fail; |
305 | } | 305 | } |
306 | 306 | ||
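The one-line reorder above matters because kthread_run() reports failure through the ERR_PTR encoding: the errno has to be pulled out of worker->task with PTR_ERR() before kfree(worker) releases the memory it lives in, otherwise ret is read from a freed object. A small userspace illustration of the same ordering, with simplified stand-ins for the kernel's ERR_PTR()/IS_ERR()/PTR_ERR() macros:

#include <stdio.h>
#include <stdlib.h>

/* simplified userspace stand-ins for the kernel's error-pointer macros */
#define MAX_ERRNO	4095
#define ERR_PTR(err)	((void *)(long)(err))
#define IS_ERR(ptr)	((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)
#define PTR_ERR(ptr)	((long)(ptr))

struct worker {
	void *task;
};

static void *fake_kthread_run(void)
{
	return ERR_PTR(-12);		/* pretend thread creation hit -ENOMEM */
}

int main(void)
{
	struct worker *worker = calloc(1, sizeof(*worker));
	long ret = 0;

	worker->task = fake_kthread_run();
	if (IS_ERR(worker->task)) {
		ret = PTR_ERR(worker->task);	/* read the errno first ...   */
		free(worker);			/* ... then free the container */
	}
	printf("ret = %ld\n", ret);
	return 0;
}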
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 2779c2f5360a..98a873838717 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h | |||
@@ -2074,8 +2074,7 @@ static inline int btrfs_insert_empty_item(struct btrfs_trans_handle *trans, | |||
2074 | int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path); | 2074 | int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path); |
2075 | int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path); | 2075 | int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path); |
2076 | int btrfs_leaf_free_space(struct btrfs_root *root, struct extent_buffer *leaf); | 2076 | int btrfs_leaf_free_space(struct btrfs_root *root, struct extent_buffer *leaf); |
2077 | int btrfs_drop_snapshot(struct btrfs_trans_handle *trans, struct btrfs_root | 2077 | int btrfs_drop_snapshot(struct btrfs_root *root, int update_ref); |
2078 | *root); | ||
2079 | int btrfs_drop_subtree(struct btrfs_trans_handle *trans, | 2078 | int btrfs_drop_subtree(struct btrfs_trans_handle *trans, |
2080 | struct btrfs_root *root, | 2079 | struct btrfs_root *root, |
2081 | struct extent_buffer *node, | 2080 | struct extent_buffer *node, |
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index edc7d208c5ce..a5aca3997d42 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c | |||
@@ -990,15 +990,13 @@ static inline int extent_ref_type(u64 parent, u64 owner) | |||
990 | return type; | 990 | return type; |
991 | } | 991 | } |
992 | 992 | ||
993 | static int find_next_key(struct btrfs_path *path, struct btrfs_key *key) | 993 | static int find_next_key(struct btrfs_path *path, int level, |
994 | struct btrfs_key *key) | ||
994 | 995 | ||
995 | { | 996 | { |
996 | int level; | 997 | for (; level < BTRFS_MAX_LEVEL; level++) { |
997 | BUG_ON(!path->keep_locks); | ||
998 | for (level = 0; level < BTRFS_MAX_LEVEL; level++) { | ||
999 | if (!path->nodes[level]) | 998 | if (!path->nodes[level]) |
1000 | break; | 999 | break; |
1001 | btrfs_assert_tree_locked(path->nodes[level]); | ||
1002 | if (path->slots[level] + 1 >= | 1000 | if (path->slots[level] + 1 >= |
1003 | btrfs_header_nritems(path->nodes[level])) | 1001 | btrfs_header_nritems(path->nodes[level])) |
1004 | continue; | 1002 | continue; |
@@ -1158,7 +1156,8 @@ int lookup_inline_extent_backref(struct btrfs_trans_handle *trans, | |||
1158 | * For simplicity, we just do not add new inline back | 1156 | * For simplicity, we just do not add new inline back |
1159 | * ref if there is any kind of item for this block | 1157 | * ref if there is any kind of item for this block |
1160 | */ | 1158 | */ |
1161 | if (find_next_key(path, &key) == 0 && key.objectid == bytenr && | 1159 | if (find_next_key(path, 0, &key) == 0 && |
1160 | key.objectid == bytenr && | ||
1162 | key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) { | 1161 | key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) { |
1163 | err = -EAGAIN; | 1162 | err = -EAGAIN; |
1164 | goto out; | 1163 | goto out; |
@@ -2697,7 +2696,7 @@ again: | |||
2697 | 2696 | ||
2698 | printk(KERN_ERR "no space left, need %llu, %llu delalloc bytes" | 2697 | printk(KERN_ERR "no space left, need %llu, %llu delalloc bytes" |
2699 | ", %llu bytes_used, %llu bytes_reserved, " | 2698 | ", %llu bytes_used, %llu bytes_reserved, " |
2700 | "%llu bytes_pinned, %llu bytes_readonly, %llu may use" | 2699 | "%llu bytes_pinned, %llu bytes_readonly, %llu may use " |
2701 | "%llu total\n", (unsigned long long)bytes, | 2700 | "%llu total\n", (unsigned long long)bytes, |
2702 | (unsigned long long)data_sinfo->bytes_delalloc, | 2701 | (unsigned long long)data_sinfo->bytes_delalloc, |
2703 | (unsigned long long)data_sinfo->bytes_used, | 2702 | (unsigned long long)data_sinfo->bytes_used, |
@@ -4128,6 +4127,7 @@ struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans, | |||
4128 | return buf; | 4127 | return buf; |
4129 | } | 4128 | } |
4130 | 4129 | ||
4130 | #if 0 | ||
4131 | int btrfs_drop_leaf_ref(struct btrfs_trans_handle *trans, | 4131 | int btrfs_drop_leaf_ref(struct btrfs_trans_handle *trans, |
4132 | struct btrfs_root *root, struct extent_buffer *leaf) | 4132 | struct btrfs_root *root, struct extent_buffer *leaf) |
4133 | { | 4133 | { |
@@ -4171,8 +4171,6 @@ int btrfs_drop_leaf_ref(struct btrfs_trans_handle *trans, | |||
4171 | return 0; | 4171 | return 0; |
4172 | } | 4172 | } |
4173 | 4173 | ||
4174 | #if 0 | ||
4175 | |||
4176 | static noinline int cache_drop_leaf_ref(struct btrfs_trans_handle *trans, | 4174 | static noinline int cache_drop_leaf_ref(struct btrfs_trans_handle *trans, |
4177 | struct btrfs_root *root, | 4175 | struct btrfs_root *root, |
4178 | struct btrfs_leaf_ref *ref) | 4176 | struct btrfs_leaf_ref *ref) |
@@ -4553,262 +4551,471 @@ out: | |||
4553 | } | 4551 | } |
4554 | #endif | 4552 | #endif |
4555 | 4553 | ||
4554 | struct walk_control { | ||
4555 | u64 refs[BTRFS_MAX_LEVEL]; | ||
4556 | u64 flags[BTRFS_MAX_LEVEL]; | ||
4557 | struct btrfs_key update_progress; | ||
4558 | int stage; | ||
4559 | int level; | ||
4560 | int shared_level; | ||
4561 | int update_ref; | ||
4562 | int keep_locks; | ||
4563 | }; | ||
4564 | |||
4565 | #define DROP_REFERENCE 1 | ||
4566 | #define UPDATE_BACKREF 2 | ||
4567 | |||
4556 | /* | 4568 | /* |
4557 | * helper function for drop_subtree, this function is similar to | 4569 | * helper to process tree block while walking down the tree. |
4558 | * walk_down_tree. The main difference is that it checks reference | 4570 | * |
4559 | * counts while tree blocks are locked. | 4571 | * when wc->stage == DROP_REFERENCE, this function checks |
4572 | * reference count of the block. if the block is shared and | ||
4573 | * we need update back refs for the subtree rooted at the | ||
4574 | * block, this function changes wc->stage to UPDATE_BACKREF | ||
4575 | * | ||
4576 | * when wc->stage == UPDATE_BACKREF, this function updates | ||
4577 | * back refs for pointers in the block. | ||
4578 | * | ||
4579 | * NOTE: return value 1 means we should stop walking down. | ||
4560 | */ | 4580 | */ |
4561 | static noinline int walk_down_tree(struct btrfs_trans_handle *trans, | 4581 | static noinline int walk_down_proc(struct btrfs_trans_handle *trans, |
4562 | struct btrfs_root *root, | 4582 | struct btrfs_root *root, |
4563 | struct btrfs_path *path, int *level) | 4583 | struct btrfs_path *path, |
4584 | struct walk_control *wc) | ||
4564 | { | 4585 | { |
4565 | struct extent_buffer *next; | 4586 | int level = wc->level; |
4566 | struct extent_buffer *cur; | 4587 | struct extent_buffer *eb = path->nodes[level]; |
4567 | struct extent_buffer *parent; | 4588 | struct btrfs_key key; |
4568 | u64 bytenr; | 4589 | u64 flag = BTRFS_BLOCK_FLAG_FULL_BACKREF; |
4569 | u64 ptr_gen; | ||
4570 | u64 refs; | ||
4571 | u64 flags; | ||
4572 | u32 blocksize; | ||
4573 | int ret; | 4590 | int ret; |
4574 | 4591 | ||
4575 | cur = path->nodes[*level]; | 4592 | if (wc->stage == UPDATE_BACKREF && |
4576 | ret = btrfs_lookup_extent_info(trans, root, cur->start, cur->len, | 4593 | btrfs_header_owner(eb) != root->root_key.objectid) |
4577 | &refs, &flags); | 4594 | return 1; |
4578 | BUG_ON(ret); | ||
4579 | if (refs > 1) | ||
4580 | goto out; | ||
4581 | 4595 | ||
4582 | BUG_ON(!(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)); | 4596 | /* |
4597 | * when reference count of tree block is 1, it won't increase | ||
4598 | * again. once full backref flag is set, we never clear it. | ||
4599 | */ | ||
4600 | if ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) || | ||
4601 | (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag))) { | ||
4602 | BUG_ON(!path->locks[level]); | ||
4603 | ret = btrfs_lookup_extent_info(trans, root, | ||
4604 | eb->start, eb->len, | ||
4605 | &wc->refs[level], | ||
4606 | &wc->flags[level]); | ||
4607 | BUG_ON(ret); | ||
4608 | BUG_ON(wc->refs[level] == 0); | ||
4609 | } | ||
4583 | 4610 | ||
4584 | while (*level >= 0) { | 4611 | if (wc->stage == DROP_REFERENCE && |
4585 | cur = path->nodes[*level]; | 4612 | wc->update_ref && wc->refs[level] > 1) { |
4586 | if (*level == 0) { | 4613 | BUG_ON(eb == root->node); |
4587 | ret = btrfs_drop_leaf_ref(trans, root, cur); | 4614 | BUG_ON(path->slots[level] > 0); |
4588 | BUG_ON(ret); | 4615 | if (level == 0) |
4589 | clean_tree_block(trans, root, cur); | 4616 | btrfs_item_key_to_cpu(eb, &key, path->slots[level]); |
4590 | break; | 4617 | else |
4591 | } | 4618 | btrfs_node_key_to_cpu(eb, &key, path->slots[level]); |
4592 | if (path->slots[*level] >= btrfs_header_nritems(cur)) { | 4619 | if (btrfs_header_owner(eb) == root->root_key.objectid && |
4593 | clean_tree_block(trans, root, cur); | 4620 | btrfs_comp_cpu_keys(&key, &wc->update_progress) >= 0) { |
4594 | break; | 4621 | wc->stage = UPDATE_BACKREF; |
4622 | wc->shared_level = level; | ||
4595 | } | 4623 | } |
4624 | } | ||
4596 | 4625 | ||
4597 | bytenr = btrfs_node_blockptr(cur, path->slots[*level]); | 4626 | if (wc->stage == DROP_REFERENCE) { |
4598 | blocksize = btrfs_level_size(root, *level - 1); | 4627 | if (wc->refs[level] > 1) |
4599 | ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]); | 4628 | return 1; |
4600 | 4629 | ||
4601 | next = read_tree_block(root, bytenr, blocksize, ptr_gen); | 4630 | if (path->locks[level] && !wc->keep_locks) { |
4602 | btrfs_tree_lock(next); | 4631 | btrfs_tree_unlock(eb); |
4603 | btrfs_set_lock_blocking(next); | 4632 | path->locks[level] = 0; |
4633 | } | ||
4634 | return 0; | ||
4635 | } | ||
4604 | 4636 | ||
4605 | ret = btrfs_lookup_extent_info(trans, root, bytenr, blocksize, | 4637 | /* wc->stage == UPDATE_BACKREF */ |
4606 | &refs, &flags); | 4638 | if (!(wc->flags[level] & flag)) { |
4639 | BUG_ON(!path->locks[level]); | ||
4640 | ret = btrfs_inc_ref(trans, root, eb, 1); | ||
4607 | BUG_ON(ret); | 4641 | BUG_ON(ret); |
4608 | if (refs > 1) { | 4642 | ret = btrfs_dec_ref(trans, root, eb, 0); |
4609 | parent = path->nodes[*level]; | 4643 | BUG_ON(ret); |
4610 | ret = btrfs_free_extent(trans, root, bytenr, | 4644 | ret = btrfs_set_disk_extent_flags(trans, root, eb->start, |
4611 | blocksize, parent->start, | 4645 | eb->len, flag, 0); |
4612 | btrfs_header_owner(parent), | 4646 | BUG_ON(ret); |
4613 | *level - 1, 0); | 4647 | wc->flags[level] |= flag; |
4648 | } | ||
4649 | |||
4650 | /* | ||
4651 | * the block is shared by multiple trees, so it's not good to | ||
4652 | * keep the tree lock | ||
4653 | */ | ||
4654 | if (path->locks[level] && level > 0) { | ||
4655 | btrfs_tree_unlock(eb); | ||
4656 | path->locks[level] = 0; | ||
4657 | } | ||
4658 | return 0; | ||
4659 | } | ||
4660 | |||
4661 | /* | ||
4662 | * helper to process tree block while walking up the tree. | ||
4663 | * | ||
4664 | * when wc->stage == DROP_REFERENCE, this function drops | ||
4665 | * reference count on the block. | ||
4666 | * | ||
4667 | * when wc->stage == UPDATE_BACKREF, this function changes | ||
4668 | * wc->stage back to DROP_REFERENCE if we changed wc->stage | ||
4669 | * to UPDATE_BACKREF previously while processing the block. | ||
4670 | * | ||
4671 | * NOTE: return value 1 means we should stop walking up. | ||
4672 | */ | ||
4673 | static noinline int walk_up_proc(struct btrfs_trans_handle *trans, | ||
4674 | struct btrfs_root *root, | ||
4675 | struct btrfs_path *path, | ||
4676 | struct walk_control *wc) | ||
4677 | { | ||
4678 | int ret = 0; | ||
4679 | int level = wc->level; | ||
4680 | struct extent_buffer *eb = path->nodes[level]; | ||
4681 | u64 parent = 0; | ||
4682 | |||
4683 | if (wc->stage == UPDATE_BACKREF) { | ||
4684 | BUG_ON(wc->shared_level < level); | ||
4685 | if (level < wc->shared_level) | ||
4686 | goto out; | ||
4687 | |||
4688 | BUG_ON(wc->refs[level] <= 1); | ||
4689 | ret = find_next_key(path, level + 1, &wc->update_progress); | ||
4690 | if (ret > 0) | ||
4691 | wc->update_ref = 0; | ||
4692 | |||
4693 | wc->stage = DROP_REFERENCE; | ||
4694 | wc->shared_level = -1; | ||
4695 | path->slots[level] = 0; | ||
4696 | |||
4697 | /* | ||
4698 | * check reference count again if the block isn't locked. | ||
4699 | * we should start walking down the tree again if reference | ||
4700 | * count is one. | ||
4701 | */ | ||
4702 | if (!path->locks[level]) { | ||
4703 | BUG_ON(level == 0); | ||
4704 | btrfs_tree_lock(eb); | ||
4705 | btrfs_set_lock_blocking(eb); | ||
4706 | path->locks[level] = 1; | ||
4707 | |||
4708 | ret = btrfs_lookup_extent_info(trans, root, | ||
4709 | eb->start, eb->len, | ||
4710 | &wc->refs[level], | ||
4711 | &wc->flags[level]); | ||
4614 | BUG_ON(ret); | 4712 | BUG_ON(ret); |
4615 | path->slots[*level]++; | 4713 | BUG_ON(wc->refs[level] == 0); |
4616 | btrfs_tree_unlock(next); | 4714 | if (wc->refs[level] == 1) { |
4617 | free_extent_buffer(next); | 4715 | btrfs_tree_unlock(eb); |
4618 | continue; | 4716 | path->locks[level] = 0; |
4717 | return 1; | ||
4718 | } | ||
4719 | } else { | ||
4720 | BUG_ON(level != 0); | ||
4619 | } | 4721 | } |
4722 | } | ||
4620 | 4723 | ||
4621 | BUG_ON(!(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)); | 4724 | /* wc->stage == DROP_REFERENCE */ |
4725 | BUG_ON(wc->refs[level] > 1 && !path->locks[level]); | ||
4622 | 4726 | ||
4623 | *level = btrfs_header_level(next); | 4727 | if (wc->refs[level] == 1) { |
4624 | path->nodes[*level] = next; | 4728 | if (level == 0) { |
4625 | path->slots[*level] = 0; | 4729 | if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) |
4626 | path->locks[*level] = 1; | 4730 | ret = btrfs_dec_ref(trans, root, eb, 1); |
4627 | cond_resched(); | 4731 | else |
4732 | ret = btrfs_dec_ref(trans, root, eb, 0); | ||
4733 | BUG_ON(ret); | ||
4734 | } | ||
4735 | /* make block locked assertion in clean_tree_block happy */ | ||
4736 | if (!path->locks[level] && | ||
4737 | btrfs_header_generation(eb) == trans->transid) { | ||
4738 | btrfs_tree_lock(eb); | ||
4739 | btrfs_set_lock_blocking(eb); | ||
4740 | path->locks[level] = 1; | ||
4741 | } | ||
4742 | clean_tree_block(trans, root, eb); | ||
4743 | } | ||
4744 | |||
4745 | if (eb == root->node) { | ||
4746 | if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) | ||
4747 | parent = eb->start; | ||
4748 | else | ||
4749 | BUG_ON(root->root_key.objectid != | ||
4750 | btrfs_header_owner(eb)); | ||
4751 | } else { | ||
4752 | if (wc->flags[level + 1] & BTRFS_BLOCK_FLAG_FULL_BACKREF) | ||
4753 | parent = path->nodes[level + 1]->start; | ||
4754 | else | ||
4755 | BUG_ON(root->root_key.objectid != | ||
4756 | btrfs_header_owner(path->nodes[level + 1])); | ||
4628 | } | 4757 | } |
4629 | out: | ||
4630 | if (path->nodes[*level] == root->node) | ||
4631 | parent = path->nodes[*level]; | ||
4632 | else | ||
4633 | parent = path->nodes[*level + 1]; | ||
4634 | bytenr = path->nodes[*level]->start; | ||
4635 | blocksize = path->nodes[*level]->len; | ||
4636 | 4758 | ||
4637 | ret = btrfs_free_extent(trans, root, bytenr, blocksize, parent->start, | 4759 | ret = btrfs_free_extent(trans, root, eb->start, eb->len, parent, |
4638 | btrfs_header_owner(parent), *level, 0); | 4760 | root->root_key.objectid, level, 0); |
4639 | BUG_ON(ret); | 4761 | BUG_ON(ret); |
4762 | out: | ||
4763 | wc->refs[level] = 0; | ||
4764 | wc->flags[level] = 0; | ||
4765 | return ret; | ||
4766 | } | ||
4767 | |||
4768 | static noinline int walk_down_tree(struct btrfs_trans_handle *trans, | ||
4769 | struct btrfs_root *root, | ||
4770 | struct btrfs_path *path, | ||
4771 | struct walk_control *wc) | ||
4772 | { | ||
4773 | struct extent_buffer *next; | ||
4774 | struct extent_buffer *cur; | ||
4775 | u64 bytenr; | ||
4776 | u64 ptr_gen; | ||
4777 | u32 blocksize; | ||
4778 | int level = wc->level; | ||
4779 | int ret; | ||
4780 | |||
4781 | while (level >= 0) { | ||
4782 | cur = path->nodes[level]; | ||
4783 | BUG_ON(path->slots[level] >= btrfs_header_nritems(cur)); | ||
4640 | 4784 | ||
4641 | if (path->locks[*level]) { | 4785 | ret = walk_down_proc(trans, root, path, wc); |
4642 | btrfs_tree_unlock(path->nodes[*level]); | 4786 | if (ret > 0) |
4643 | path->locks[*level] = 0; | 4787 | break; |
4788 | |||
4789 | if (level == 0) | ||
4790 | break; | ||
4791 | |||
4792 | bytenr = btrfs_node_blockptr(cur, path->slots[level]); | ||
4793 | blocksize = btrfs_level_size(root, level - 1); | ||
4794 | ptr_gen = btrfs_node_ptr_generation(cur, path->slots[level]); | ||
4795 | |||
4796 | next = read_tree_block(root, bytenr, blocksize, ptr_gen); | ||
4797 | btrfs_tree_lock(next); | ||
4798 | btrfs_set_lock_blocking(next); | ||
4799 | |||
4800 | level--; | ||
4801 | BUG_ON(level != btrfs_header_level(next)); | ||
4802 | path->nodes[level] = next; | ||
4803 | path->slots[level] = 0; | ||
4804 | path->locks[level] = 1; | ||
4805 | wc->level = level; | ||
4644 | } | 4806 | } |
4645 | free_extent_buffer(path->nodes[*level]); | ||
4646 | path->nodes[*level] = NULL; | ||
4647 | *level += 1; | ||
4648 | cond_resched(); | ||
4649 | return 0; | 4807 | return 0; |
4650 | } | 4808 | } |
4651 | 4809 | ||
4652 | /* | ||
4653 | * helper for dropping snapshots. This walks back up the tree in the path | ||
4654 | * to find the first node higher up where we haven't yet gone through | ||
4655 | * all the slots | ||
4656 | */ | ||
4657 | static noinline int walk_up_tree(struct btrfs_trans_handle *trans, | 4810 | static noinline int walk_up_tree(struct btrfs_trans_handle *trans, |
4658 | struct btrfs_root *root, | 4811 | struct btrfs_root *root, |
4659 | struct btrfs_path *path, | 4812 | struct btrfs_path *path, |
4660 | int *level, int max_level) | 4813 | struct walk_control *wc, int max_level) |
4661 | { | 4814 | { |
4662 | struct btrfs_root_item *root_item = &root->root_item; | 4815 | int level = wc->level; |
4663 | int i; | ||
4664 | int slot; | ||
4665 | int ret; | 4816 | int ret; |
4666 | 4817 | ||
4667 | for (i = *level; i < max_level && path->nodes[i]; i++) { | 4818 | path->slots[level] = btrfs_header_nritems(path->nodes[level]); |
4668 | slot = path->slots[i]; | 4819 | while (level < max_level && path->nodes[level]) { |
4669 | if (slot + 1 < btrfs_header_nritems(path->nodes[i])) { | 4820 | wc->level = level; |
4670 | /* | 4821 | if (path->slots[level] + 1 < |
4671 | * there is more work to do in this level. | 4822 | btrfs_header_nritems(path->nodes[level])) { |
4672 | * Update the drop_progress marker to reflect | 4823 | path->slots[level]++; |
4673 | * the work we've done so far, and then bump | ||
4674 | * the slot number | ||
4675 | */ | ||
4676 | path->slots[i]++; | ||
4677 | WARN_ON(*level == 0); | ||
4678 | if (max_level == BTRFS_MAX_LEVEL) { | ||
4679 | btrfs_node_key(path->nodes[i], | ||
4680 | &root_item->drop_progress, | ||
4681 | path->slots[i]); | ||
4682 | root_item->drop_level = i; | ||
4683 | } | ||
4684 | *level = i; | ||
4685 | return 0; | 4824 | return 0; |
4686 | } else { | 4825 | } else { |
4687 | struct extent_buffer *parent; | 4826 | ret = walk_up_proc(trans, root, path, wc); |
4688 | 4827 | if (ret > 0) | |
4689 | /* | 4828 | return 0; |
4690 | * this whole node is done, free our reference | ||
4691 | * on it and go up one level | ||
4692 | */ | ||
4693 | if (path->nodes[*level] == root->node) | ||
4694 | parent = path->nodes[*level]; | ||
4695 | else | ||
4696 | parent = path->nodes[*level + 1]; | ||
4697 | 4829 | ||
4698 | clean_tree_block(trans, root, path->nodes[i]); | 4830 | if (path->locks[level]) { |
4699 | ret = btrfs_free_extent(trans, root, | 4831 | btrfs_tree_unlock(path->nodes[level]); |
4700 | path->nodes[i]->start, | 4832 | path->locks[level] = 0; |
4701 | path->nodes[i]->len, | ||
4702 | parent->start, | ||
4703 | btrfs_header_owner(parent), | ||
4704 | *level, 0); | ||
4705 | BUG_ON(ret); | ||
4706 | if (path->locks[*level]) { | ||
4707 | btrfs_tree_unlock(path->nodes[i]); | ||
4708 | path->locks[i] = 0; | ||
4709 | } | 4833 | } |
4710 | free_extent_buffer(path->nodes[i]); | 4834 | free_extent_buffer(path->nodes[level]); |
4711 | path->nodes[i] = NULL; | 4835 | path->nodes[level] = NULL; |
4712 | *level = i + 1; | 4836 | level++; |
4713 | } | 4837 | } |
4714 | } | 4838 | } |
4715 | return 1; | 4839 | return 1; |
4716 | } | 4840 | } |
4717 | 4841 | ||
4718 | /* | 4842 | /* |
4719 | * drop the reference count on the tree rooted at 'snap'. This traverses | 4843 | * drop a subvolume tree. |
4720 | * the tree freeing any blocks that have a ref count of zero after being | 4844 | * |
4721 | * decremented. | 4845 | * this function traverses the tree freeing any blocks that are only |
4846 | * referenced by the tree. | ||
4847 | * | ||
4848 | * when a shared tree block is found, this function decreases its | ||
4849 | * reference count by one. if update_ref is true, this function | ||
4850 | * also makes sure backrefs for the shared block and all lower level | ||
4851 | * blocks are properly updated. | ||
4722 | */ | 4852 | */ |
4723 | int btrfs_drop_snapshot(struct btrfs_trans_handle *trans, struct btrfs_root | 4853 | int btrfs_drop_snapshot(struct btrfs_root *root, int update_ref) |
4724 | *root) | ||
4725 | { | 4854 | { |
4726 | int ret = 0; | ||
4727 | int wret; | ||
4728 | int level; | ||
4729 | struct btrfs_path *path; | 4855 | struct btrfs_path *path; |
4730 | int update_count; | 4856 | struct btrfs_trans_handle *trans; |
4857 | struct btrfs_root *tree_root = root->fs_info->tree_root; | ||
4731 | struct btrfs_root_item *root_item = &root->root_item; | 4858 | struct btrfs_root_item *root_item = &root->root_item; |
4859 | struct walk_control *wc; | ||
4860 | struct btrfs_key key; | ||
4861 | int err = 0; | ||
4862 | int ret; | ||
4863 | int level; | ||
4732 | 4864 | ||
4733 | path = btrfs_alloc_path(); | 4865 | path = btrfs_alloc_path(); |
4734 | BUG_ON(!path); | 4866 | BUG_ON(!path); |
4735 | 4867 | ||
4736 | level = btrfs_header_level(root->node); | 4868 | wc = kzalloc(sizeof(*wc), GFP_NOFS); |
4869 | BUG_ON(!wc); | ||
4870 | |||
4871 | trans = btrfs_start_transaction(tree_root, 1); | ||
4872 | |||
4737 | if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) { | 4873 | if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) { |
4874 | level = btrfs_header_level(root->node); | ||
4738 | path->nodes[level] = btrfs_lock_root_node(root); | 4875 | path->nodes[level] = btrfs_lock_root_node(root); |
4739 | btrfs_set_lock_blocking(path->nodes[level]); | 4876 | btrfs_set_lock_blocking(path->nodes[level]); |
4740 | path->slots[level] = 0; | 4877 | path->slots[level] = 0; |
4741 | path->locks[level] = 1; | 4878 | path->locks[level] = 1; |
4879 | memset(&wc->update_progress, 0, | ||
4880 | sizeof(wc->update_progress)); | ||
4742 | } else { | 4881 | } else { |
4743 | struct btrfs_key key; | ||
4744 | struct btrfs_disk_key found_key; | ||
4745 | struct extent_buffer *node; | ||
4746 | |||
4747 | btrfs_disk_key_to_cpu(&key, &root_item->drop_progress); | 4882 | btrfs_disk_key_to_cpu(&key, &root_item->drop_progress); |
4883 | memcpy(&wc->update_progress, &key, | ||
4884 | sizeof(wc->update_progress)); | ||
4885 | |||
4748 | level = root_item->drop_level; | 4886 | level = root_item->drop_level; |
4887 | BUG_ON(level == 0); | ||
4749 | path->lowest_level = level; | 4888 | path->lowest_level = level; |
4750 | wret = btrfs_search_slot(NULL, root, &key, path, 0, 0); | 4889 | ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); |
4751 | if (wret < 0) { | 4890 | path->lowest_level = 0; |
4752 | ret = wret; | 4891 | if (ret < 0) { |
4892 | err = ret; | ||
4753 | goto out; | 4893 | goto out; |
4754 | } | 4894 | } |
4755 | node = path->nodes[level]; | 4895 | btrfs_node_key_to_cpu(path->nodes[level], &key, |
4756 | btrfs_node_key(node, &found_key, path->slots[level]); | 4896 | path->slots[level]); |
4757 | WARN_ON(memcmp(&found_key, &root_item->drop_progress, | 4897 | WARN_ON(memcmp(&key, &wc->update_progress, sizeof(key))); |
4758 | sizeof(found_key))); | 4898 | |
4759 | /* | 4899 | /* |
4760 | * unlock our path, this is safe because only this | 4900 | * unlock our path, this is safe because only this |
4761 | * function is allowed to delete this snapshot | 4901 | * function is allowed to delete this snapshot |
4762 | */ | 4902 | */ |
4763 | btrfs_unlock_up_safe(path, 0); | 4903 | btrfs_unlock_up_safe(path, 0); |
4904 | |||
4905 | level = btrfs_header_level(root->node); | ||
4906 | while (1) { | ||
4907 | btrfs_tree_lock(path->nodes[level]); | ||
4908 | btrfs_set_lock_blocking(path->nodes[level]); | ||
4909 | |||
4910 | ret = btrfs_lookup_extent_info(trans, root, | ||
4911 | path->nodes[level]->start, | ||
4912 | path->nodes[level]->len, | ||
4913 | &wc->refs[level], | ||
4914 | &wc->flags[level]); | ||
4915 | BUG_ON(ret); | ||
4916 | BUG_ON(wc->refs[level] == 0); | ||
4917 | |||
4918 | if (level == root_item->drop_level) | ||
4919 | break; | ||
4920 | |||
4921 | btrfs_tree_unlock(path->nodes[level]); | ||
4922 | WARN_ON(wc->refs[level] != 1); | ||
4923 | level--; | ||
4924 | } | ||
4764 | } | 4925 | } |
4926 | |||
4927 | wc->level = level; | ||
4928 | wc->shared_level = -1; | ||
4929 | wc->stage = DROP_REFERENCE; | ||
4930 | wc->update_ref = update_ref; | ||
4931 | wc->keep_locks = 0; | ||
4932 | |||
4765 | while (1) { | 4933 | while (1) { |
4766 | unsigned long update; | 4934 | ret = walk_down_tree(trans, root, path, wc); |
4767 | wret = walk_down_tree(trans, root, path, &level); | 4935 | if (ret < 0) { |
4768 | if (wret > 0) | 4936 | err = ret; |
4769 | break; | 4937 | break; |
4770 | if (wret < 0) | 4938 | } |
4771 | ret = wret; | ||
4772 | 4939 | ||
4773 | wret = walk_up_tree(trans, root, path, &level, | 4940 | ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL); |
4774 | BTRFS_MAX_LEVEL); | 4941 | if (ret < 0) { |
4775 | if (wret > 0) | 4942 | err = ret; |
4776 | break; | 4943 | break; |
4777 | if (wret < 0) | 4944 | } |
4778 | ret = wret; | 4945 | |
4779 | if (trans->transaction->in_commit || | 4946 | if (ret > 0) { |
4780 | trans->transaction->delayed_refs.flushing) { | 4947 | BUG_ON(wc->stage != DROP_REFERENCE); |
4781 | ret = -EAGAIN; | ||
4782 | break; | 4948 | break; |
4783 | } | 4949 | } |
4784 | for (update_count = 0; update_count < 16; update_count++) { | 4950 | |
4951 | if (wc->stage == DROP_REFERENCE) { | ||
4952 | level = wc->level; | ||
4953 | btrfs_node_key(path->nodes[level], | ||
4954 | &root_item->drop_progress, | ||
4955 | path->slots[level]); | ||
4956 | root_item->drop_level = level; | ||
4957 | } | ||
4958 | |||
4959 | BUG_ON(wc->level == 0); | ||
4960 | if (trans->transaction->in_commit || | ||
4961 | trans->transaction->delayed_refs.flushing) { | ||
4962 | ret = btrfs_update_root(trans, tree_root, | ||
4963 | &root->root_key, | ||
4964 | root_item); | ||
4965 | BUG_ON(ret); | ||
4966 | |||
4967 | btrfs_end_transaction(trans, tree_root); | ||
4968 | trans = btrfs_start_transaction(tree_root, 1); | ||
4969 | } else { | ||
4970 | unsigned long update; | ||
4785 | update = trans->delayed_ref_updates; | 4971 | update = trans->delayed_ref_updates; |
4786 | trans->delayed_ref_updates = 0; | 4972 | trans->delayed_ref_updates = 0; |
4787 | if (update) | 4973 | if (update) |
4788 | btrfs_run_delayed_refs(trans, root, update); | 4974 | btrfs_run_delayed_refs(trans, tree_root, |
4789 | else | 4975 | update); |
4790 | break; | ||
4791 | } | 4976 | } |
4792 | } | 4977 | } |
4978 | btrfs_release_path(root, path); | ||
4979 | BUG_ON(err); | ||
4980 | |||
4981 | ret = btrfs_del_root(trans, tree_root, &root->root_key); | ||
4982 | BUG_ON(ret); | ||
4983 | |||
4984 | free_extent_buffer(root->node); | ||
4985 | free_extent_buffer(root->commit_root); | ||
4986 | kfree(root); | ||
4793 | out: | 4987 | out: |
4988 | btrfs_end_transaction(trans, tree_root); | ||
4989 | kfree(wc); | ||
4794 | btrfs_free_path(path); | 4990 | btrfs_free_path(path); |
4795 | return ret; | 4991 | return err; |
4796 | } | 4992 | } |
4797 | 4993 | ||
4994 | /* | ||
4995 | * drop subtree rooted at tree block 'node'. | ||
4996 | * | ||
4997 | * NOTE: this function will unlock and release tree block 'node' | ||
4998 | */ | ||
4798 | int btrfs_drop_subtree(struct btrfs_trans_handle *trans, | 4999 | int btrfs_drop_subtree(struct btrfs_trans_handle *trans, |
4799 | struct btrfs_root *root, | 5000 | struct btrfs_root *root, |
4800 | struct extent_buffer *node, | 5001 | struct extent_buffer *node, |
4801 | struct extent_buffer *parent) | 5002 | struct extent_buffer *parent) |
4802 | { | 5003 | { |
4803 | struct btrfs_path *path; | 5004 | struct btrfs_path *path; |
5005 | struct walk_control *wc; | ||
4804 | int level; | 5006 | int level; |
4805 | int parent_level; | 5007 | int parent_level; |
4806 | int ret = 0; | 5008 | int ret = 0; |
4807 | int wret; | 5009 | int wret; |
4808 | 5010 | ||
5011 | BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID); | ||
5012 | |||
4809 | path = btrfs_alloc_path(); | 5013 | path = btrfs_alloc_path(); |
4810 | BUG_ON(!path); | 5014 | BUG_ON(!path); |
4811 | 5015 | ||
5016 | wc = kzalloc(sizeof(*wc), GFP_NOFS); | ||
5017 | BUG_ON(!wc); | ||
5018 | |||
4812 | btrfs_assert_tree_locked(parent); | 5019 | btrfs_assert_tree_locked(parent); |
4813 | parent_level = btrfs_header_level(parent); | 5020 | parent_level = btrfs_header_level(parent); |
4814 | extent_buffer_get(parent); | 5021 | extent_buffer_get(parent); |
@@ -4817,24 +5024,33 @@ int btrfs_drop_subtree(struct btrfs_trans_handle *trans, | |||
4817 | 5024 | ||
4818 | btrfs_assert_tree_locked(node); | 5025 | btrfs_assert_tree_locked(node); |
4819 | level = btrfs_header_level(node); | 5026 | level = btrfs_header_level(node); |
4820 | extent_buffer_get(node); | ||
4821 | path->nodes[level] = node; | 5027 | path->nodes[level] = node; |
4822 | path->slots[level] = 0; | 5028 | path->slots[level] = 0; |
5029 | path->locks[level] = 1; | ||
5030 | |||
5031 | wc->refs[parent_level] = 1; | ||
5032 | wc->flags[parent_level] = BTRFS_BLOCK_FLAG_FULL_BACKREF; | ||
5033 | wc->level = level; | ||
5034 | wc->shared_level = -1; | ||
5035 | wc->stage = DROP_REFERENCE; | ||
5036 | wc->update_ref = 0; | ||
5037 | wc->keep_locks = 1; | ||
4823 | 5038 | ||
4824 | while (1) { | 5039 | while (1) { |
4825 | wret = walk_down_tree(trans, root, path, &level); | 5040 | wret = walk_down_tree(trans, root, path, wc); |
4826 | if (wret < 0) | 5041 | if (wret < 0) { |
4827 | ret = wret; | 5042 | ret = wret; |
4828 | if (wret != 0) | ||
4829 | break; | 5043 | break; |
5044 | } | ||
4830 | 5045 | ||
4831 | wret = walk_up_tree(trans, root, path, &level, parent_level); | 5046 | wret = walk_up_tree(trans, root, path, wc, parent_level); |
4832 | if (wret < 0) | 5047 | if (wret < 0) |
4833 | ret = wret; | 5048 | ret = wret; |
4834 | if (wret != 0) | 5049 | if (wret != 0) |
4835 | break; | 5050 | break; |
4836 | } | 5051 | } |
4837 | 5052 | ||
5053 | kfree(wc); | ||
4838 | btrfs_free_path(path); | 5054 | btrfs_free_path(path); |
4839 | return ret; | 5055 | return ret; |
4840 | } | 5056 | } |
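The bulk of this file's change replaces the old snapshot-drop walk with an explicit, resumable one: a struct walk_control carries the per-level refs/flags plus the DROP_REFERENCE/UPDATE_BACKREF stage, walk_down_tree() pushes nodes onto path->nodes[]/slots[] until walk_down_proc() says stop, and walk_up_tree() pops finished levels, advancing the first parent slot that still has work. A self-contained sketch of just that down/up iteration over a toy in-memory tree (no btrfs structures, reference counting, or locking; only the control flow and the walk_control/walk_down/walk_up naming mirror the patch):

#include <stdio.h>

#define MAX_LEVEL 8

struct node {
	int nritems;
	struct node *child[4];
	int value;                       /* leaves only */
};

struct walk_control {
	struct node *nodes[MAX_LEVEL];   /* plays the role of path->nodes[] */
	int slots[MAX_LEVEL];            /* plays the role of path->slots[] */
	int level;
};

/* descend from wc->level to a leaf, like walk_down_tree() */
static void walk_down(struct walk_control *wc)
{
	while (wc->level > 0) {
		struct node *cur = wc->nodes[wc->level];
		wc->level--;
		wc->nodes[wc->level] = cur->child[wc->slots[wc->level + 1]];
		wc->slots[wc->level] = 0;
	}
	printf("visit leaf %d\n", wc->nodes[0]->value);
}

/* pop finished levels, advance the first unfinished slot, like walk_up_tree() */
static int walk_up(struct walk_control *wc, int max_level)
{
	while (wc->level < max_level && wc->nodes[wc->level]) {
		if (wc->slots[wc->level] + 1 < wc->nodes[wc->level]->nritems) {
			wc->slots[wc->level]++;
			return 0;                /* more work at this level */
		}
		wc->nodes[wc->level] = NULL;     /* this node is done, go up */
		wc->level++;
	}
	return 1;                                /* whole tree processed */
}

int main(void)
{
	struct node leaves[4] = { {0,{0},1}, {0,{0},2}, {0,{0},3}, {0,{0},4} };
	struct node inner[2]  = { {2,{&leaves[0],&leaves[1]},0},
				  {2,{&leaves[2],&leaves[3]},0} };
	struct node root      = { 2, { &inner[0], &inner[1] }, 0 };
	struct walk_control wc = { .nodes = { [2] = &root }, .level = 2 };

	while (1) {
		walk_down(&wc);
		if (walk_up(&wc, MAX_LEVEL))
			break;
	}
	return 0;
}

btrfs_drop_snapshot() wraps the same loop and additionally records drop_progress/drop_level so the walk can be restarted across transactions, which is what the new btrfs_update_root()/btrfs_end_transaction() block inside the loop is for.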
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index 126477eaecf5..7c3cd248d8d6 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c | |||
@@ -151,7 +151,10 @@ static noinline int dirty_and_release_pages(struct btrfs_trans_handle *trans, | |||
151 | } | 151 | } |
152 | if (end_pos > isize) { | 152 | if (end_pos > isize) { |
153 | i_size_write(inode, end_pos); | 153 | i_size_write(inode, end_pos); |
154 | btrfs_update_inode(trans, root, inode); | 154 | /* we've only changed i_size in ram, and we haven't updated |
155 | * the disk i_size. There is no need to log the inode | ||
156 | * at this time. | ||
157 | */ | ||
155 | } | 158 | } |
156 | err = btrfs_end_transaction(trans, root); | 159 | err = btrfs_end_transaction(trans, root); |
157 | out_unlock: | 160 | out_unlock: |
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index dbe1aabf96cd..7ffa3d34ea19 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c | |||
@@ -3580,12 +3580,6 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans, | |||
3580 | owner = 1; | 3580 | owner = 1; |
3581 | BTRFS_I(inode)->block_group = | 3581 | BTRFS_I(inode)->block_group = |
3582 | btrfs_find_block_group(root, 0, alloc_hint, owner); | 3582 | btrfs_find_block_group(root, 0, alloc_hint, owner); |
3583 | if ((mode & S_IFREG)) { | ||
3584 | if (btrfs_test_opt(root, NODATASUM)) | ||
3585 | BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM; | ||
3586 | if (btrfs_test_opt(root, NODATACOW)) | ||
3587 | BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW; | ||
3588 | } | ||
3589 | 3583 | ||
3590 | key[0].objectid = objectid; | 3584 | key[0].objectid = objectid; |
3591 | btrfs_set_key_type(&key[0], BTRFS_INODE_ITEM_KEY); | 3585 | btrfs_set_key_type(&key[0], BTRFS_INODE_ITEM_KEY); |
@@ -3640,6 +3634,13 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans, | |||
3640 | 3634 | ||
3641 | btrfs_inherit_iflags(inode, dir); | 3635 | btrfs_inherit_iflags(inode, dir); |
3642 | 3636 | ||
3637 | if ((mode & S_IFREG)) { | ||
3638 | if (btrfs_test_opt(root, NODATASUM)) | ||
3639 | BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM; | ||
3640 | if (btrfs_test_opt(root, NODATACOW)) | ||
3641 | BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW; | ||
3642 | } | ||
3643 | |||
3643 | insert_inode_hash(inode); | 3644 | insert_inode_hash(inode); |
3644 | inode_tree_add(inode); | 3645 | inode_tree_add(inode); |
3645 | return inode; | 3646 | return inode; |
@@ -5082,6 +5083,7 @@ static long btrfs_fallocate(struct inode *inode, int mode, | |||
5082 | u64 mask = BTRFS_I(inode)->root->sectorsize - 1; | 5083 | u64 mask = BTRFS_I(inode)->root->sectorsize - 1; |
5083 | struct extent_map *em; | 5084 | struct extent_map *em; |
5084 | struct btrfs_trans_handle *trans; | 5085 | struct btrfs_trans_handle *trans; |
5086 | struct btrfs_root *root; | ||
5085 | int ret; | 5087 | int ret; |
5086 | 5088 | ||
5087 | alloc_start = offset & ~mask; | 5089 | alloc_start = offset & ~mask; |
@@ -5100,6 +5102,13 @@ static long btrfs_fallocate(struct inode *inode, int mode, | |||
5100 | goto out; | 5102 | goto out; |
5101 | } | 5103 | } |
5102 | 5104 | ||
5105 | root = BTRFS_I(inode)->root; | ||
5106 | |||
5107 | ret = btrfs_check_data_free_space(root, inode, | ||
5108 | alloc_end - alloc_start); | ||
5109 | if (ret) | ||
5110 | goto out; | ||
5111 | |||
5103 | locked_end = alloc_end - 1; | 5112 | locked_end = alloc_end - 1; |
5104 | while (1) { | 5113 | while (1) { |
5105 | struct btrfs_ordered_extent *ordered; | 5114 | struct btrfs_ordered_extent *ordered; |
@@ -5107,7 +5116,7 @@ static long btrfs_fallocate(struct inode *inode, int mode, | |||
5107 | trans = btrfs_start_transaction(BTRFS_I(inode)->root, 1); | 5116 | trans = btrfs_start_transaction(BTRFS_I(inode)->root, 1); |
5108 | if (!trans) { | 5117 | if (!trans) { |
5109 | ret = -EIO; | 5118 | ret = -EIO; |
5110 | goto out; | 5119 | goto out_free; |
5111 | } | 5120 | } |
5112 | 5121 | ||
5113 | /* the extent lock is ordered inside the running | 5122 | /* the extent lock is ordered inside the running |
@@ -5168,6 +5177,8 @@ static long btrfs_fallocate(struct inode *inode, int mode, | |||
5168 | GFP_NOFS); | 5177 | GFP_NOFS); |
5169 | 5178 | ||
5170 | btrfs_end_transaction(trans, BTRFS_I(inode)->root); | 5179 | btrfs_end_transaction(trans, BTRFS_I(inode)->root); |
5180 | out_free: | ||
5181 | btrfs_free_reserved_data_space(root, inode, alloc_end - alloc_start); | ||
5171 | out: | 5182 | out: |
5172 | mutex_unlock(&inode->i_mutex); | 5183 | mutex_unlock(&inode->i_mutex); |
5173 | return ret; | 5184 | return ret; |
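The fallocate hunks follow a reserve-first pattern: btrfs_check_data_free_space() accounts the whole range before any transaction is started, and the new out_free label drops that reservation both when the transaction cannot be started (the path that previously jumped straight to out) and on the normal fall-through once the work is done. A generic sketch of that ordering (reserve_space/release_space/start_transaction/do_allocation are illustrative stand-ins, not btrfs functions):

#include <stdio.h>

/* illustrative stand-ins, not btrfs APIs */
static int  reserve_space(long bytes)   { printf("reserve %ld bytes\n", bytes); return 0; }
static void release_space(long bytes)   { printf("release %ld bytes\n", bytes); }
static int  start_transaction(void)     { return 0; }
static void end_transaction(void)       { }
static int  do_allocation(long bytes)   { (void)bytes; return 0; }

static long fallocate_range(long start, long end)
{
	long ret;

	ret = reserve_space(end - start);   /* added up front by the patch */
	if (ret)
		goto out;

	ret = start_transaction();
	if (ret)
		goto out_free;              /* was "goto out": must now drop the reservation */

	ret = do_allocation(end - start);
	end_transaction();
	/* success falls through as well: the reservation only covers the
	 * operation itself, matching the out_free placement in the patch */
out_free:
	release_space(end - start);
out:
	return ret;
}

int main(void)
{
	return (int)fallocate_range(0, 4096);
}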
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index eff18f5b5362..9f4db848db10 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c | |||
@@ -1028,7 +1028,8 @@ static long btrfs_ioctl_clone(struct file *file, unsigned long srcfd, | |||
1028 | struct btrfs_file_extent_item); | 1028 | struct btrfs_file_extent_item); |
1029 | comp = btrfs_file_extent_compression(leaf, extent); | 1029 | comp = btrfs_file_extent_compression(leaf, extent); |
1030 | type = btrfs_file_extent_type(leaf, extent); | 1030 | type = btrfs_file_extent_type(leaf, extent); |
1031 | if (type == BTRFS_FILE_EXTENT_REG) { | 1031 | if (type == BTRFS_FILE_EXTENT_REG || |
1032 | type == BTRFS_FILE_EXTENT_PREALLOC) { | ||
1032 | disko = btrfs_file_extent_disk_bytenr(leaf, | 1033 | disko = btrfs_file_extent_disk_bytenr(leaf, |
1033 | extent); | 1034 | extent); |
1034 | diskl = btrfs_file_extent_disk_num_bytes(leaf, | 1035 | diskl = btrfs_file_extent_disk_num_bytes(leaf, |
@@ -1051,7 +1052,8 @@ static long btrfs_ioctl_clone(struct file *file, unsigned long srcfd, | |||
1051 | new_key.objectid = inode->i_ino; | 1052 | new_key.objectid = inode->i_ino; |
1052 | new_key.offset = key.offset + destoff - off; | 1053 | new_key.offset = key.offset + destoff - off; |
1053 | 1054 | ||
1054 | if (type == BTRFS_FILE_EXTENT_REG) { | 1055 | if (type == BTRFS_FILE_EXTENT_REG || |
1056 | type == BTRFS_FILE_EXTENT_PREALLOC) { | ||
1055 | ret = btrfs_insert_empty_item(trans, root, path, | 1057 | ret = btrfs_insert_empty_item(trans, root, path, |
1056 | &new_key, size); | 1058 | &new_key, size); |
1057 | if (ret) | 1059 | if (ret) |
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index b23dc209ae10..008397934778 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c | |||
@@ -1788,7 +1788,7 @@ static void merge_func(struct btrfs_work *work) | |||
1788 | btrfs_end_transaction(trans, root); | 1788 | btrfs_end_transaction(trans, root); |
1789 | } | 1789 | } |
1790 | 1790 | ||
1791 | btrfs_drop_dead_root(reloc_root); | 1791 | btrfs_drop_snapshot(reloc_root, 0); |
1792 | 1792 | ||
1793 | if (atomic_dec_and_test(async->num_pending)) | 1793 | if (atomic_dec_and_test(async->num_pending)) |
1794 | complete(async->done); | 1794 | complete(async->done); |
@@ -2075,9 +2075,6 @@ static int do_relocation(struct btrfs_trans_handle *trans, | |||
2075 | 2075 | ||
2076 | ret = btrfs_drop_subtree(trans, root, eb, upper->eb); | 2076 | ret = btrfs_drop_subtree(trans, root, eb, upper->eb); |
2077 | BUG_ON(ret); | 2077 | BUG_ON(ret); |
2078 | |||
2079 | btrfs_tree_unlock(eb); | ||
2080 | free_extent_buffer(eb); | ||
2081 | } | 2078 | } |
2082 | if (!lowest) { | 2079 | if (!lowest) { |
2083 | btrfs_tree_unlock(upper->eb); | 2080 | btrfs_tree_unlock(upper->eb); |
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 4e83457ea253..2dbf1c1f56ee 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c | |||
@@ -593,6 +593,7 @@ int btrfs_defrag_root(struct btrfs_root *root, int cacheonly) | |||
593 | return 0; | 593 | return 0; |
594 | } | 594 | } |
595 | 595 | ||
596 | #if 0 | ||
596 | /* | 597 | /* |
597 | * when dropping snapshots, we generate a ton of delayed refs, and it makes | 598 | * when dropping snapshots, we generate a ton of delayed refs, and it makes |
598 | * sense not to join the transaction while it is trying to flush the current | 599 | * sense not to join the transaction while it is trying to flush the current |
@@ -681,6 +682,7 @@ int btrfs_drop_dead_root(struct btrfs_root *root) | |||
681 | btrfs_btree_balance_dirty(tree_root, nr); | 682 | btrfs_btree_balance_dirty(tree_root, nr); |
682 | return ret; | 683 | return ret; |
683 | } | 684 | } |
685 | #endif | ||
684 | 686 | ||
685 | /* | 687 | /* |
686 | * new snapshots need to be created at a very specific time in the | 688 | * new snapshots need to be created at a very specific time in the |
@@ -1081,7 +1083,7 @@ int btrfs_clean_old_snapshots(struct btrfs_root *root) | |||
1081 | while (!list_empty(&list)) { | 1083 | while (!list_empty(&list)) { |
1082 | root = list_entry(list.next, struct btrfs_root, root_list); | 1084 | root = list_entry(list.next, struct btrfs_root, root_list); |
1083 | list_del_init(&root->root_list); | 1085 | list_del_init(&root->root_list); |
1084 | btrfs_drop_dead_root(root); | 1086 | btrfs_drop_snapshot(root, 0); |
1085 | } | 1087 | } |
1086 | return 0; | 1088 | return 0; |
1087 | } | 1089 | } |
diff --git a/fs/cifs/CHANGES b/fs/cifs/CHANGES index b48689839428..3a9b7a58a51d 100644 --- a/fs/cifs/CHANGES +++ b/fs/cifs/CHANGES | |||
@@ -5,7 +5,7 @@ client generated ones by default (mount option "serverino" turned | |||
5 | on by default if server supports it). Add forceuid and forcegid | 5 | on by default if server supports it). Add forceuid and forcegid |
6 | mount options (so that when negotiating unix extensions specifying | 6 | mount options (so that when negotiating unix extensions specifying |
7 | which uid mounted does not immediately force the server's reported | 7 | which uid mounted does not immediately force the server's reported |
8 | uids to be overridden). | 8 | uids to be overridden). Add support for scope mount parm. |
9 | 9 | ||
10 | Version 1.58 | 10 | Version 1.58 |
11 | ------------ | 11 | ------------ |
diff --git a/fs/cifs/asn1.c b/fs/cifs/asn1.c index 1b09f1670061..20692fbfdb24 100644 --- a/fs/cifs/asn1.c +++ b/fs/cifs/asn1.c | |||
@@ -49,6 +49,7 @@ | |||
49 | #define ASN1_OJI 6 /* Object Identifier */ | 49 | #define ASN1_OJI 6 /* Object Identifier */ |
50 | #define ASN1_OJD 7 /* Object Description */ | 50 | #define ASN1_OJD 7 /* Object Description */ |
51 | #define ASN1_EXT 8 /* External */ | 51 | #define ASN1_EXT 8 /* External */ |
52 | #define ASN1_ENUM 10 /* Enumerated */ | ||
52 | #define ASN1_SEQ 16 /* Sequence */ | 53 | #define ASN1_SEQ 16 /* Sequence */ |
53 | #define ASN1_SET 17 /* Set */ | 54 | #define ASN1_SET 17 /* Set */ |
54 | #define ASN1_NUMSTR 18 /* Numerical String */ | 55 | #define ASN1_NUMSTR 18 /* Numerical String */ |
@@ -78,10 +79,12 @@ | |||
78 | #define SPNEGO_OID_LEN 7 | 79 | #define SPNEGO_OID_LEN 7 |
79 | #define NTLMSSP_OID_LEN 10 | 80 | #define NTLMSSP_OID_LEN 10 |
80 | #define KRB5_OID_LEN 7 | 81 | #define KRB5_OID_LEN 7 |
82 | #define KRB5U2U_OID_LEN 8 | ||
81 | #define MSKRB5_OID_LEN 7 | 83 | #define MSKRB5_OID_LEN 7 |
82 | static unsigned long SPNEGO_OID[7] = { 1, 3, 6, 1, 5, 5, 2 }; | 84 | static unsigned long SPNEGO_OID[7] = { 1, 3, 6, 1, 5, 5, 2 }; |
83 | static unsigned long NTLMSSP_OID[10] = { 1, 3, 6, 1, 4, 1, 311, 2, 2, 10 }; | 85 | static unsigned long NTLMSSP_OID[10] = { 1, 3, 6, 1, 4, 1, 311, 2, 2, 10 }; |
84 | static unsigned long KRB5_OID[7] = { 1, 2, 840, 113554, 1, 2, 2 }; | 86 | static unsigned long KRB5_OID[7] = { 1, 2, 840, 113554, 1, 2, 2 }; |
87 | static unsigned long KRB5U2U_OID[8] = { 1, 2, 840, 113554, 1, 2, 2, 3 }; | ||
85 | static unsigned long MSKRB5_OID[7] = { 1, 2, 840, 48018, 1, 2, 2 }; | 88 | static unsigned long MSKRB5_OID[7] = { 1, 2, 840, 48018, 1, 2, 2 }; |
86 | 89 | ||
87 | /* | 90 | /* |
@@ -122,6 +125,28 @@ asn1_octet_decode(struct asn1_ctx *ctx, unsigned char *ch) | |||
122 | return 1; | 125 | return 1; |
123 | } | 126 | } |
124 | 127 | ||
128 | #if 0 /* will be needed later by spnego decoding/encoding of ntlmssp */ | ||
129 | static unsigned char | ||
130 | asn1_enum_decode(struct asn1_ctx *ctx, __le32 *val) | ||
131 | { | ||
132 | unsigned char ch; | ||
133 | |||
134 | if (ctx->pointer >= ctx->end) { | ||
135 | ctx->error = ASN1_ERR_DEC_EMPTY; | ||
136 | return 0; | ||
137 | } | ||
138 | |||
139 | ch = *(ctx->pointer)++; /* ch has 0xa, ptr points to length octet */ | ||
140 | if ((ch) == ASN1_ENUM) /* if ch value is ENUM, 0xa */ | ||
141 | *val = *(++(ctx->pointer)); /* value has enum value */ | ||
142 | else | ||
143 | return 0; | ||
144 | |||
145 | ctx->pointer++; | ||
146 | return 1; | ||
147 | } | ||
148 | #endif | ||
149 | |||
125 | static unsigned char | 150 | static unsigned char |
126 | asn1_tag_decode(struct asn1_ctx *ctx, unsigned int *tag) | 151 | asn1_tag_decode(struct asn1_ctx *ctx, unsigned int *tag) |
127 | { | 152 | { |
@@ -476,10 +501,9 @@ decode_negTokenInit(unsigned char *security_blob, int length, | |||
476 | unsigned int cls, con, tag, oidlen, rc; | 501 | unsigned int cls, con, tag, oidlen, rc; |
477 | bool use_ntlmssp = false; | 502 | bool use_ntlmssp = false; |
478 | bool use_kerberos = false; | 503 | bool use_kerberos = false; |
504 | bool use_kerberosu2u = false; | ||
479 | bool use_mskerberos = false; | 505 | bool use_mskerberos = false; |
480 | 506 | ||
481 | *secType = NTLM; /* BB eventually make Kerberos or NLTMSSP the default*/ | ||
482 | |||
483 | /* cifs_dump_mem(" Received SecBlob ", security_blob, length); */ | 507 | /* cifs_dump_mem(" Received SecBlob ", security_blob, length); */ |
484 | 508 | ||
485 | asn1_open(&ctx, security_blob, length); | 509 | asn1_open(&ctx, security_blob, length); |
@@ -515,6 +539,7 @@ decode_negTokenInit(unsigned char *security_blob, int length, | |||
515 | return 0; | 539 | return 0; |
516 | } | 540 | } |
517 | 541 | ||
542 | /* SPNEGO */ | ||
518 | if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) { | 543 | if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) { |
519 | cFYI(1, ("Error decoding negTokenInit")); | 544 | cFYI(1, ("Error decoding negTokenInit")); |
520 | return 0; | 545 | return 0; |
@@ -526,6 +551,7 @@ decode_negTokenInit(unsigned char *security_blob, int length, | |||
526 | return 0; | 551 | return 0; |
527 | } | 552 | } |
528 | 553 | ||
554 | /* negTokenInit */ | ||
529 | if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) { | 555 | if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) { |
530 | cFYI(1, ("Error decoding negTokenInit")); | 556 | cFYI(1, ("Error decoding negTokenInit")); |
531 | return 0; | 557 | return 0; |
@@ -537,6 +563,7 @@ decode_negTokenInit(unsigned char *security_blob, int length, | |||
537 | return 0; | 563 | return 0; |
538 | } | 564 | } |
539 | 565 | ||
566 | /* sequence */ | ||
540 | if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) { | 567 | if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) { |
541 | cFYI(1, ("Error decoding 2nd part of negTokenInit")); | 568 | cFYI(1, ("Error decoding 2nd part of negTokenInit")); |
542 | return 0; | 569 | return 0; |
@@ -548,6 +575,7 @@ decode_negTokenInit(unsigned char *security_blob, int length, | |||
548 | return 0; | 575 | return 0; |
549 | } | 576 | } |
550 | 577 | ||
578 | /* sequence of */ | ||
551 | if (asn1_header_decode | 579 | if (asn1_header_decode |
552 | (&ctx, &sequence_end, &cls, &con, &tag) == 0) { | 580 | (&ctx, &sequence_end, &cls, &con, &tag) == 0) { |
553 | cFYI(1, ("Error decoding 2nd part of negTokenInit")); | 581 | cFYI(1, ("Error decoding 2nd part of negTokenInit")); |
@@ -560,6 +588,7 @@ decode_negTokenInit(unsigned char *security_blob, int length, | |||
560 | return 0; | 588 | return 0; |
561 | } | 589 | } |
562 | 590 | ||
591 | /* list of security mechanisms */ | ||
563 | while (!asn1_eoc_decode(&ctx, sequence_end)) { | 592 | while (!asn1_eoc_decode(&ctx, sequence_end)) { |
564 | rc = asn1_header_decode(&ctx, &end, &cls, &con, &tag); | 593 | rc = asn1_header_decode(&ctx, &end, &cls, &con, &tag); |
565 | if (!rc) { | 594 | if (!rc) { |
@@ -576,11 +605,15 @@ decode_negTokenInit(unsigned char *security_blob, int length, | |||
576 | 605 | ||
577 | if (compare_oid(oid, oidlen, MSKRB5_OID, | 606 | if (compare_oid(oid, oidlen, MSKRB5_OID, |
578 | MSKRB5_OID_LEN) && | 607 | MSKRB5_OID_LEN) && |
579 | !use_kerberos) | 608 | !use_mskerberos) |
580 | use_mskerberos = true; | 609 | use_mskerberos = true; |
610 | else if (compare_oid(oid, oidlen, KRB5U2U_OID, | ||
611 | KRB5U2U_OID_LEN) && | ||
612 | !use_kerberosu2u) | ||
613 | use_kerberosu2u = true; | ||
581 | else if (compare_oid(oid, oidlen, KRB5_OID, | 614 | else if (compare_oid(oid, oidlen, KRB5_OID, |
582 | KRB5_OID_LEN) && | 615 | KRB5_OID_LEN) && |
583 | !use_mskerberos) | 616 | !use_kerberos) |
584 | use_kerberos = true; | 617 | use_kerberos = true; |
585 | else if (compare_oid(oid, oidlen, NTLMSSP_OID, | 618 | else if (compare_oid(oid, oidlen, NTLMSSP_OID, |
586 | NTLMSSP_OID_LEN)) | 619 | NTLMSSP_OID_LEN)) |
@@ -593,7 +626,12 @@ decode_negTokenInit(unsigned char *security_blob, int length, | |||
593 | } | 626 | } |
594 | } | 627 | } |
595 | 628 | ||
629 | /* mechlistMIC */ | ||
596 | if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) { | 630 | if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) { |
631 | /* Check if we have reached the end of the blob, but with | ||
632 | no mechListMic (e.g. NTLMSSP instead of KRB5) */ | ||
633 | if (ctx.error == ASN1_ERR_DEC_EMPTY) | ||
634 | goto decode_negtoken_exit; | ||
597 | cFYI(1, ("Error decoding last part negTokenInit exit3")); | 635 | cFYI(1, ("Error decoding last part negTokenInit exit3")); |
598 | return 0; | 636 | return 0; |
599 | } else if ((cls != ASN1_CTX) || (con != ASN1_CON)) { | 637 | } else if ((cls != ASN1_CTX) || (con != ASN1_CON)) { |
@@ -602,6 +640,8 @@ decode_negTokenInit(unsigned char *security_blob, int length, | |||
602 | cls, con, tag, end, *end)); | 640 | cls, con, tag, end, *end)); |
603 | return 0; | 641 | return 0; |
604 | } | 642 | } |
643 | |||
644 | /* sequence */ | ||
605 | if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) { | 645 | if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) { |
606 | cFYI(1, ("Error decoding last part negTokenInit exit5")); | 646 | cFYI(1, ("Error decoding last part negTokenInit exit5")); |
607 | return 0; | 647 | return 0; |
@@ -611,6 +651,7 @@ decode_negTokenInit(unsigned char *security_blob, int length, | |||
611 | cls, con, tag, end, *end)); | 651 | cls, con, tag, end, *end)); |
612 | } | 652 | } |
613 | 653 | ||
654 | /* sequence of */ | ||
614 | if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) { | 655 | if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) { |
615 | cFYI(1, ("Error decoding last part negTokenInit exit 7")); | 656 | cFYI(1, ("Error decoding last part negTokenInit exit 7")); |
616 | return 0; | 657 | return 0; |
@@ -619,6 +660,8 @@ decode_negTokenInit(unsigned char *security_blob, int length, | |||
619 | cls, con, tag, end, *end)); | 660 | cls, con, tag, end, *end)); |
620 | return 0; | 661 | return 0; |
621 | } | 662 | } |
663 | |||
664 | /* general string */ | ||
622 | if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) { | 665 | if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) { |
623 | cFYI(1, ("Error decoding last part negTokenInit exit9")); | 666 | cFYI(1, ("Error decoding last part negTokenInit exit9")); |
624 | return 0; | 667 | return 0; |
@@ -630,13 +673,13 @@ decode_negTokenInit(unsigned char *security_blob, int length, | |||
630 | } | 673 | } |
631 | cFYI(1, ("Need to call asn1_octets_decode() function for %s", | 674 | cFYI(1, ("Need to call asn1_octets_decode() function for %s", |
632 | ctx.pointer)); /* is this UTF-8 or ASCII? */ | 675 | ctx.pointer)); /* is this UTF-8 or ASCII? */ |
633 | 676 | decode_negtoken_exit: | |
634 | if (use_kerberos) | 677 | if (use_kerberos) |
635 | *secType = Kerberos; | 678 | *secType = Kerberos; |
636 | else if (use_mskerberos) | 679 | else if (use_mskerberos) |
637 | *secType = MSKerberos; | 680 | *secType = MSKerberos; |
638 | else if (use_ntlmssp) | 681 | else if (use_ntlmssp) |
639 | *secType = NTLMSSP; | 682 | *secType = RawNTLMSSP; |
640 | 683 | ||
641 | return 1; | 684 | return 1; |
642 | } | 685 | } |
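The decode_negTokenInit() changes above add the Kerberos user-to-user OID, tolerate a missing mechListMIC, and fix the use_kerberos/use_mskerberos guards; the matching itself is an exact element-by-element comparison of the decoded OID against the static tables (SPNEGO_OID, KRB5_OID, the new KRB5U2U_OID, MSKRB5_OID, NTLMSSP_OID). The real compare_oid() helper is not part of this diff, so the standalone program below is only an illustrative stand-in with a hypothetical name; it shows why KRB5U2U_OID ({1,2,840,113554,1,2,2,3}) never matches KRB5_OID ({1,2,840,113554,1,2,2}) even though one is a prefix of the other: the lengths differ, so an exact-length compare keeps them apart.

    #include <stdio.h>
    #include <stdbool.h>
    #include <stddef.h>

    /* hypothetical stand-in for the kernel's compare_oid() */
    static bool oid_equal(const unsigned long *a, size_t alen,
                          const unsigned long *b, size_t blen)
    {
            size_t i;

            if (alen != blen)
                    return false;
            for (i = 0; i < alen; i++)
                    if (a[i] != b[i])
                            return false;
            return true;
    }

    int main(void)
    {
            unsigned long krb5[7]    = { 1, 2, 840, 113554, 1, 2, 2 };
            unsigned long krb5u2u[8] = { 1, 2, 840, 113554, 1, 2, 2, 3 };

            printf("krb5 vs krb5u2u: %d\n", oid_equal(krb5, 7, krb5u2u, 8)); /* 0 */
            printf("krb5 vs krb5:    %d\n", oid_equal(krb5, 7, krb5, 7));    /* 1 */
            return 0;
    }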
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index 0d92114195ab..9f669f982c4d 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c | |||
@@ -333,6 +333,27 @@ cifs_destroy_inode(struct inode *inode) | |||
333 | kmem_cache_free(cifs_inode_cachep, CIFS_I(inode)); | 333 | kmem_cache_free(cifs_inode_cachep, CIFS_I(inode)); |
334 | } | 334 | } |
335 | 335 | ||
336 | static void | ||
337 | cifs_show_address(struct seq_file *s, struct TCP_Server_Info *server) | ||
338 | { | ||
339 | seq_printf(s, ",addr="); | ||
340 | |||
341 | switch (server->addr.sockAddr.sin_family) { | ||
342 | case AF_INET: | ||
343 | seq_printf(s, "%pI4", &server->addr.sockAddr.sin_addr.s_addr); | ||
344 | break; | ||
345 | case AF_INET6: | ||
346 | seq_printf(s, "%pI6", | ||
347 | &server->addr.sockAddr6.sin6_addr.s6_addr); | ||
348 | if (server->addr.sockAddr6.sin6_scope_id) | ||
349 | seq_printf(s, "%%%u", | ||
350 | server->addr.sockAddr6.sin6_scope_id); | ||
351 | break; | ||
352 | default: | ||
353 | seq_printf(s, "(unknown)"); | ||
354 | } | ||
355 | } | ||
356 | |||
336 | /* | 357 | /* |
337 | * cifs_show_options() is for displaying mount options in /proc/mounts. | 358 | * cifs_show_options() is for displaying mount options in /proc/mounts. |
338 | * Not all settable options are displayed but most of the important | 359 | * Not all settable options are displayed but most of the important |
@@ -343,83 +364,64 @@ cifs_show_options(struct seq_file *s, struct vfsmount *m) | |||
343 | { | 364 | { |
344 | struct cifs_sb_info *cifs_sb; | 365 | struct cifs_sb_info *cifs_sb; |
345 | struct cifsTconInfo *tcon; | 366 | struct cifsTconInfo *tcon; |
346 | struct TCP_Server_Info *server; | ||
347 | 367 | ||
348 | cifs_sb = CIFS_SB(m->mnt_sb); | 368 | cifs_sb = CIFS_SB(m->mnt_sb); |
369 | tcon = cifs_sb->tcon; | ||
349 | 370 | ||
350 | if (cifs_sb) { | 371 | seq_printf(s, ",unc=%s", cifs_sb->tcon->treeName); |
351 | tcon = cifs_sb->tcon; | 372 | if (tcon->ses->userName) |
352 | if (tcon) { | 373 | seq_printf(s, ",username=%s", tcon->ses->userName); |
353 | seq_printf(s, ",unc=%s", cifs_sb->tcon->treeName); | 374 | if (tcon->ses->domainName) |
354 | if (tcon->ses) { | 375 | seq_printf(s, ",domain=%s", tcon->ses->domainName); |
355 | if (tcon->ses->userName) | 376 | |
356 | seq_printf(s, ",username=%s", | 377 | seq_printf(s, ",uid=%d", cifs_sb->mnt_uid); |
357 | tcon->ses->userName); | 378 | if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID) |
358 | if (tcon->ses->domainName) | 379 | seq_printf(s, ",forceuid"); |
359 | seq_printf(s, ",domain=%s", | 380 | |
360 | tcon->ses->domainName); | 381 | seq_printf(s, ",gid=%d", cifs_sb->mnt_gid); |
361 | server = tcon->ses->server; | 382 | if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID) |
362 | if (server) { | 383 | seq_printf(s, ",forcegid"); |
363 | seq_printf(s, ",addr="); | 384 | |
364 | switch (server->addr.sockAddr6. | 385 | cifs_show_address(s, tcon->ses->server); |
365 | sin6_family) { | 386 | |
366 | case AF_INET6: | 387 | if (!tcon->unix_ext) |
367 | seq_printf(s, "%pI6", | 388 | seq_printf(s, ",file_mode=0%o,dir_mode=0%o", |
368 | &server->addr.sockAddr6.sin6_addr); | ||
369 | break; | ||
370 | case AF_INET: | ||
371 | seq_printf(s, "%pI4", | ||
372 | &server->addr.sockAddr.sin_addr.s_addr); | ||
373 | break; | ||
374 | } | ||
375 | } | ||
376 | } | ||
377 | if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID) || | ||
378 | !(tcon->unix_ext)) | ||
379 | seq_printf(s, ",uid=%d", cifs_sb->mnt_uid); | ||
380 | if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID) || | ||
381 | !(tcon->unix_ext)) | ||
382 | seq_printf(s, ",gid=%d", cifs_sb->mnt_gid); | ||
383 | if (!tcon->unix_ext) { | ||
384 | seq_printf(s, ",file_mode=0%o,dir_mode=0%o", | ||
385 | cifs_sb->mnt_file_mode, | 389 | cifs_sb->mnt_file_mode, |
386 | cifs_sb->mnt_dir_mode); | 390 | cifs_sb->mnt_dir_mode); |
387 | } | 391 | if (tcon->seal) |
388 | if (tcon->seal) | 392 | seq_printf(s, ",seal"); |
389 | seq_printf(s, ",seal"); | 393 | if (tcon->nocase) |
390 | if (tcon->nocase) | 394 | seq_printf(s, ",nocase"); |
391 | seq_printf(s, ",nocase"); | 395 | if (tcon->retry) |
392 | if (tcon->retry) | 396 | seq_printf(s, ",hard"); |
393 | seq_printf(s, ",hard"); | 397 | if (cifs_sb->prepath) |
394 | } | 398 | seq_printf(s, ",prepath=%s", cifs_sb->prepath); |
395 | if (cifs_sb->prepath) | 399 | if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS) |
396 | seq_printf(s, ",prepath=%s", cifs_sb->prepath); | 400 | seq_printf(s, ",posixpaths"); |
397 | if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS) | 401 | if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID) |
398 | seq_printf(s, ",posixpaths"); | 402 | seq_printf(s, ",setuids"); |
399 | if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID) | 403 | if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) |
400 | seq_printf(s, ",setuids"); | 404 | seq_printf(s, ",serverino"); |
401 | if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) | 405 | if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) |
402 | seq_printf(s, ",serverino"); | 406 | seq_printf(s, ",directio"); |
403 | if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) | 407 | if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR) |
404 | seq_printf(s, ",directio"); | 408 | seq_printf(s, ",nouser_xattr"); |
405 | if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR) | 409 | if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR) |
406 | seq_printf(s, ",nouser_xattr"); | 410 | seq_printf(s, ",mapchars"); |
407 | if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR) | 411 | if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL) |
408 | seq_printf(s, ",mapchars"); | 412 | seq_printf(s, ",sfu"); |
409 | if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL) | 413 | if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL) |
410 | seq_printf(s, ",sfu"); | 414 | seq_printf(s, ",nobrl"); |
411 | if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL) | 415 | if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL) |
412 | seq_printf(s, ",nobrl"); | 416 | seq_printf(s, ",cifsacl"); |
413 | if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL) | 417 | if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM) |
414 | seq_printf(s, ",cifsacl"); | 418 | seq_printf(s, ",dynperm"); |
415 | if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM) | 419 | if (m->mnt_sb->s_flags & MS_POSIXACL) |
416 | seq_printf(s, ",dynperm"); | 420 | seq_printf(s, ",acl"); |
417 | if (m->mnt_sb->s_flags & MS_POSIXACL) | 421 | |
418 | seq_printf(s, ",acl"); | 422 | seq_printf(s, ",rsize=%d", cifs_sb->rsize); |
419 | 423 | seq_printf(s, ",wsize=%d", cifs_sb->wsize); | |
420 | seq_printf(s, ",rsize=%d", cifs_sb->rsize); | 424 | |
421 | seq_printf(s, ",wsize=%d", cifs_sb->wsize); | ||
422 | } | ||
423 | return 0; | 425 | return 0; |
424 | } | 426 | } |
425 | 427 | ||
@@ -535,9 +537,14 @@ static void cifs_umount_begin(struct super_block *sb) | |||
535 | if (tcon == NULL) | 537 | if (tcon == NULL) |
536 | return; | 538 | return; |
537 | 539 | ||
538 | lock_kernel(); | ||
539 | read_lock(&cifs_tcp_ses_lock); | 540 | read_lock(&cifs_tcp_ses_lock); |
540 | if (tcon->tc_count == 1) | 541 | if ((tcon->tc_count > 1) || (tcon->tidStatus == CifsExiting)) { |
542 | /* we have other mounts to same share or we have | ||
543 | already tried to force umount this and woken up | ||
544 | all waiting network requests, nothing to do */ | ||
545 | read_unlock(&cifs_tcp_ses_lock); | ||
546 | return; | ||
547 | } else if (tcon->tc_count == 1) | ||
541 | tcon->tidStatus = CifsExiting; | 548 | tcon->tidStatus = CifsExiting; |
542 | read_unlock(&cifs_tcp_ses_lock); | 549 | read_unlock(&cifs_tcp_ses_lock); |
543 | 550 | ||
@@ -552,9 +559,7 @@ static void cifs_umount_begin(struct super_block *sb) | |||
552 | wake_up_all(&tcon->ses->server->response_q); | 559 | wake_up_all(&tcon->ses->server->response_q); |
553 | msleep(1); | 560 | msleep(1); |
554 | } | 561 | } |
555 | /* BB FIXME - finish add checks for tidStatus BB */ | ||
556 | 562 | ||
557 | unlock_kernel(); | ||
558 | return; | 563 | return; |
559 | } | 564 | } |
560 | 565 | ||
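The new cifs_show_address() helper prints the server address into /proc/mounts as ",addr=..." and, for IPv6 servers with a scope, appends the scope id after a literal '%' (the "%%%u" format). A small userspace illustration of what that output ends up looking like; the address and scope id below are made-up values, and the formatting is done with inet_ntop() rather than the kernel's %pI6:

    #include <arpa/inet.h>
    #include <netinet/in.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            struct sockaddr_in6 s6;
            char buf[INET6_ADDRSTRLEN];

            memset(&s6, 0, sizeof(s6));
            s6.sin6_family = AF_INET6;
            s6.sin6_scope_id = 3;                    /* e.g. an interface index */
            inet_pton(AF_INET6, "fe80::1", &s6.sin6_addr);

            inet_ntop(AF_INET6, &s6.sin6_addr, buf, sizeof(buf));
            printf(",addr=%s", buf);
            if (s6.sin6_scope_id)
                    printf("%%%u", s6.sin6_scope_id);
            printf("\n");                            /* prints: ,addr=fe80::1%3 */
            return 0;
    }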
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h index a61ab772c6f6..e1225e6ded2f 100644 --- a/fs/cifs/cifsglob.h +++ b/fs/cifs/cifsglob.h | |||
@@ -83,7 +83,7 @@ enum securityEnum { | |||
83 | NTLM, /* Legacy NTLM012 auth with NTLM hash */ | 83 | NTLM, /* Legacy NTLM012 auth with NTLM hash */ |
84 | NTLMv2, /* Legacy NTLM auth with NTLMv2 hash */ | 84 | NTLMv2, /* Legacy NTLM auth with NTLMv2 hash */ |
85 | RawNTLMSSP, /* NTLMSSP without SPNEGO, NTLMv2 hash */ | 85 | RawNTLMSSP, /* NTLMSSP without SPNEGO, NTLMv2 hash */ |
86 | NTLMSSP, /* NTLMSSP via SPNEGO, NTLMv2 hash */ | 86 | /* NTLMSSP, */ /* can use rawNTLMSSP instead of NTLMSSP via SPNEGO */ |
87 | Kerberos, /* Kerberos via SPNEGO */ | 87 | Kerberos, /* Kerberos via SPNEGO */ |
88 | MSKerberos, /* MS Kerberos via SPNEGO */ | 88 | MSKerberos, /* MS Kerberos via SPNEGO */ |
89 | }; | 89 | }; |
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h index f9452329bcce..c419416a42ee 100644 --- a/fs/cifs/cifsproto.h +++ b/fs/cifs/cifsproto.h | |||
@@ -74,7 +74,7 @@ extern unsigned int smbCalcSize(struct smb_hdr *ptr); | |||
74 | extern unsigned int smbCalcSize_LE(struct smb_hdr *ptr); | 74 | extern unsigned int smbCalcSize_LE(struct smb_hdr *ptr); |
75 | extern int decode_negTokenInit(unsigned char *security_blob, int length, | 75 | extern int decode_negTokenInit(unsigned char *security_blob, int length, |
76 | enum securityEnum *secType); | 76 | enum securityEnum *secType); |
77 | extern int cifs_inet_pton(const int, const char *source, void *dst); | 77 | extern int cifs_convert_address(char *src, void *dst); |
78 | extern int map_smb_to_linux_error(struct smb_hdr *smb, int logErr); | 78 | extern int map_smb_to_linux_error(struct smb_hdr *smb, int logErr); |
79 | extern void header_assemble(struct smb_hdr *, char /* command */ , | 79 | extern void header_assemble(struct smb_hdr *, char /* command */ , |
80 | const struct cifsTconInfo *, int /* length of | 80 | const struct cifsTconInfo *, int /* length of |
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c index b84c61d5bca4..61007c627497 100644 --- a/fs/cifs/cifssmb.c +++ b/fs/cifs/cifssmb.c | |||
@@ -594,7 +594,7 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses) | |||
594 | else if (secFlags & CIFSSEC_MAY_KRB5) | 594 | else if (secFlags & CIFSSEC_MAY_KRB5) |
595 | server->secType = Kerberos; | 595 | server->secType = Kerberos; |
596 | else if (secFlags & CIFSSEC_MAY_NTLMSSP) | 596 | else if (secFlags & CIFSSEC_MAY_NTLMSSP) |
597 | server->secType = NTLMSSP; | 597 | server->secType = RawNTLMSSP; |
598 | else if (secFlags & CIFSSEC_MAY_LANMAN) | 598 | else if (secFlags & CIFSSEC_MAY_LANMAN) |
599 | server->secType = LANMAN; | 599 | server->secType = LANMAN; |
600 | /* #ifdef CONFIG_CIFS_EXPERIMENTAL | 600 | /* #ifdef CONFIG_CIFS_EXPERIMENTAL |
@@ -729,7 +729,7 @@ CIFSSMBTDis(const int xid, struct cifsTconInfo *tcon) | |||
729 | * the tcon is no longer on the list, so no need to take lock before | 729 | * the tcon is no longer on the list, so no need to take lock before |
730 | * checking this. | 730 | * checking this. |
731 | */ | 731 | */ |
732 | if (tcon->need_reconnect) | 732 | if ((tcon->need_reconnect) || (tcon->ses->need_reconnect)) |
733 | return 0; | 733 | return 0; |
734 | 734 | ||
735 | rc = small_smb_init(SMB_COM_TREE_DISCONNECT, 0, tcon, | 735 | rc = small_smb_init(SMB_COM_TREE_DISCONNECT, 0, tcon, |
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index 97f4311b9a8e..e16d7592116a 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c | |||
@@ -70,7 +70,6 @@ struct smb_vol { | |||
70 | mode_t file_mode; | 70 | mode_t file_mode; |
71 | mode_t dir_mode; | 71 | mode_t dir_mode; |
72 | unsigned secFlg; | 72 | unsigned secFlg; |
73 | bool rw:1; | ||
74 | bool retry:1; | 73 | bool retry:1; |
75 | bool intr:1; | 74 | bool intr:1; |
76 | bool setuids:1; | 75 | bool setuids:1; |
@@ -832,7 +831,6 @@ cifs_parse_mount_options(char *options, const char *devname, | |||
832 | vol->dir_mode = vol->file_mode = S_IRUGO | S_IXUGO | S_IWUSR; | 831 | vol->dir_mode = vol->file_mode = S_IRUGO | S_IXUGO | S_IWUSR; |
833 | 832 | ||
834 | /* vol->retry default is 0 (i.e. "soft" limited retry not hard retry) */ | 833 | /* vol->retry default is 0 (i.e. "soft" limited retry not hard retry) */ |
835 | vol->rw = true; | ||
836 | /* default is always to request posix paths. */ | 834 | /* default is always to request posix paths. */ |
837 | vol->posix_paths = 1; | 835 | vol->posix_paths = 1; |
838 | /* default to using server inode numbers where available */ | 836 | /* default to using server inode numbers where available */ |
@@ -1199,7 +1197,9 @@ cifs_parse_mount_options(char *options, const char *devname, | |||
1199 | } else if (strnicmp(data, "guest", 5) == 0) { | 1197 | } else if (strnicmp(data, "guest", 5) == 0) { |
1200 | /* ignore */ | 1198 | /* ignore */ |
1201 | } else if (strnicmp(data, "rw", 2) == 0) { | 1199 | } else if (strnicmp(data, "rw", 2) == 0) { |
1202 | vol->rw = true; | 1200 | /* ignore */ |
1201 | } else if (strnicmp(data, "ro", 2) == 0) { | ||
1202 | /* ignore */ | ||
1203 | } else if (strnicmp(data, "noblocksend", 11) == 0) { | 1203 | } else if (strnicmp(data, "noblocksend", 11) == 0) { |
1204 | vol->noblocksnd = 1; | 1204 | vol->noblocksnd = 1; |
1205 | } else if (strnicmp(data, "noautotune", 10) == 0) { | 1205 | } else if (strnicmp(data, "noautotune", 10) == 0) { |
@@ -1218,8 +1218,6 @@ cifs_parse_mount_options(char *options, const char *devname, | |||
1218 | parse these options again and set anything and it | 1218 | parse these options again and set anything and it |
1219 | is ok to just ignore them */ | 1219 | is ok to just ignore them */ |
1220 | continue; | 1220 | continue; |
1221 | } else if (strnicmp(data, "ro", 2) == 0) { | ||
1222 | vol->rw = false; | ||
1223 | } else if (strnicmp(data, "hard", 4) == 0) { | 1221 | } else if (strnicmp(data, "hard", 4) == 0) { |
1224 | vol->retry = 1; | 1222 | vol->retry = 1; |
1225 | } else if (strnicmp(data, "soft", 4) == 0) { | 1223 | } else if (strnicmp(data, "soft", 4) == 0) { |
@@ -1386,8 +1384,10 @@ cifs_find_tcp_session(struct sockaddr_storage *addr) | |||
1386 | server->addr.sockAddr.sin_addr.s_addr)) | 1384 | server->addr.sockAddr.sin_addr.s_addr)) |
1387 | continue; | 1385 | continue; |
1388 | else if (addr->ss_family == AF_INET6 && | 1386 | else if (addr->ss_family == AF_INET6 && |
1389 | !ipv6_addr_equal(&server->addr.sockAddr6.sin6_addr, | 1387 | (!ipv6_addr_equal(&server->addr.sockAddr6.sin6_addr, |
1390 | &addr6->sin6_addr)) | 1388 | &addr6->sin6_addr) || |
1389 | server->addr.sockAddr6.sin6_scope_id != | ||
1390 | addr6->sin6_scope_id)) | ||
1391 | continue; | 1391 | continue; |
1392 | 1392 | ||
1393 | ++server->srv_count; | 1393 | ++server->srv_count; |
@@ -1433,28 +1433,15 @@ cifs_get_tcp_session(struct smb_vol *volume_info) | |||
1433 | 1433 | ||
1434 | memset(&addr, 0, sizeof(struct sockaddr_storage)); | 1434 | memset(&addr, 0, sizeof(struct sockaddr_storage)); |
1435 | 1435 | ||
1436 | if (volume_info->UNCip && volume_info->UNC) { | 1436 | cFYI(1, ("UNC: %s ip: %s", volume_info->UNC, volume_info->UNCip)); |
1437 | rc = cifs_inet_pton(AF_INET, volume_info->UNCip, | ||
1438 | &sin_server->sin_addr.s_addr); | ||
1439 | |||
1440 | if (rc <= 0) { | ||
1441 | /* not ipv4 address, try ipv6 */ | ||
1442 | rc = cifs_inet_pton(AF_INET6, volume_info->UNCip, | ||
1443 | &sin_server6->sin6_addr.in6_u); | ||
1444 | if (rc > 0) | ||
1445 | addr.ss_family = AF_INET6; | ||
1446 | } else { | ||
1447 | addr.ss_family = AF_INET; | ||
1448 | } | ||
1449 | 1437 | ||
1450 | if (rc <= 0) { | 1438 | if (volume_info->UNCip && volume_info->UNC) { |
1439 | rc = cifs_convert_address(volume_info->UNCip, &addr); | ||
1440 | if (!rc) { | ||
1451 | /* we failed translating address */ | 1441 | /* we failed translating address */ |
1452 | rc = -EINVAL; | 1442 | rc = -EINVAL; |
1453 | goto out_err; | 1443 | goto out_err; |
1454 | } | 1444 | } |
1455 | |||
1456 | cFYI(1, ("UNC: %s ip: %s", volume_info->UNC, | ||
1457 | volume_info->UNCip)); | ||
1458 | } else if (volume_info->UNCip) { | 1445 | } else if (volume_info->UNCip) { |
1459 | /* BB using ip addr as tcp_ses name to connect to the | 1446 | /* BB using ip addr as tcp_ses name to connect to the |
1460 | DFS root below */ | 1447 | DFS root below */ |
@@ -1513,14 +1500,14 @@ cifs_get_tcp_session(struct smb_vol *volume_info) | |||
1513 | cFYI(1, ("attempting ipv6 connect")); | 1500 | cFYI(1, ("attempting ipv6 connect")); |
1514 | /* BB should we allow ipv6 on port 139? */ | 1501 | /* BB should we allow ipv6 on port 139? */ |
1515 | /* other OS never observed in Wild doing 139 with v6 */ | 1502 | /* other OS never observed in Wild doing 139 with v6 */ |
1503 | sin_server6->sin6_port = htons(volume_info->port); | ||
1516 | memcpy(&tcp_ses->addr.sockAddr6, sin_server6, | 1504 | memcpy(&tcp_ses->addr.sockAddr6, sin_server6, |
1517 | sizeof(struct sockaddr_in6)); | 1505 | sizeof(struct sockaddr_in6)); |
1518 | sin_server6->sin6_port = htons(volume_info->port); | ||
1519 | rc = ipv6_connect(tcp_ses); | 1506 | rc = ipv6_connect(tcp_ses); |
1520 | } else { | 1507 | } else { |
1508 | sin_server->sin_port = htons(volume_info->port); | ||
1521 | memcpy(&tcp_ses->addr.sockAddr, sin_server, | 1509 | memcpy(&tcp_ses->addr.sockAddr, sin_server, |
1522 | sizeof(struct sockaddr_in)); | 1510 | sizeof(struct sockaddr_in)); |
1523 | sin_server->sin_port = htons(volume_info->port); | ||
1524 | rc = ipv4_connect(tcp_ses); | 1511 | rc = ipv4_connect(tcp_ses); |
1525 | } | 1512 | } |
1526 | if (rc < 0) { | 1513 | if (rc < 0) { |
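Two easy-to-miss points in the connect.c hunks: address parsing is delegated to the single cifs_convert_address() helper (added in netmisc.c further down), and the port is now written into the local sockaddr before it is memcpy'd into tcp_ses->addr. With the old order the snapshot was taken first, so the port assigned afterwards only ever landed in the stack copy and the stored, zero-initialised address was the one used for the connect. A minimal sketch of the corrected order; the variable names are illustrative stand-ins for the hunk's sin_server6 and tcp_ses:

    /* sketch, not the verbatim kernel code: fill in everything, including the
     * port, before the sockaddr is copied into its long-lived slot */
    struct sockaddr_in6 tmp6 = { .sin6_family = AF_INET6 };
    /* ... tmp6.sin6_addr and tmp6.sin6_scope_id parsed earlier ... */
    tmp6.sin6_port = htons(port);                          /* 1. set the port   */
    memcpy(&server->addr.sockAddr6, &tmp6, sizeof(tmp6));  /* 2. then snapshot  */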
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c index 3758965d73d5..7dc6b74f9def 100644 --- a/fs/cifs/dir.c +++ b/fs/cifs/dir.c | |||
@@ -307,8 +307,9 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode, | |||
307 | 307 | ||
308 | full_path = build_path_from_dentry(direntry); | 308 | full_path = build_path_from_dentry(direntry); |
309 | if (full_path == NULL) { | 309 | if (full_path == NULL) { |
310 | rc = -ENOMEM; | ||
310 | FreeXid(xid); | 311 | FreeXid(xid); |
311 | return -ENOMEM; | 312 | return rc; |
312 | } | 313 | } |
313 | 314 | ||
314 | if (oplockEnabled) | 315 | if (oplockEnabled) |
@@ -540,8 +541,9 @@ int cifs_mknod(struct inode *inode, struct dentry *direntry, int mode, | |||
540 | buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL); | 541 | buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL); |
541 | if (buf == NULL) { | 542 | if (buf == NULL) { |
542 | kfree(full_path); | 543 | kfree(full_path); |
544 | rc = -ENOMEM; | ||
543 | FreeXid(xid); | 545 | FreeXid(xid); |
544 | return -ENOMEM; | 546 | return rc; |
545 | } | 547 | } |
546 | 548 | ||
547 | rc = CIFSSMBOpen(xid, pTcon, full_path, | 549 | rc = CIFSSMBOpen(xid, pTcon, full_path, |
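These dir.c hunks are the first of a long series, repeated below in file.c, inode.c, link.c and xattr.c, that convert early returns of a hard-coded error into "set rc, then leave through the common FreeXid()/return rc exit". Behaviour is unchanged; the point is that every exit now reports the same rc variable, keeping the xid bookkeeping and the return value in one place. A condensed fragment of the new shape, exactly as it appears in the hunks:

    full_path = build_path_from_dentry(direntry);
    if (full_path == NULL) {
            rc = -ENOMEM;        /* record the failure in rc ...               */
            FreeXid(xid);        /* ... so this early exit and the normal one  */
            return rc;           /*     both report whatever rc says           */
    }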
diff --git a/fs/cifs/dns_resolve.c b/fs/cifs/dns_resolve.c index df4a306f697e..87948147d7ec 100644 --- a/fs/cifs/dns_resolve.c +++ b/fs/cifs/dns_resolve.c | |||
@@ -35,26 +35,11 @@ | |||
35 | * 0 - name is not IP | 35 | * 0 - name is not IP |
36 | */ | 36 | */ |
37 | static int | 37 | static int |
38 | is_ip(const char *name) | 38 | is_ip(char *name) |
39 | { | 39 | { |
40 | int rc; | 40 | struct sockaddr_storage ss; |
41 | struct sockaddr_in sin_server; | 41 | |
42 | struct sockaddr_in6 sin_server6; | 42 | return cifs_convert_address(name, &ss); |
43 | |||
44 | rc = cifs_inet_pton(AF_INET, name, | ||
45 | &sin_server.sin_addr.s_addr); | ||
46 | |||
47 | if (rc <= 0) { | ||
48 | /* not ipv4 address, try ipv6 */ | ||
49 | rc = cifs_inet_pton(AF_INET6, name, | ||
50 | &sin_server6.sin6_addr.in6_u); | ||
51 | if (rc > 0) | ||
52 | return 1; | ||
53 | } else { | ||
54 | return 1; | ||
55 | } | ||
56 | /* we failed translating address */ | ||
57 | return 0; | ||
58 | } | 43 | } |
59 | 44 | ||
60 | static int | 45 | static int |
@@ -72,7 +57,7 @@ dns_resolver_instantiate(struct key *key, const void *data, | |||
72 | ip[datalen] = '\0'; | 57 | ip[datalen] = '\0'; |
73 | 58 | ||
74 | /* make sure this looks like an address */ | 59 | /* make sure this looks like an address */ |
75 | if (!is_ip((const char *) ip)) { | 60 | if (!is_ip(ip)) { |
76 | kfree(ip); | 61 | kfree(ip); |
77 | return -EINVAL; | 62 | return -EINVAL; |
78 | } | 63 | } |
diff --git a/fs/cifs/file.c b/fs/cifs/file.c index 06866841b97f..97ce4bf89d15 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c | |||
@@ -300,14 +300,16 @@ int cifs_open(struct inode *inode, struct file *file) | |||
300 | pCifsInode = CIFS_I(file->f_path.dentry->d_inode); | 300 | pCifsInode = CIFS_I(file->f_path.dentry->d_inode); |
301 | pCifsFile = cifs_fill_filedata(file); | 301 | pCifsFile = cifs_fill_filedata(file); |
302 | if (pCifsFile) { | 302 | if (pCifsFile) { |
303 | rc = 0; | ||
303 | FreeXid(xid); | 304 | FreeXid(xid); |
304 | return 0; | 305 | return rc; |
305 | } | 306 | } |
306 | 307 | ||
307 | full_path = build_path_from_dentry(file->f_path.dentry); | 308 | full_path = build_path_from_dentry(file->f_path.dentry); |
308 | if (full_path == NULL) { | 309 | if (full_path == NULL) { |
310 | rc = -ENOMEM; | ||
309 | FreeXid(xid); | 311 | FreeXid(xid); |
310 | return -ENOMEM; | 312 | return rc; |
311 | } | 313 | } |
312 | 314 | ||
313 | cFYI(1, ("inode = 0x%p file flags are 0x%x for %s", | 315 | cFYI(1, ("inode = 0x%p file flags are 0x%x for %s", |
@@ -491,11 +493,12 @@ static int cifs_reopen_file(struct file *file, bool can_flush) | |||
491 | return -EBADF; | 493 | return -EBADF; |
492 | 494 | ||
493 | xid = GetXid(); | 495 | xid = GetXid(); |
494 | mutex_unlock(&pCifsFile->fh_mutex); | 496 | mutex_lock(&pCifsFile->fh_mutex); |
495 | if (!pCifsFile->invalidHandle) { | 497 | if (!pCifsFile->invalidHandle) { |
496 | mutex_lock(&pCifsFile->fh_mutex); | 498 | mutex_unlock(&pCifsFile->fh_mutex); |
499 | rc = 0; | ||
497 | FreeXid(xid); | 500 | FreeXid(xid); |
498 | return 0; | 501 | return rc; |
499 | } | 502 | } |
500 | 503 | ||
501 | if (file->f_path.dentry == NULL) { | 504 | if (file->f_path.dentry == NULL) { |
@@ -524,7 +527,7 @@ static int cifs_reopen_file(struct file *file, bool can_flush) | |||
524 | if (full_path == NULL) { | 527 | if (full_path == NULL) { |
525 | rc = -ENOMEM; | 528 | rc = -ENOMEM; |
526 | reopen_error_exit: | 529 | reopen_error_exit: |
527 | mutex_lock(&pCifsFile->fh_mutex); | 530 | mutex_unlock(&pCifsFile->fh_mutex); |
528 | FreeXid(xid); | 531 | FreeXid(xid); |
529 | return rc; | 532 | return rc; |
530 | } | 533 | } |
@@ -566,14 +569,14 @@ reopen_error_exit: | |||
566 | cifs_sb->local_nls, cifs_sb->mnt_cifs_flags & | 569 | cifs_sb->local_nls, cifs_sb->mnt_cifs_flags & |
567 | CIFS_MOUNT_MAP_SPECIAL_CHR); | 570 | CIFS_MOUNT_MAP_SPECIAL_CHR); |
568 | if (rc) { | 571 | if (rc) { |
569 | mutex_lock(&pCifsFile->fh_mutex); | 572 | mutex_unlock(&pCifsFile->fh_mutex); |
570 | cFYI(1, ("cifs_open returned 0x%x", rc)); | 573 | cFYI(1, ("cifs_open returned 0x%x", rc)); |
571 | cFYI(1, ("oplock: %d", oplock)); | 574 | cFYI(1, ("oplock: %d", oplock)); |
572 | } else { | 575 | } else { |
573 | reopen_success: | 576 | reopen_success: |
574 | pCifsFile->netfid = netfid; | 577 | pCifsFile->netfid = netfid; |
575 | pCifsFile->invalidHandle = false; | 578 | pCifsFile->invalidHandle = false; |
576 | mutex_lock(&pCifsFile->fh_mutex); | 579 | mutex_unlock(&pCifsFile->fh_mutex); |
577 | pCifsInode = CIFS_I(inode); | 580 | pCifsInode = CIFS_I(inode); |
578 | if (pCifsInode) { | 581 | if (pCifsInode) { |
579 | if (can_flush) { | 582 | if (can_flush) { |
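The cifs_reopen_file() hunks fix an inverted use of fh_mutex: the old code unlocked the mutex on entry and locked it again on every exit, which is backwards. After the change the handle state is examined and updated under the mutex, and every exit path releases it. A heavily condensed restatement of the corrected shape, using only lines from the hunks:

    mutex_lock(&pCifsFile->fh_mutex);            /* take the lock on entry           */
    if (!pCifsFile->invalidHandle) {
            mutex_unlock(&pCifsFile->fh_mutex);  /* handle still valid: drop and go  */
            rc = 0;
            FreeXid(xid);
            return rc;
    }
    /* ... rebuild the path, reopen the file on the server ... */
    pCifsFile->netfid = netfid;                  /* publish the new handle ...       */
    pCifsFile->invalidHandle = false;
    mutex_unlock(&pCifsFile->fh_mutex);          /* ... before dropping the lock     */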
@@ -845,8 +848,9 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock) | |||
845 | tcon = cifs_sb->tcon; | 848 | tcon = cifs_sb->tcon; |
846 | 849 | ||
847 | if (file->private_data == NULL) { | 850 | if (file->private_data == NULL) { |
851 | rc = -EBADF; | ||
848 | FreeXid(xid); | 852 | FreeXid(xid); |
849 | return -EBADF; | 853 | return rc; |
850 | } | 854 | } |
851 | netfid = ((struct cifsFileInfo *)file->private_data)->netfid; | 855 | netfid = ((struct cifsFileInfo *)file->private_data)->netfid; |
852 | 856 | ||
@@ -1805,8 +1809,9 @@ ssize_t cifs_user_read(struct file *file, char __user *read_data, | |||
1805 | pTcon = cifs_sb->tcon; | 1809 | pTcon = cifs_sb->tcon; |
1806 | 1810 | ||
1807 | if (file->private_data == NULL) { | 1811 | if (file->private_data == NULL) { |
1812 | rc = -EBADF; | ||
1808 | FreeXid(xid); | 1813 | FreeXid(xid); |
1809 | return -EBADF; | 1814 | return rc; |
1810 | } | 1815 | } |
1811 | open_file = (struct cifsFileInfo *)file->private_data; | 1816 | open_file = (struct cifsFileInfo *)file->private_data; |
1812 | 1817 | ||
@@ -1885,8 +1890,9 @@ static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size, | |||
1885 | pTcon = cifs_sb->tcon; | 1890 | pTcon = cifs_sb->tcon; |
1886 | 1891 | ||
1887 | if (file->private_data == NULL) { | 1892 | if (file->private_data == NULL) { |
1893 | rc = -EBADF; | ||
1888 | FreeXid(xid); | 1894 | FreeXid(xid); |
1889 | return -EBADF; | 1895 | return rc; |
1890 | } | 1896 | } |
1891 | open_file = (struct cifsFileInfo *)file->private_data; | 1897 | open_file = (struct cifsFileInfo *)file->private_data; |
1892 | 1898 | ||
@@ -2019,8 +2025,9 @@ static int cifs_readpages(struct file *file, struct address_space *mapping, | |||
2019 | 2025 | ||
2020 | xid = GetXid(); | 2026 | xid = GetXid(); |
2021 | if (file->private_data == NULL) { | 2027 | if (file->private_data == NULL) { |
2028 | rc = -EBADF; | ||
2022 | FreeXid(xid); | 2029 | FreeXid(xid); |
2023 | return -EBADF; | 2030 | return rc; |
2024 | } | 2031 | } |
2025 | open_file = (struct cifsFileInfo *)file->private_data; | 2032 | open_file = (struct cifsFileInfo *)file->private_data; |
2026 | cifs_sb = CIFS_SB(file->f_path.dentry->d_sb); | 2033 | cifs_sb = CIFS_SB(file->f_path.dentry->d_sb); |
@@ -2185,8 +2192,9 @@ static int cifs_readpage(struct file *file, struct page *page) | |||
2185 | xid = GetXid(); | 2192 | xid = GetXid(); |
2186 | 2193 | ||
2187 | if (file->private_data == NULL) { | 2194 | if (file->private_data == NULL) { |
2195 | rc = -EBADF; | ||
2188 | FreeXid(xid); | 2196 | FreeXid(xid); |
2189 | return -EBADF; | 2197 | return rc; |
2190 | } | 2198 | } |
2191 | 2199 | ||
2192 | cFYI(1, ("readpage %p at offset %d 0x%x\n", | 2200 | cFYI(1, ("readpage %p at offset %d 0x%x\n", |
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c index fad882b075ba..155c9e785d0c 100644 --- a/fs/cifs/inode.c +++ b/fs/cifs/inode.c | |||
@@ -988,8 +988,9 @@ int cifs_unlink(struct inode *dir, struct dentry *dentry) | |||
988 | * sb->s_vfs_rename_mutex here */ | 988 | * sb->s_vfs_rename_mutex here */ |
989 | full_path = build_path_from_dentry(dentry); | 989 | full_path = build_path_from_dentry(dentry); |
990 | if (full_path == NULL) { | 990 | if (full_path == NULL) { |
991 | rc = -ENOMEM; | ||
991 | FreeXid(xid); | 992 | FreeXid(xid); |
992 | return -ENOMEM; | 993 | return rc; |
993 | } | 994 | } |
994 | 995 | ||
995 | if ((tcon->ses->capabilities & CAP_UNIX) && | 996 | if ((tcon->ses->capabilities & CAP_UNIX) && |
@@ -1118,8 +1119,9 @@ int cifs_mkdir(struct inode *inode, struct dentry *direntry, int mode) | |||
1118 | 1119 | ||
1119 | full_path = build_path_from_dentry(direntry); | 1120 | full_path = build_path_from_dentry(direntry); |
1120 | if (full_path == NULL) { | 1121 | if (full_path == NULL) { |
1122 | rc = -ENOMEM; | ||
1121 | FreeXid(xid); | 1123 | FreeXid(xid); |
1122 | return -ENOMEM; | 1124 | return rc; |
1123 | } | 1125 | } |
1124 | 1126 | ||
1125 | if ((pTcon->ses->capabilities & CAP_UNIX) && | 1127 | if ((pTcon->ses->capabilities & CAP_UNIX) && |
@@ -1303,8 +1305,9 @@ int cifs_rmdir(struct inode *inode, struct dentry *direntry) | |||
1303 | 1305 | ||
1304 | full_path = build_path_from_dentry(direntry); | 1306 | full_path = build_path_from_dentry(direntry); |
1305 | if (full_path == NULL) { | 1307 | if (full_path == NULL) { |
1308 | rc = -ENOMEM; | ||
1306 | FreeXid(xid); | 1309 | FreeXid(xid); |
1307 | return -ENOMEM; | 1310 | return rc; |
1308 | } | 1311 | } |
1309 | 1312 | ||
1310 | rc = CIFSSMBRmDir(xid, pTcon, full_path, cifs_sb->local_nls, | 1313 | rc = CIFSSMBRmDir(xid, pTcon, full_path, cifs_sb->local_nls, |
@@ -1508,8 +1511,9 @@ int cifs_revalidate(struct dentry *direntry) | |||
1508 | since that would deadlock */ | 1511 | since that would deadlock */ |
1509 | full_path = build_path_from_dentry(direntry); | 1512 | full_path = build_path_from_dentry(direntry); |
1510 | if (full_path == NULL) { | 1513 | if (full_path == NULL) { |
1514 | rc = -ENOMEM; | ||
1511 | FreeXid(xid); | 1515 | FreeXid(xid); |
1512 | return -ENOMEM; | 1516 | return rc; |
1513 | } | 1517 | } |
1514 | cFYI(1, ("Revalidate: %s inode 0x%p count %d dentry: 0x%p d_time %ld " | 1518 | cFYI(1, ("Revalidate: %s inode 0x%p count %d dentry: 0x%p d_time %ld " |
1515 | "jiffies %ld", full_path, direntry->d_inode, | 1519 | "jiffies %ld", full_path, direntry->d_inode, |
@@ -1911,8 +1915,9 @@ cifs_setattr_nounix(struct dentry *direntry, struct iattr *attrs) | |||
1911 | 1915 | ||
1912 | full_path = build_path_from_dentry(direntry); | 1916 | full_path = build_path_from_dentry(direntry); |
1913 | if (full_path == NULL) { | 1917 | if (full_path == NULL) { |
1918 | rc = -ENOMEM; | ||
1914 | FreeXid(xid); | 1919 | FreeXid(xid); |
1915 | return -ENOMEM; | 1920 | return rc; |
1916 | } | 1921 | } |
1917 | 1922 | ||
1918 | /* | 1923 | /* |
diff --git a/fs/cifs/link.c b/fs/cifs/link.c index cd83c53fcbb5..fc1e0487eaee 100644 --- a/fs/cifs/link.c +++ b/fs/cifs/link.c | |||
@@ -172,8 +172,9 @@ cifs_symlink(struct inode *inode, struct dentry *direntry, const char *symname) | |||
172 | full_path = build_path_from_dentry(direntry); | 172 | full_path = build_path_from_dentry(direntry); |
173 | 173 | ||
174 | if (full_path == NULL) { | 174 | if (full_path == NULL) { |
175 | rc = -ENOMEM; | ||
175 | FreeXid(xid); | 176 | FreeXid(xid); |
176 | return -ENOMEM; | 177 | return rc; |
177 | } | 178 | } |
178 | 179 | ||
179 | cFYI(1, ("Full path: %s", full_path)); | 180 | cFYI(1, ("Full path: %s", full_path)); |
diff --git a/fs/cifs/netmisc.c b/fs/cifs/netmisc.c index 32d6baa0a54f..bd6d6895730d 100644 --- a/fs/cifs/netmisc.c +++ b/fs/cifs/netmisc.c | |||
@@ -133,10 +133,12 @@ static const struct smb_to_posix_error mapping_table_ERRHRD[] = { | |||
133 | {0, 0} | 133 | {0, 0} |
134 | }; | 134 | }; |
135 | 135 | ||
136 | /* Convert string containing dotted ip address to binary form */ | 136 | /* |
137 | /* returns 0 if invalid address */ | 137 | * Convert a string containing text IPv4 or IPv6 address to binary form. |
138 | 138 | * | |
139 | int | 139 | * Returns 0 on failure. |
140 | */ | ||
141 | static int | ||
140 | cifs_inet_pton(const int address_family, const char *cp, void *dst) | 142 | cifs_inet_pton(const int address_family, const char *cp, void *dst) |
141 | { | 143 | { |
142 | int ret = 0; | 144 | int ret = 0; |
@@ -153,6 +155,52 @@ cifs_inet_pton(const int address_family, const char *cp, void *dst) | |||
153 | return ret; | 155 | return ret; |
154 | } | 156 | } |
155 | 157 | ||
158 | /* | ||
159 | * Try to convert a string to an IPv4 address and then attempt to convert | ||
160 | * it to an IPv6 address if that fails. Set the family field if either | ||
161 | * succeeds. If it's an IPv6 address and it has a '%' sign in it, try to | ||
162 | * treat the part following it as a numeric sin6_scope_id. | ||
163 | * | ||
164 | * Returns 0 on failure. | ||
165 | */ | ||
166 | int | ||
167 | cifs_convert_address(char *src, void *dst) | ||
168 | { | ||
169 | int rc; | ||
170 | char *pct, *endp; | ||
171 | struct sockaddr_in *s4 = (struct sockaddr_in *) dst; | ||
172 | struct sockaddr_in6 *s6 = (struct sockaddr_in6 *) dst; | ||
173 | |||
174 | /* IPv4 address */ | ||
175 | if (cifs_inet_pton(AF_INET, src, &s4->sin_addr.s_addr)) { | ||
176 | s4->sin_family = AF_INET; | ||
177 | return 1; | ||
178 | } | ||
179 | |||
180 | /* temporarily terminate string */ | ||
181 | pct = strchr(src, '%'); | ||
182 | if (pct) | ||
183 | *pct = '\0'; | ||
184 | |||
185 | rc = cifs_inet_pton(AF_INET6, src, &s6->sin6_addr.s6_addr); | ||
186 | |||
187 | /* repair temp termination (if any) and make pct point to scopeid */ | ||
188 | if (pct) | ||
189 | *pct++ = '%'; | ||
190 | |||
191 | if (!rc) | ||
192 | return rc; | ||
193 | |||
194 | s6->sin6_family = AF_INET6; | ||
195 | if (pct) { | ||
196 | s6->sin6_scope_id = (u32) simple_strtoul(pct, &endp, 0); | ||
197 | if (!*pct || *endp) | ||
198 | return 0; | ||
199 | } | ||
200 | |||
201 | return rc; | ||
202 | } | ||
203 | |||
156 | /***************************************************************************** | 204 | /***************************************************************************** |
157 | convert a NT status code to a dos class/code | 205 | convert a NT status code to a dos class/code |
158 | *****************************************************************************/ | 206 | *****************************************************************************/ |
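cifs_convert_address() above takes a text address, tries IPv4 first and then IPv6, fills in the family field, parses a trailing "%<n>" into sin6_scope_id, and returns 0 on failure. A hedged sketch of how a caller drives it against a sockaddr_storage, mirroring the cifs_get_tcp_session() hunk earlier; the wrapper function name and the sample address are invented for illustration:

    /* invented wrapper; cifs_convert_address() and cFYI() are from this diff */
    static int cifs_resolve_example(void)
    {
            struct sockaddr_storage addr;
            char ip[] = "fe80::1%2";             /* made-up link-local address;
                                                    must be writable, the helper
                                                    temporarily cuts it at '%'  */

            memset(&addr, 0, sizeof(addr));
            if (!cifs_convert_address(ip, &addr))
                    return -EINVAL;              /* neither IPv4 nor IPv6       */

            if (addr.ss_family == AF_INET6) {
                    struct sockaddr_in6 *s6 = (struct sockaddr_in6 *)&addr;
                    /* sin6_addr now holds fe80::1, sin6_scope_id holds 2 */
                    cFYI(1, ("scope id %u", s6->sin6_scope_id));
            }
            return 0;
    }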
diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c index 897a052270f9..7085a6275c4c 100644 --- a/fs/cifs/sess.c +++ b/fs/cifs/sess.c | |||
@@ -802,7 +802,7 @@ ssetup_ntlmssp_authenticate: | |||
802 | #endif /* CONFIG_CIFS_UPCALL */ | 802 | #endif /* CONFIG_CIFS_UPCALL */ |
803 | } else { | 803 | } else { |
804 | #ifdef CONFIG_CIFS_EXPERIMENTAL | 804 | #ifdef CONFIG_CIFS_EXPERIMENTAL |
805 | if ((experimEnabled > 1) && (type == RawNTLMSSP)) { | 805 | if (type == RawNTLMSSP) { |
806 | if ((pSMB->req.hdr.Flags2 & SMBFLG2_UNICODE) == 0) { | 806 | if ((pSMB->req.hdr.Flags2 & SMBFLG2_UNICODE) == 0) { |
807 | cERROR(1, ("NTLMSSP requires Unicode support")); | 807 | cERROR(1, ("NTLMSSP requires Unicode support")); |
808 | rc = -ENOSYS; | 808 | rc = -ENOSYS; |
diff --git a/fs/cifs/xattr.c b/fs/cifs/xattr.c index e9527eedc639..a75afa3dd9e1 100644 --- a/fs/cifs/xattr.c +++ b/fs/cifs/xattr.c | |||
@@ -64,8 +64,9 @@ int cifs_removexattr(struct dentry *direntry, const char *ea_name) | |||
64 | 64 | ||
65 | full_path = build_path_from_dentry(direntry); | 65 | full_path = build_path_from_dentry(direntry); |
66 | if (full_path == NULL) { | 66 | if (full_path == NULL) { |
67 | rc = -ENOMEM; | ||
67 | FreeXid(xid); | 68 | FreeXid(xid); |
68 | return -ENOMEM; | 69 | return rc; |
69 | } | 70 | } |
70 | if (ea_name == NULL) { | 71 | if (ea_name == NULL) { |
71 | cFYI(1, ("Null xattr names not supported")); | 72 | cFYI(1, ("Null xattr names not supported")); |
@@ -118,8 +119,9 @@ int cifs_setxattr(struct dentry *direntry, const char *ea_name, | |||
118 | 119 | ||
119 | full_path = build_path_from_dentry(direntry); | 120 | full_path = build_path_from_dentry(direntry); |
120 | if (full_path == NULL) { | 121 | if (full_path == NULL) { |
122 | rc = -ENOMEM; | ||
121 | FreeXid(xid); | 123 | FreeXid(xid); |
122 | return -ENOMEM; | 124 | return rc; |
123 | } | 125 | } |
124 | /* return dos attributes as pseudo xattr */ | 126 | /* return dos attributes as pseudo xattr */ |
125 | /* return alt name if available as pseudo attr */ | 127 | /* return alt name if available as pseudo attr */ |
@@ -225,8 +227,9 @@ ssize_t cifs_getxattr(struct dentry *direntry, const char *ea_name, | |||
225 | 227 | ||
226 | full_path = build_path_from_dentry(direntry); | 228 | full_path = build_path_from_dentry(direntry); |
227 | if (full_path == NULL) { | 229 | if (full_path == NULL) { |
230 | rc = -ENOMEM; | ||
228 | FreeXid(xid); | 231 | FreeXid(xid); |
229 | return -ENOMEM; | 232 | return rc; |
230 | } | 233 | } |
231 | /* return dos attributes as pseudo xattr */ | 234 | /* return dos attributes as pseudo xattr */ |
232 | /* return alt name if available as pseudo attr */ | 235 | /* return alt name if available as pseudo attr */ |
@@ -351,8 +354,9 @@ ssize_t cifs_listxattr(struct dentry *direntry, char *data, size_t buf_size) | |||
351 | 354 | ||
352 | full_path = build_path_from_dentry(direntry); | 355 | full_path = build_path_from_dentry(direntry); |
353 | if (full_path == NULL) { | 356 | if (full_path == NULL) { |
357 | rc = -ENOMEM; | ||
354 | FreeXid(xid); | 358 | FreeXid(xid); |
355 | return -ENOMEM; | 359 | return rc; |
356 | } | 360 | } |
357 | /* return dos attributes as pseudo xattr */ | 361 | /* return dos attributes as pseudo xattr */ |
358 | /* return alt name if available as pseudo attr */ | 362 | /* return alt name if available as pseudo attr */ |
diff --git a/fs/eventfd.c b/fs/eventfd.c index 3f0e1974abdc..31d12de83a2a 100644 --- a/fs/eventfd.c +++ b/fs/eventfd.c | |||
@@ -14,35 +14,44 @@ | |||
14 | #include <linux/list.h> | 14 | #include <linux/list.h> |
15 | #include <linux/spinlock.h> | 15 | #include <linux/spinlock.h> |
16 | #include <linux/anon_inodes.h> | 16 | #include <linux/anon_inodes.h> |
17 | #include <linux/eventfd.h> | ||
18 | #include <linux/syscalls.h> | 17 | #include <linux/syscalls.h> |
19 | #include <linux/module.h> | 18 | #include <linux/module.h> |
19 | #include <linux/kref.h> | ||
20 | #include <linux/eventfd.h> | ||
20 | 21 | ||
21 | struct eventfd_ctx { | 22 | struct eventfd_ctx { |
23 | struct kref kref; | ||
22 | wait_queue_head_t wqh; | 24 | wait_queue_head_t wqh; |
23 | /* | 25 | /* |
24 | * Every time that a write(2) is performed on an eventfd, the | 26 | * Every time that a write(2) is performed on an eventfd, the |
25 | * value of the __u64 being written is added to "count" and a | 27 | * value of the __u64 being written is added to "count" and a |
26 | * wakeup is performed on "wqh". A read(2) will return the "count" | 28 | * wakeup is performed on "wqh". A read(2) will return the "count" |
27 | * value to userspace, and will reset "count" to zero. The kernel | 29 | * value to userspace, and will reset "count" to zero. The kernel |
28 | * size eventfd_signal() also, adds to the "count" counter and | 30 | * side eventfd_signal() also, adds to the "count" counter and |
29 | * issue a wakeup. | 31 | * issue a wakeup. |
30 | */ | 32 | */ |
31 | __u64 count; | 33 | __u64 count; |
32 | unsigned int flags; | 34 | unsigned int flags; |
33 | }; | 35 | }; |
34 | 36 | ||
35 | /* | 37 | /** |
36 | * Adds "n" to the eventfd counter "count". Returns "n" in case of | 38 | * eventfd_signal - Adds @n to the eventfd counter. |
37 | * success, or a value lower then "n" in case of coutner overflow. | 39 | * @ctx: [in] Pointer to the eventfd context. |
38 | * This function is supposed to be called by the kernel in paths | 40 | * @n: [in] Value of the counter to be added to the eventfd internal counter. |
39 | * that do not allow sleeping. In this function we allow the counter | 41 | * The value cannot be negative. |
40 | * to reach the ULLONG_MAX value, and we signal this as overflow | 42 | * |
41 | * condition by returining a POLLERR to poll(2). | 43 | * This function is supposed to be called by the kernel in paths that do not |
44 | * allow sleeping. In this function we allow the counter to reach the ULLONG_MAX | ||
45 | * value, and we signal this as overflow condition by returining a POLLERR | ||
46 | * to poll(2). | ||
47 | * | ||
48 | * Returns @n in case of success, a non-negative number lower than @n in case | ||
49 | * of overflow, or the following error codes: | ||
50 | * | ||
51 | * -EINVAL : The value of @n is negative. | ||
42 | */ | 52 | */ |
43 | int eventfd_signal(struct file *file, int n) | 53 | int eventfd_signal(struct eventfd_ctx *ctx, int n) |
44 | { | 54 | { |
45 | struct eventfd_ctx *ctx = file->private_data; | ||
46 | unsigned long flags; | 55 | unsigned long flags; |
47 | 56 | ||
48 | if (n < 0) | 57 | if (n < 0) |
@@ -59,9 +68,45 @@ int eventfd_signal(struct file *file, int n) | |||
59 | } | 68 | } |
60 | EXPORT_SYMBOL_GPL(eventfd_signal); | 69 | EXPORT_SYMBOL_GPL(eventfd_signal); |
61 | 70 | ||
71 | static void eventfd_free(struct kref *kref) | ||
72 | { | ||
73 | struct eventfd_ctx *ctx = container_of(kref, struct eventfd_ctx, kref); | ||
74 | |||
75 | kfree(ctx); | ||
76 | } | ||
77 | |||
78 | /** | ||
79 | * eventfd_ctx_get - Acquires a reference to the internal eventfd context. | ||
80 | * @ctx: [in] Pointer to the eventfd context. | ||
81 | * | ||
82 | * Returns: In case of success, returns a pointer to the eventfd context. | ||
83 | */ | ||
84 | struct eventfd_ctx *eventfd_ctx_get(struct eventfd_ctx *ctx) | ||
85 | { | ||
86 | kref_get(&ctx->kref); | ||
87 | return ctx; | ||
88 | } | ||
89 | EXPORT_SYMBOL_GPL(eventfd_ctx_get); | ||
90 | |||
91 | /** | ||
92 | * eventfd_ctx_put - Releases a reference to the internal eventfd context. | ||
93 | * @ctx: [in] Pointer to eventfd context. | ||
94 | * | ||
95 | * The eventfd context reference must have been previously acquired either | ||
96 | * with eventfd_ctx_get() or eventfd_ctx_fdget()). | ||
97 | */ | ||
98 | void eventfd_ctx_put(struct eventfd_ctx *ctx) | ||
99 | { | ||
100 | kref_put(&ctx->kref, eventfd_free); | ||
101 | } | ||
102 | EXPORT_SYMBOL_GPL(eventfd_ctx_put); | ||
103 | |||
62 | static int eventfd_release(struct inode *inode, struct file *file) | 104 | static int eventfd_release(struct inode *inode, struct file *file) |
63 | { | 105 | { |
64 | kfree(file->private_data); | 106 | struct eventfd_ctx *ctx = file->private_data; |
107 | |||
108 | wake_up_poll(&ctx->wqh, POLLHUP); | ||
109 | eventfd_ctx_put(ctx); | ||
65 | return 0; | 110 | return 0; |
66 | } | 111 | } |
67 | 112 | ||
@@ -185,6 +230,16 @@ static const struct file_operations eventfd_fops = { | |||
185 | .write = eventfd_write, | 230 | .write = eventfd_write, |
186 | }; | 231 | }; |
187 | 232 | ||
233 | /** | ||
234 | * eventfd_fget - Acquire a reference of an eventfd file descriptor. | ||
235 | * @fd: [in] Eventfd file descriptor. | ||
236 | * | ||
237 | * Returns a pointer to the eventfd file structure in case of success, or the | ||
238 | * following error pointer: | ||
239 | * | ||
240 | * -EBADF : Invalid @fd file descriptor. | ||
241 | * -EINVAL : The @fd file descriptor is not an eventfd file. | ||
242 | */ | ||
188 | struct file *eventfd_fget(int fd) | 243 | struct file *eventfd_fget(int fd) |
189 | { | 244 | { |
190 | struct file *file; | 245 | struct file *file; |
@@ -201,6 +256,48 @@ struct file *eventfd_fget(int fd) | |||
201 | } | 256 | } |
202 | EXPORT_SYMBOL_GPL(eventfd_fget); | 257 | EXPORT_SYMBOL_GPL(eventfd_fget); |
203 | 258 | ||
259 | /** | ||
260 | * eventfd_ctx_fdget - Acquires a reference to the internal eventfd context. | ||
261 | * @fd: [in] Eventfd file descriptor. | ||
262 | * | ||
263 | * Returns a pointer to the internal eventfd context, otherwise the error | ||
264 | * pointers returned by the following functions: | ||
265 | * | ||
266 | * eventfd_fget | ||
267 | */ | ||
268 | struct eventfd_ctx *eventfd_ctx_fdget(int fd) | ||
269 | { | ||
270 | struct file *file; | ||
271 | struct eventfd_ctx *ctx; | ||
272 | |||
273 | file = eventfd_fget(fd); | ||
274 | if (IS_ERR(file)) | ||
275 | return (struct eventfd_ctx *) file; | ||
276 | ctx = eventfd_ctx_get(file->private_data); | ||
277 | fput(file); | ||
278 | |||
279 | return ctx; | ||
280 | } | ||
281 | EXPORT_SYMBOL_GPL(eventfd_ctx_fdget); | ||
282 | |||
283 | /** | ||
284 | * eventfd_ctx_fileget - Acquires a reference to the internal eventfd context. | ||
285 | * @file: [in] Eventfd file pointer. | ||
286 | * | ||
287 | * Returns a pointer to the internal eventfd context, otherwise the error | ||
288 | * pointer: | ||
289 | * | ||
290 | * -EINVAL : The @file is not an eventfd file. | ||
291 | */ | ||
292 | struct eventfd_ctx *eventfd_ctx_fileget(struct file *file) | ||
293 | { | ||
294 | if (file->f_op != &eventfd_fops) | ||
295 | return ERR_PTR(-EINVAL); | ||
296 | |||
297 | return eventfd_ctx_get(file->private_data); | ||
298 | } | ||
299 | EXPORT_SYMBOL_GPL(eventfd_ctx_fileget); | ||
300 | |||
204 | SYSCALL_DEFINE2(eventfd2, unsigned int, count, int, flags) | 301 | SYSCALL_DEFINE2(eventfd2, unsigned int, count, int, flags) |
205 | { | 302 | { |
206 | int fd; | 303 | int fd; |
@@ -217,6 +314,7 @@ SYSCALL_DEFINE2(eventfd2, unsigned int, count, int, flags) | |||
217 | if (!ctx) | 314 | if (!ctx) |
218 | return -ENOMEM; | 315 | return -ENOMEM; |
219 | 316 | ||
317 | kref_init(&ctx->kref); | ||
220 | init_waitqueue_head(&ctx->wqh); | 318 | init_waitqueue_head(&ctx->wqh); |
221 | ctx->count = count; | 319 | ctx->count = count; |
222 | ctx->flags = flags; | 320 | ctx->flags = flags; |
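The eventfd hunks above replace raw file refcounting with a kref-backed eventfd_ctx, so in-kernel users (such as the aio completion path) can keep signalling an eventfd without pinning its struct file. A minimal consumer sketch, assuming the matching declarations exported from <linux/eventfd.h> by this series; the example_* names are illustrative only, not part of the patch:

#include <linux/err.h>
#include <linux/eventfd.h>

/* Hypothetical consumer: resolve an eventfd once, signal it later. */
static struct eventfd_ctx *example_ctx;

static int example_bind_eventfd(int fd)
{
	struct eventfd_ctx *ctx;

	ctx = eventfd_ctx_fdget(fd);	/* takes a kref, drops the file ref */
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);
	example_ctx = ctx;
	return 0;
}

static void example_complete(void)
{
	/* Callable from non-sleeping context; adds 1 to the counter. */
	eventfd_signal(example_ctx, 1);
}

static void example_unbind_eventfd(void)
{
	eventfd_ctx_put(example_ctx);	/* matches eventfd_ctx_fdget() */
	example_ctx = NULL;
}

The context stays valid even if userspace closes the descriptor first: per the eventfd_release() hunk above, the file release path only wakes pollers with POLLHUP and drops its own reference via eventfd_ctx_put().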
diff --git a/fs/ext2/namei.c b/fs/ext2/namei.c index 6524ecaebb7a..e1dedb0f7873 100644 --- a/fs/ext2/namei.c +++ b/fs/ext2/namei.c | |||
@@ -66,8 +66,16 @@ static struct dentry *ext2_lookup(struct inode * dir, struct dentry *dentry, str | |||
66 | inode = NULL; | 66 | inode = NULL; |
67 | if (ino) { | 67 | if (ino) { |
68 | inode = ext2_iget(dir->i_sb, ino); | 68 | inode = ext2_iget(dir->i_sb, ino); |
69 | if (IS_ERR(inode)) | 69 | if (unlikely(IS_ERR(inode))) { |
70 | return ERR_CAST(inode); | 70 | if (PTR_ERR(inode) == -ESTALE) { |
71 | ext2_error(dir->i_sb, __func__, | ||
72 | "deleted inode referenced: %lu", | ||
73 | ino); | ||
74 | return ERR_PTR(-EIO); | ||
75 | } else { | ||
76 | return ERR_CAST(inode); | ||
77 | } | ||
78 | } | ||
71 | } | 79 | } |
72 | return d_splice_alias(inode, dentry); | 80 | return d_splice_alias(inode, dentry); |
73 | } | 81 | } |
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c index 8fed2ed12f38..f58ecbc416c8 100644 --- a/fs/fuse/dev.c +++ b/fs/fuse/dev.c | |||
@@ -849,6 +849,81 @@ err: | |||
849 | return err; | 849 | return err; |
850 | } | 850 | } |
851 | 851 | ||
852 | static int fuse_notify_inval_inode(struct fuse_conn *fc, unsigned int size, | ||
853 | struct fuse_copy_state *cs) | ||
854 | { | ||
855 | struct fuse_notify_inval_inode_out outarg; | ||
856 | int err = -EINVAL; | ||
857 | |||
858 | if (size != sizeof(outarg)) | ||
859 | goto err; | ||
860 | |||
861 | err = fuse_copy_one(cs, &outarg, sizeof(outarg)); | ||
862 | if (err) | ||
863 | goto err; | ||
864 | fuse_copy_finish(cs); | ||
865 | |||
866 | down_read(&fc->killsb); | ||
867 | err = -ENOENT; | ||
868 | if (!fc->sb) | ||
869 | goto err_unlock; | ||
870 | |||
871 | err = fuse_reverse_inval_inode(fc->sb, outarg.ino, | ||
872 | outarg.off, outarg.len); | ||
873 | |||
874 | err_unlock: | ||
875 | up_read(&fc->killsb); | ||
876 | return err; | ||
877 | |||
878 | err: | ||
879 | fuse_copy_finish(cs); | ||
880 | return err; | ||
881 | } | ||
882 | |||
883 | static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size, | ||
884 | struct fuse_copy_state *cs) | ||
885 | { | ||
886 | struct fuse_notify_inval_entry_out outarg; | ||
887 | int err = -EINVAL; | ||
888 | char buf[FUSE_NAME_MAX+1]; | ||
889 | struct qstr name; | ||
890 | |||
891 | if (size < sizeof(outarg)) | ||
892 | goto err; | ||
893 | |||
894 | err = fuse_copy_one(cs, &outarg, sizeof(outarg)); | ||
895 | if (err) | ||
896 | goto err; | ||
897 | |||
898 | err = -ENAMETOOLONG; | ||
899 | if (outarg.namelen > FUSE_NAME_MAX) | ||
900 | goto err; | ||
901 | |||
902 | name.name = buf; | ||
903 | name.len = outarg.namelen; | ||
904 | err = fuse_copy_one(cs, buf, outarg.namelen + 1); | ||
905 | if (err) | ||
906 | goto err; | ||
907 | fuse_copy_finish(cs); | ||
908 | buf[outarg.namelen] = 0; | ||
909 | name.hash = full_name_hash(name.name, name.len); | ||
910 | |||
911 | down_read(&fc->killsb); | ||
912 | err = -ENOENT; | ||
913 | if (!fc->sb) | ||
914 | goto err_unlock; | ||
915 | |||
916 | err = fuse_reverse_inval_entry(fc->sb, outarg.parent, &name); | ||
917 | |||
918 | err_unlock: | ||
919 | up_read(&fc->killsb); | ||
920 | return err; | ||
921 | |||
922 | err: | ||
923 | fuse_copy_finish(cs); | ||
924 | return err; | ||
925 | } | ||
926 | |||
852 | static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code, | 927 | static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code, |
853 | unsigned int size, struct fuse_copy_state *cs) | 928 | unsigned int size, struct fuse_copy_state *cs) |
854 | { | 929 | { |
@@ -856,6 +931,12 @@ static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code, | |||
856 | case FUSE_NOTIFY_POLL: | 931 | case FUSE_NOTIFY_POLL: |
857 | return fuse_notify_poll(fc, size, cs); | 932 | return fuse_notify_poll(fc, size, cs); |
858 | 933 | ||
934 | case FUSE_NOTIFY_INVAL_INODE: | ||
935 | return fuse_notify_inval_inode(fc, size, cs); | ||
936 | |||
937 | case FUSE_NOTIFY_INVAL_ENTRY: | ||
938 | return fuse_notify_inval_entry(fc, size, cs); | ||
939 | |||
859 | default: | 940 | default: |
860 | fuse_copy_finish(cs); | 941 | fuse_copy_finish(cs); |
861 | return -EINVAL; | 942 | return -EINVAL; |
@@ -910,7 +991,7 @@ static ssize_t fuse_dev_write(struct kiocb *iocb, const struct iovec *iov, | |||
910 | unsigned long nr_segs, loff_t pos) | 991 | unsigned long nr_segs, loff_t pos) |
911 | { | 992 | { |
912 | int err; | 993 | int err; |
913 | unsigned nbytes = iov_length(iov, nr_segs); | 994 | size_t nbytes = iov_length(iov, nr_segs); |
914 | struct fuse_req *req; | 995 | struct fuse_req *req; |
915 | struct fuse_out_header oh; | 996 | struct fuse_out_header oh; |
916 | struct fuse_copy_state cs; | 997 | struct fuse_copy_state cs; |
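The two handlers added above let a FUSE daemon push cache invalidations into the kernel. On the device side such messages are unsolicited: the daemon writes a fuse_out_header whose unique field is zero and whose error field carries the notify code, followed by the payload. A hedged userspace sketch, assuming the fuse_notify_inval_inode_out layout and the FUSE_NOTIFY_INVAL_INODE code from <linux/fuse.h> at this protocol revision; the helper name is illustrative:

#include <stdint.h>
#include <sys/uio.h>
#include <unistd.h>
#include <linux/fuse.h>

/* Illustrative helper: ask the kernel to drop cached pages of an inode. */
static int notify_inval_inode(int fuse_fd, uint64_t nodeid,
			      int64_t off, int64_t len)
{
	struct fuse_notify_inval_inode_out arg = {
		.ino = nodeid,
		.off = off,	/* off < 0: invalidate attributes only */
		.len = len,	/* len <= 0: to the end of the file */
	};
	struct fuse_out_header oh = {
		.len    = sizeof(oh) + sizeof(arg),
		.error  = FUSE_NOTIFY_INVAL_INODE,	/* notify code */
		.unique = 0,				/* marks a notification */
	};
	struct iovec iov[2] = {
		{ .iov_base = &oh,  .iov_len = sizeof(oh)  },
		{ .iov_base = &arg, .iov_len = sizeof(arg) },
	};

	/* A real daemon would inspect errno on failure; per the handler
	 * above, the kernel reports -ENOENT if the inode is not cached. */
	return writev(fuse_fd, iov, 2) == (ssize_t)oh.len ? 0 : -1;
}

FUSE_NOTIFY_INVAL_ENTRY follows the same framing, with a fuse_notify_inval_entry_out payload followed by the NUL-terminated entry name, matching the namelen + 1 copy in fuse_notify_inval_entry() above.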
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c index b3089a083d30..e703654e7f40 100644 --- a/fs/fuse/dir.c +++ b/fs/fuse/dir.c | |||
@@ -375,7 +375,7 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry, int mode, | |||
375 | struct fuse_conn *fc = get_fuse_conn(dir); | 375 | struct fuse_conn *fc = get_fuse_conn(dir); |
376 | struct fuse_req *req; | 376 | struct fuse_req *req; |
377 | struct fuse_req *forget_req; | 377 | struct fuse_req *forget_req; |
378 | struct fuse_open_in inarg; | 378 | struct fuse_create_in inarg; |
379 | struct fuse_open_out outopen; | 379 | struct fuse_open_out outopen; |
380 | struct fuse_entry_out outentry; | 380 | struct fuse_entry_out outentry; |
381 | struct fuse_file *ff; | 381 | struct fuse_file *ff; |
@@ -399,15 +399,20 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry, int mode, | |||
399 | if (!ff) | 399 | if (!ff) |
400 | goto out_put_request; | 400 | goto out_put_request; |
401 | 401 | ||
402 | if (!fc->dont_mask) | ||
403 | mode &= ~current_umask(); | ||
404 | |||
402 | flags &= ~O_NOCTTY; | 405 | flags &= ~O_NOCTTY; |
403 | memset(&inarg, 0, sizeof(inarg)); | 406 | memset(&inarg, 0, sizeof(inarg)); |
404 | memset(&outentry, 0, sizeof(outentry)); | 407 | memset(&outentry, 0, sizeof(outentry)); |
405 | inarg.flags = flags; | 408 | inarg.flags = flags; |
406 | inarg.mode = mode; | 409 | inarg.mode = mode; |
410 | inarg.umask = current_umask(); | ||
407 | req->in.h.opcode = FUSE_CREATE; | 411 | req->in.h.opcode = FUSE_CREATE; |
408 | req->in.h.nodeid = get_node_id(dir); | 412 | req->in.h.nodeid = get_node_id(dir); |
409 | req->in.numargs = 2; | 413 | req->in.numargs = 2; |
410 | req->in.args[0].size = sizeof(inarg); | 414 | req->in.args[0].size = fc->minor < 12 ? sizeof(struct fuse_open_in) : |
415 | sizeof(inarg); | ||
411 | req->in.args[0].value = &inarg; | 416 | req->in.args[0].value = &inarg; |
412 | req->in.args[1].size = entry->d_name.len + 1; | 417 | req->in.args[1].size = entry->d_name.len + 1; |
413 | req->in.args[1].value = entry->d_name.name; | 418 | req->in.args[1].value = entry->d_name.name; |
@@ -546,12 +551,17 @@ static int fuse_mknod(struct inode *dir, struct dentry *entry, int mode, | |||
546 | if (IS_ERR(req)) | 551 | if (IS_ERR(req)) |
547 | return PTR_ERR(req); | 552 | return PTR_ERR(req); |
548 | 553 | ||
554 | if (!fc->dont_mask) | ||
555 | mode &= ~current_umask(); | ||
556 | |||
549 | memset(&inarg, 0, sizeof(inarg)); | 557 | memset(&inarg, 0, sizeof(inarg)); |
550 | inarg.mode = mode; | 558 | inarg.mode = mode; |
551 | inarg.rdev = new_encode_dev(rdev); | 559 | inarg.rdev = new_encode_dev(rdev); |
560 | inarg.umask = current_umask(); | ||
552 | req->in.h.opcode = FUSE_MKNOD; | 561 | req->in.h.opcode = FUSE_MKNOD; |
553 | req->in.numargs = 2; | 562 | req->in.numargs = 2; |
554 | req->in.args[0].size = sizeof(inarg); | 563 | req->in.args[0].size = fc->minor < 12 ? FUSE_COMPAT_MKNOD_IN_SIZE : |
564 | sizeof(inarg); | ||
555 | req->in.args[0].value = &inarg; | 565 | req->in.args[0].value = &inarg; |
556 | req->in.args[1].size = entry->d_name.len + 1; | 566 | req->in.args[1].size = entry->d_name.len + 1; |
557 | req->in.args[1].value = entry->d_name.name; | 567 | req->in.args[1].value = entry->d_name.name; |
@@ -578,8 +588,12 @@ static int fuse_mkdir(struct inode *dir, struct dentry *entry, int mode) | |||
578 | if (IS_ERR(req)) | 588 | if (IS_ERR(req)) |
579 | return PTR_ERR(req); | 589 | return PTR_ERR(req); |
580 | 590 | ||
591 | if (!fc->dont_mask) | ||
592 | mode &= ~current_umask(); | ||
593 | |||
581 | memset(&inarg, 0, sizeof(inarg)); | 594 | memset(&inarg, 0, sizeof(inarg)); |
582 | inarg.mode = mode; | 595 | inarg.mode = mode; |
596 | inarg.umask = current_umask(); | ||
583 | req->in.h.opcode = FUSE_MKDIR; | 597 | req->in.h.opcode = FUSE_MKDIR; |
584 | req->in.numargs = 2; | 598 | req->in.numargs = 2; |
585 | req->in.args[0].size = sizeof(inarg); | 599 | req->in.args[0].size = sizeof(inarg); |
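The create/mknod/mkdir hunks above skip the in-kernel umask masking whenever the connection has dont_mask set, and pass the caller's umask along in the request instead. The flag is negotiated during the INIT exchange (see the fs/fuse/inode.c hunks further down). A sketch of the daemon side of that negotiation, assuming the FUSE_DONT_MASK flag and the fuse_init_in/fuse_init_out layouts added to <linux/fuse.h> by this series; the function name is illustrative:

#include <linux/fuse.h>

/*
 * Illustrative INIT handler fragment: a filesystem that wants to apply
 * the umask itself (for example to honour POSIX ACL default entries)
 * echoes FUSE_DONT_MASK back to the kernel. The kernel then leaves the
 * mode untouched and supplies the caller's umask in CREATE, MKNOD and
 * MKDIR requests.
 */
static void fill_init_reply(const struct fuse_init_in *in,
			    struct fuse_init_out *out)
{
	out->major = FUSE_KERNEL_VERSION;
	out->minor = FUSE_KERNEL_MINOR_VERSION;
	if (in->flags & FUSE_DONT_MASK)
		out->flags |= FUSE_DONT_MASK;
}

Filesystems that do not reply with the flag keep the old behaviour: the kernel masks the mode before sending the request, exactly as the mode &= ~current_umask() lines above do.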
@@ -845,6 +859,43 @@ int fuse_update_attributes(struct inode *inode, struct kstat *stat, | |||
845 | return err; | 859 | return err; |
846 | } | 860 | } |
847 | 861 | ||
862 | int fuse_reverse_inval_entry(struct super_block *sb, u64 parent_nodeid, | ||
863 | struct qstr *name) | ||
864 | { | ||
865 | int err = -ENOTDIR; | ||
866 | struct inode *parent; | ||
867 | struct dentry *dir; | ||
868 | struct dentry *entry; | ||
869 | |||
870 | parent = ilookup5(sb, parent_nodeid, fuse_inode_eq, &parent_nodeid); | ||
871 | if (!parent) | ||
872 | return -ENOENT; | ||
873 | |||
874 | mutex_lock(&parent->i_mutex); | ||
875 | if (!S_ISDIR(parent->i_mode)) | ||
876 | goto unlock; | ||
877 | |||
878 | err = -ENOENT; | ||
879 | dir = d_find_alias(parent); | ||
880 | if (!dir) | ||
881 | goto unlock; | ||
882 | |||
883 | entry = d_lookup(dir, name); | ||
884 | dput(dir); | ||
885 | if (!entry) | ||
886 | goto unlock; | ||
887 | |||
888 | fuse_invalidate_attr(parent); | ||
889 | fuse_invalidate_entry(entry); | ||
890 | dput(entry); | ||
891 | err = 0; | ||
892 | |||
893 | unlock: | ||
894 | mutex_unlock(&parent->i_mutex); | ||
895 | iput(parent); | ||
896 | return err; | ||
897 | } | ||
898 | |||
848 | /* | 899 | /* |
849 | * Calling into a user-controlled filesystem gives the filesystem | 900 | * Calling into a user-controlled filesystem gives the filesystem |
850 | * daemon ptrace-like capabilities over the requester process. This | 901 | * daemon ptrace-like capabilities over the requester process. This |
diff --git a/fs/fuse/file.c b/fs/fuse/file.c index fce6ce694fde..cbc464043b6f 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c | |||
@@ -1922,7 +1922,7 @@ unsigned fuse_file_poll(struct file *file, poll_table *wait) | |||
1922 | 1922 | ||
1923 | req = fuse_get_req(fc); | 1923 | req = fuse_get_req(fc); |
1924 | if (IS_ERR(req)) | 1924 | if (IS_ERR(req)) |
1925 | return PTR_ERR(req); | 1925 | return POLLERR; |
1926 | 1926 | ||
1927 | req->in.h.opcode = FUSE_POLL; | 1927 | req->in.h.opcode = FUSE_POLL; |
1928 | req->in.h.nodeid = ff->nodeid; | 1928 | req->in.h.nodeid = ff->nodeid; |
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h index aaf2f9ff970e..52b641fc0faf 100644 --- a/fs/fuse/fuse_i.h +++ b/fs/fuse/fuse_i.h | |||
@@ -446,6 +446,9 @@ struct fuse_conn { | |||
446 | /** Do multi-page cached writes */ | 446 | /** Do multi-page cached writes */ |
447 | unsigned big_writes:1; | 447 | unsigned big_writes:1; |
448 | 448 | ||
449 | /** Don't apply umask to creation modes */ | ||
450 | unsigned dont_mask:1; | ||
451 | |||
449 | /** The number of requests waiting for completion */ | 452 | /** The number of requests waiting for completion */ |
450 | atomic_t num_waiting; | 453 | atomic_t num_waiting; |
451 | 454 | ||
@@ -481,6 +484,12 @@ struct fuse_conn { | |||
481 | 484 | ||
482 | /** Called on final put */ | 485 | /** Called on final put */ |
483 | void (*release)(struct fuse_conn *); | 486 | void (*release)(struct fuse_conn *); |
487 | |||
488 | /** Super block for this connection. */ | ||
489 | struct super_block *sb; | ||
490 | |||
491 | /** Read/write semaphore to hold when accessing sb. */ | ||
492 | struct rw_semaphore killsb; | ||
484 | }; | 493 | }; |
485 | 494 | ||
486 | static inline struct fuse_conn *get_fuse_conn_super(struct super_block *sb) | 495 | static inline struct fuse_conn *get_fuse_conn_super(struct super_block *sb) |
@@ -509,6 +518,11 @@ extern const struct file_operations fuse_dev_operations; | |||
509 | extern const struct dentry_operations fuse_dentry_operations; | 518 | extern const struct dentry_operations fuse_dentry_operations; |
510 | 519 | ||
511 | /** | 520 | /** |
521 | * Inode to nodeid comparison. | ||
522 | */ | ||
523 | int fuse_inode_eq(struct inode *inode, void *_nodeidp); | ||
524 | |||
525 | /** | ||
512 | * Get a filled in inode | 526 | * Get a filled in inode |
513 | */ | 527 | */ |
514 | struct inode *fuse_iget(struct super_block *sb, u64 nodeid, | 528 | struct inode *fuse_iget(struct super_block *sb, u64 nodeid, |
@@ -708,6 +722,19 @@ void fuse_release_nowrite(struct inode *inode); | |||
708 | 722 | ||
709 | u64 fuse_get_attr_version(struct fuse_conn *fc); | 723 | u64 fuse_get_attr_version(struct fuse_conn *fc); |
710 | 724 | ||
725 | /** | ||
726 | * File-system tells the kernel to invalidate cache for the given node id. | ||
727 | */ | ||
728 | int fuse_reverse_inval_inode(struct super_block *sb, u64 nodeid, | ||
729 | loff_t offset, loff_t len); | ||
730 | |||
731 | /** | ||
732 | * File-system tells the kernel to invalidate parent attributes and | ||
733 | * the dentry matching parent/name. | ||
734 | */ | ||
735 | int fuse_reverse_inval_entry(struct super_block *sb, u64 parent_nodeid, | ||
736 | struct qstr *name); | ||
737 | |||
711 | int fuse_do_open(struct fuse_conn *fc, u64 nodeid, struct file *file, | 738 | int fuse_do_open(struct fuse_conn *fc, u64 nodeid, struct file *file, |
712 | bool isdir); | 739 | bool isdir); |
713 | ssize_t fuse_direct_io(struct file *file, const char __user *buf, | 740 | ssize_t fuse_direct_io(struct file *file, const char __user *buf, |
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c index d8673ccf90b7..f91ccc4a189d 100644 --- a/fs/fuse/inode.c +++ b/fs/fuse/inode.c | |||
@@ -206,7 +206,7 @@ static void fuse_init_inode(struct inode *inode, struct fuse_attr *attr) | |||
206 | BUG(); | 206 | BUG(); |
207 | } | 207 | } |
208 | 208 | ||
209 | static int fuse_inode_eq(struct inode *inode, void *_nodeidp) | 209 | int fuse_inode_eq(struct inode *inode, void *_nodeidp) |
210 | { | 210 | { |
211 | u64 nodeid = *(u64 *) _nodeidp; | 211 | u64 nodeid = *(u64 *) _nodeidp; |
212 | if (get_node_id(inode) == nodeid) | 212 | if (get_node_id(inode) == nodeid) |
@@ -257,6 +257,31 @@ struct inode *fuse_iget(struct super_block *sb, u64 nodeid, | |||
257 | return inode; | 257 | return inode; |
258 | } | 258 | } |
259 | 259 | ||
260 | int fuse_reverse_inval_inode(struct super_block *sb, u64 nodeid, | ||
261 | loff_t offset, loff_t len) | ||
262 | { | ||
263 | struct inode *inode; | ||
264 | pgoff_t pg_start; | ||
265 | pgoff_t pg_end; | ||
266 | |||
267 | inode = ilookup5(sb, nodeid, fuse_inode_eq, &nodeid); | ||
268 | if (!inode) | ||
269 | return -ENOENT; | ||
270 | |||
271 | fuse_invalidate_attr(inode); | ||
272 | if (offset >= 0) { | ||
273 | pg_start = offset >> PAGE_CACHE_SHIFT; | ||
274 | if (len <= 0) | ||
275 | pg_end = -1; | ||
276 | else | ||
277 | pg_end = (offset + len - 1) >> PAGE_CACHE_SHIFT; | ||
278 | invalidate_inode_pages2_range(inode->i_mapping, | ||
279 | pg_start, pg_end); | ||
280 | } | ||
281 | iput(inode); | ||
282 | return 0; | ||
283 | } | ||
284 | |||
260 | static void fuse_umount_begin(struct super_block *sb) | 285 | static void fuse_umount_begin(struct super_block *sb) |
261 | { | 286 | { |
262 | fuse_abort_conn(get_fuse_conn_super(sb)); | 287 | fuse_abort_conn(get_fuse_conn_super(sb)); |
@@ -480,6 +505,7 @@ void fuse_conn_init(struct fuse_conn *fc) | |||
480 | memset(fc, 0, sizeof(*fc)); | 505 | memset(fc, 0, sizeof(*fc)); |
481 | spin_lock_init(&fc->lock); | 506 | spin_lock_init(&fc->lock); |
482 | mutex_init(&fc->inst_mutex); | 507 | mutex_init(&fc->inst_mutex); |
508 | init_rwsem(&fc->killsb); | ||
483 | atomic_set(&fc->count, 1); | 509 | atomic_set(&fc->count, 1); |
484 | init_waitqueue_head(&fc->waitq); | 510 | init_waitqueue_head(&fc->waitq); |
485 | init_waitqueue_head(&fc->blocked_waitq); | 511 | init_waitqueue_head(&fc->blocked_waitq); |
@@ -725,6 +751,8 @@ static void process_init_reply(struct fuse_conn *fc, struct fuse_req *req) | |||
725 | } | 751 | } |
726 | if (arg->flags & FUSE_BIG_WRITES) | 752 | if (arg->flags & FUSE_BIG_WRITES) |
727 | fc->big_writes = 1; | 753 | fc->big_writes = 1; |
754 | if (arg->flags & FUSE_DONT_MASK) | ||
755 | fc->dont_mask = 1; | ||
728 | } else { | 756 | } else { |
729 | ra_pages = fc->max_read / PAGE_CACHE_SIZE; | 757 | ra_pages = fc->max_read / PAGE_CACHE_SIZE; |
730 | fc->no_lock = 1; | 758 | fc->no_lock = 1; |
@@ -748,7 +776,7 @@ static void fuse_send_init(struct fuse_conn *fc, struct fuse_req *req) | |||
748 | arg->minor = FUSE_KERNEL_MINOR_VERSION; | 776 | arg->minor = FUSE_KERNEL_MINOR_VERSION; |
749 | arg->max_readahead = fc->bdi.ra_pages * PAGE_CACHE_SIZE; | 777 | arg->max_readahead = fc->bdi.ra_pages * PAGE_CACHE_SIZE; |
750 | arg->flags |= FUSE_ASYNC_READ | FUSE_POSIX_LOCKS | FUSE_ATOMIC_O_TRUNC | | 778 | arg->flags |= FUSE_ASYNC_READ | FUSE_POSIX_LOCKS | FUSE_ATOMIC_O_TRUNC | |
751 | FUSE_EXPORT_SUPPORT | FUSE_BIG_WRITES; | 779 | FUSE_EXPORT_SUPPORT | FUSE_BIG_WRITES | FUSE_DONT_MASK; |
752 | req->in.h.opcode = FUSE_INIT; | 780 | req->in.h.opcode = FUSE_INIT; |
753 | req->in.numargs = 1; | 781 | req->in.numargs = 1; |
754 | req->in.args[0].size = sizeof(*arg); | 782 | req->in.args[0].size = sizeof(*arg); |
@@ -860,10 +888,16 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent) | |||
860 | fuse_conn_init(fc); | 888 | fuse_conn_init(fc); |
861 | 889 | ||
862 | fc->dev = sb->s_dev; | 890 | fc->dev = sb->s_dev; |
891 | fc->sb = sb; | ||
863 | err = fuse_bdi_init(fc, sb); | 892 | err = fuse_bdi_init(fc, sb); |
864 | if (err) | 893 | if (err) |
865 | goto err_put_conn; | 894 | goto err_put_conn; |
866 | 895 | ||
896 | /* Handle umasking inside the fuse code */ | ||
897 | if (sb->s_flags & MS_POSIXACL) | ||
898 | fc->dont_mask = 1; | ||
899 | sb->s_flags |= MS_POSIXACL; | ||
900 | |||
867 | fc->release = fuse_free_conn; | 901 | fc->release = fuse_free_conn; |
868 | fc->flags = d.flags; | 902 | fc->flags = d.flags; |
869 | fc->user_id = d.user_id; | 903 | fc->user_id = d.user_id; |
@@ -941,12 +975,25 @@ static int fuse_get_sb(struct file_system_type *fs_type, | |||
941 | return get_sb_nodev(fs_type, flags, raw_data, fuse_fill_super, mnt); | 975 | return get_sb_nodev(fs_type, flags, raw_data, fuse_fill_super, mnt); |
942 | } | 976 | } |
943 | 977 | ||
978 | static void fuse_kill_sb_anon(struct super_block *sb) | ||
979 | { | ||
980 | struct fuse_conn *fc = get_fuse_conn_super(sb); | ||
981 | |||
982 | if (fc) { | ||
983 | down_write(&fc->killsb); | ||
984 | fc->sb = NULL; | ||
985 | up_write(&fc->killsb); | ||
986 | } | ||
987 | |||
988 | kill_anon_super(sb); | ||
989 | } | ||
990 | |||
944 | static struct file_system_type fuse_fs_type = { | 991 | static struct file_system_type fuse_fs_type = { |
945 | .owner = THIS_MODULE, | 992 | .owner = THIS_MODULE, |
946 | .name = "fuse", | 993 | .name = "fuse", |
947 | .fs_flags = FS_HAS_SUBTYPE, | 994 | .fs_flags = FS_HAS_SUBTYPE, |
948 | .get_sb = fuse_get_sb, | 995 | .get_sb = fuse_get_sb, |
949 | .kill_sb = kill_anon_super, | 996 | .kill_sb = fuse_kill_sb_anon, |
950 | }; | 997 | }; |
951 | 998 | ||
952 | #ifdef CONFIG_BLOCK | 999 | #ifdef CONFIG_BLOCK |
@@ -958,11 +1005,24 @@ static int fuse_get_sb_blk(struct file_system_type *fs_type, | |||
958 | mnt); | 1005 | mnt); |
959 | } | 1006 | } |
960 | 1007 | ||
1008 | static void fuse_kill_sb_blk(struct super_block *sb) | ||
1009 | { | ||
1010 | struct fuse_conn *fc = get_fuse_conn_super(sb); | ||
1011 | |||
1012 | if (fc) { | ||
1013 | down_write(&fc->killsb); | ||
1014 | fc->sb = NULL; | ||
1015 | up_write(&fc->killsb); | ||
1016 | } | ||
1017 | |||
1018 | kill_block_super(sb); | ||
1019 | } | ||
1020 | |||
961 | static struct file_system_type fuseblk_fs_type = { | 1021 | static struct file_system_type fuseblk_fs_type = { |
962 | .owner = THIS_MODULE, | 1022 | .owner = THIS_MODULE, |
963 | .name = "fuseblk", | 1023 | .name = "fuseblk", |
964 | .get_sb = fuse_get_sb_blk, | 1024 | .get_sb = fuse_get_sb_blk, |
965 | .kill_sb = kill_block_super, | 1025 | .kill_sb = fuse_kill_sb_blk, |
966 | .fs_flags = FS_REQUIRES_DEV | FS_HAS_SUBTYPE, | 1026 | .fs_flags = FS_REQUIRES_DEV | FS_HAS_SUBTYPE, |
967 | }; | 1027 | }; |
968 | 1028 | ||
diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c index fe02ad4740e7..032604e5ef2c 100644 --- a/fs/hostfs/hostfs_kern.c +++ b/fs/hostfs/hostfs_kern.c | |||
@@ -972,6 +972,7 @@ static int hostfs_fill_sb_common(struct super_block *sb, void *d, int silent) | |||
972 | sb->s_blocksize_bits = 10; | 972 | sb->s_blocksize_bits = 10; |
973 | sb->s_magic = HOSTFS_SUPER_MAGIC; | 973 | sb->s_magic = HOSTFS_SUPER_MAGIC; |
974 | sb->s_op = &hostfs_sbops; | 974 | sb->s_op = &hostfs_sbops; |
975 | sb->s_maxbytes = MAX_LFS_FILESIZE; | ||
975 | 976 | ||
976 | /* NULL is printed as <NULL> by sprintf: avoid that. */ | 977 | /* NULL is printed as <NULL> by sprintf: avoid that. */ |
977 | if (req_root == NULL) | 978 | if (req_root == NULL) |
diff --git a/fs/jffs2/scan.c b/fs/jffs2/scan.c index 7515e73e2bfb..696686cc206e 100644 --- a/fs/jffs2/scan.c +++ b/fs/jffs2/scan.c | |||
@@ -130,9 +130,9 @@ int jffs2_scan_medium(struct jffs2_sb_info *c) | |||
130 | if (jffs2_sum_active()) { | 130 | if (jffs2_sum_active()) { |
131 | s = kzalloc(sizeof(struct jffs2_summary), GFP_KERNEL); | 131 | s = kzalloc(sizeof(struct jffs2_summary), GFP_KERNEL); |
132 | if (!s) { | 132 | if (!s) { |
133 | kfree(flashbuf); | ||
134 | JFFS2_WARNING("Can't allocate memory for summary\n"); | 133 | JFFS2_WARNING("Can't allocate memory for summary\n"); |
135 | return -ENOMEM; | 134 | ret = -ENOMEM; |
135 | goto out; | ||
136 | } | 136 | } |
137 | } | 137 | } |
138 | 138 | ||
diff --git a/fs/namei.c b/fs/namei.c index 5b961eb71cbf..f3c5b278895a 100644 --- a/fs/namei.c +++ b/fs/namei.c | |||
@@ -1761,6 +1761,10 @@ do_last: | |||
1761 | goto exit; | 1761 | goto exit; |
1762 | } | 1762 | } |
1763 | filp = nameidata_to_filp(&nd, open_flag); | 1763 | filp = nameidata_to_filp(&nd, open_flag); |
1764 | if (IS_ERR(filp)) | ||
1765 | ima_counts_put(&nd.path, | ||
1766 | acc_mode & (MAY_READ | MAY_WRITE | | ||
1767 | MAY_EXEC)); | ||
1764 | mnt_drop_write(nd.path.mnt); | 1768 | mnt_drop_write(nd.path.mnt); |
1765 | if (nd.root.mnt) | 1769 | if (nd.root.mnt) |
1766 | path_put(&nd.root); | 1770 | path_put(&nd.root); |
@@ -1817,6 +1821,9 @@ ok: | |||
1817 | goto exit; | 1821 | goto exit; |
1818 | } | 1822 | } |
1819 | filp = nameidata_to_filp(&nd, open_flag); | 1823 | filp = nameidata_to_filp(&nd, open_flag); |
1824 | if (IS_ERR(filp)) | ||
1825 | ima_counts_put(&nd.path, | ||
1826 | acc_mode & (MAY_READ | MAY_WRITE | MAY_EXEC)); | ||
1820 | /* | 1827 | /* |
1821 | * It is now safe to drop the mnt write | 1828 | * It is now safe to drop the mnt write |
1822 | * because the filp has had a write taken | 1829 | * because the filp has had a write taken |
diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c index ff231ad23895..ff27a2965844 100644 --- a/fs/notify/inotify/inotify_user.c +++ b/fs/notify/inotify/inotify_user.c | |||
@@ -296,12 +296,15 @@ static int inotify_fasync(int fd, struct file *file, int on) | |||
296 | static int inotify_release(struct inode *ignored, struct file *file) | 296 | static int inotify_release(struct inode *ignored, struct file *file) |
297 | { | 297 | { |
298 | struct fsnotify_group *group = file->private_data; | 298 | struct fsnotify_group *group = file->private_data; |
299 | struct user_struct *user = group->inotify_data.user; | ||
299 | 300 | ||
300 | fsnotify_clear_marks_by_group(group); | 301 | fsnotify_clear_marks_by_group(group); |
301 | 302 | ||
302 | /* free this group, matching get was inotify_init->fsnotify_obtain_group */ | 303 | /* free this group, matching get was inotify_init->fsnotify_obtain_group */ |
303 | fsnotify_put_group(group); | 304 | fsnotify_put_group(group); |
304 | 305 | ||
306 | atomic_dec(&user->inotify_devs); | ||
307 | |||
305 | return 0; | 308 | return 0; |
306 | } | 309 | } |
307 | 310 | ||