diff options
author    Kent Overstreet <koverstreet@google.com>  2012-10-12 18:29:33 -0400
committer Kent Overstreet <koverstreet@google.com>  2013-03-23 17:15:27 -0400
commit    9f060e2231ca96ca94f2ffcff730acd72606b280 (patch)
tree      4818da8b59010493810e04d7a6273707875dc73c /fs/bio-integrity.c
parent    6fda981cafbf908acd11e1e636fec50e99d56a47 (diff)
block: Convert integrity to bvec_alloc_bs()
This adds a pointer to the bvec array to struct bio_integrity_payload,
instead of the bvecs always being inline; then the bvecs are allocated
with bvec_alloc_bs().
Changed bvec_alloc_bs() and bvec_free_bs() to take a pointer to a
mempool instead of the bioset, so that bio integrity can use a different
mempool for its bvecs, and thus avoid a potential deadlock.
This is eventually for immutable bio vecs - immutable bvecs aren't
useful if we still have to copy them, hence the need for the pointer.
Less code is always nice too, though.
Also, bio_integrity_alloc() was using fs_bio_set if no bio_set was
specified. This was wrong - using the bio_set doesn't protect us from
memory allocation failures, because we just used kmalloc for the
bio_integrity_payload. But it does introduce the possibility of
deadlock, if for some reason we weren't supposed to be using fs_bio_set.
Signed-off-by: Kent Overstreet <koverstreet@google.com>
CC: Jens Axboe <axboe@kernel.dk>
CC: Martin K. Petersen <martin.petersen@oracle.com>
Diffstat (limited to 'fs/bio-integrity.c')
-rw-r--r--  fs/bio-integrity.c  132
1 file changed, 48 insertions(+), 84 deletions(-)
diff --git a/fs/bio-integrity.c b/fs/bio-integrity.c index 94fa1c562c0e..8c4c604c840d 100644 --- a/fs/bio-integrity.c +++ b/fs/bio-integrity.c | |||
@@ -27,48 +27,11 @@ | |||
27 | #include <linux/workqueue.h> | 27 | #include <linux/workqueue.h> |
28 | #include <linux/slab.h> | 28 | #include <linux/slab.h> |
29 | 29 | ||
30 | struct integrity_slab { | 30 | #define BIP_INLINE_VECS 4 |
31 | struct kmem_cache *slab; | ||
32 | unsigned short nr_vecs; | ||
33 | char name[8]; | ||
34 | }; | ||
35 | |||
36 | #define IS(x) { .nr_vecs = x, .name = "bip-"__stringify(x) } | ||
37 | struct integrity_slab bip_slab[BIOVEC_NR_POOLS] __read_mostly = { | ||
38 | IS(1), IS(4), IS(16), IS(64), IS(128), IS(BIO_MAX_PAGES), | ||
39 | }; | ||
40 | #undef IS | ||
41 | 31 | ||
32 | static struct kmem_cache *bip_slab; | ||
42 | static struct workqueue_struct *kintegrityd_wq; | 33 | static struct workqueue_struct *kintegrityd_wq; |
43 | 34 | ||
44 | static inline unsigned int vecs_to_idx(unsigned int nr) | ||
45 | { | ||
46 | switch (nr) { | ||
47 | case 1: | ||
48 | return 0; | ||
49 | case 2 ... 4: | ||
50 | return 1; | ||
51 | case 5 ... 16: | ||
52 | return 2; | ||
53 | case 17 ... 64: | ||
54 | return 3; | ||
55 | case 65 ... 128: | ||
56 | return 4; | ||
57 | case 129 ... BIO_MAX_PAGES: | ||
58 | return 5; | ||
59 | default: | ||
60 | BUG(); | ||
61 | } | ||
62 | } | ||
63 | |||
64 | static inline int use_bip_pool(unsigned int idx) | ||
65 | { | ||
66 | if (idx == BIOVEC_MAX_IDX) | ||
67 | return 1; | ||
68 | |||
69 | return 0; | ||
70 | } | ||
71 | |||
72 | /** | 35 | /** |
73 | * bio_integrity_alloc - Allocate integrity payload and attach it to bio | 36 | * bio_integrity_alloc - Allocate integrity payload and attach it to bio |
74 | * @bio: bio to attach integrity metadata to | 37 | * @bio: bio to attach integrity metadata to |
@@ -84,38 +47,41 @@ struct bio_integrity_payload *bio_integrity_alloc(struct bio *bio, | |||
84 | unsigned int nr_vecs) | 47 | unsigned int nr_vecs) |
85 | { | 48 | { |
86 | struct bio_integrity_payload *bip; | 49 | struct bio_integrity_payload *bip; |
87 | unsigned int idx = vecs_to_idx(nr_vecs); | ||
88 | struct bio_set *bs = bio->bi_pool; | 50 | struct bio_set *bs = bio->bi_pool; |
89 | 51 | unsigned long idx = BIO_POOL_NONE; | |
90 | if (!bs) | 52 | unsigned inline_vecs; |
91 | bs = fs_bio_set; | 53 | |
92 | 54 | if (!bs) { | |
93 | BUG_ON(bio == NULL); | 55 | bip = kmalloc(sizeof(struct bio_integrity_payload) + |
94 | bip = NULL; | 56 | sizeof(struct bio_vec) * nr_vecs, gfp_mask); |
95 | 57 | inline_vecs = nr_vecs; | |
96 | /* Lower order allocations come straight from slab */ | 58 | } else { |
97 | if (!use_bip_pool(idx)) | ||
98 | bip = kmem_cache_alloc(bip_slab[idx].slab, gfp_mask); | ||
99 | |||
100 | /* Use mempool if lower order alloc failed or max vecs were requested */ | ||
101 | if (bip == NULL) { | ||
102 | idx = BIOVEC_MAX_IDX; /* so we free the payload properly later */ | ||
103 | bip = mempool_alloc(bs->bio_integrity_pool, gfp_mask); | 59 | bip = mempool_alloc(bs->bio_integrity_pool, gfp_mask); |
104 | 60 | inline_vecs = BIP_INLINE_VECS; | |
105 | if (unlikely(bip == NULL)) { | ||
106 | printk(KERN_ERR "%s: could not alloc bip\n", __func__); | ||
107 | return NULL; | ||
108 | } | ||
109 | } | 61 | } |
110 | 62 | ||
63 | if (unlikely(!bip)) | ||
64 | return NULL; | ||
65 | |||
111 | memset(bip, 0, sizeof(*bip)); | 66 | memset(bip, 0, sizeof(*bip)); |
112 | 67 | ||
68 | if (nr_vecs > inline_vecs) { | ||
69 | bip->bip_vec = bvec_alloc(gfp_mask, nr_vecs, &idx, | ||
70 | bs->bvec_integrity_pool); | ||
71 | if (!bip->bip_vec) | ||
72 | goto err; | ||
73 | } else { | ||
74 | bip->bip_vec = bip->bip_inline_vecs; | ||
75 | } | ||
76 | |||
113 | bip->bip_slab = idx; | 77 | bip->bip_slab = idx; |
114 | bip->bip_bio = bio; | 78 | bip->bip_bio = bio; |
115 | bip->bip_vec = bip->bip_inline_vecs; | ||
116 | bio->bi_integrity = bip; | 79 | bio->bi_integrity = bip; |
117 | 80 | ||
118 | return bip; | 81 | return bip; |
82 | err: | ||
83 | mempool_free(bip, bs->bio_integrity_pool); | ||
84 | return NULL; | ||
119 | } | 85 | } |
120 | EXPORT_SYMBOL(bio_integrity_alloc); | 86 | EXPORT_SYMBOL(bio_integrity_alloc); |
121 | 87 | ||
@@ -131,20 +97,20 @@ void bio_integrity_free(struct bio *bio) | |||
131 | struct bio_integrity_payload *bip = bio->bi_integrity; | 97 | struct bio_integrity_payload *bip = bio->bi_integrity; |
132 | struct bio_set *bs = bio->bi_pool; | 98 | struct bio_set *bs = bio->bi_pool; |
133 | 99 | ||
134 | if (!bs) | ||
135 | bs = fs_bio_set; | ||
136 | |||
137 | BUG_ON(bip == NULL); | ||
138 | |||
139 | /* A cloned bio doesn't own the integrity metadata */ | 100 | /* A cloned bio doesn't own the integrity metadata */ |
140 | if (!bio_flagged(bio, BIO_CLONED) && !bio_flagged(bio, BIO_FS_INTEGRITY) | 101 | if (!bio_flagged(bio, BIO_CLONED) && !bio_flagged(bio, BIO_FS_INTEGRITY) |
141 | && bip->bip_buf != NULL) | 102 | && bip->bip_buf != NULL) |
142 | kfree(bip->bip_buf); | 103 | kfree(bip->bip_buf); |
143 | 104 | ||
144 | if (use_bip_pool(bip->bip_slab)) | 105 | if (bs) { |
106 | if (bip->bip_slab != BIO_POOL_NONE) | ||
107 | bvec_free(bs->bvec_integrity_pool, bip->bip_vec, | ||
108 | bip->bip_slab); | ||
109 | |||
145 | mempool_free(bip, bs->bio_integrity_pool); | 110 | mempool_free(bip, bs->bio_integrity_pool); |
146 | else | 111 | } else { |
147 | kmem_cache_free(bip_slab[bip->bip_slab].slab, bip); | 112 | kfree(bip); |
113 | } | ||
148 | 114 | ||
149 | bio->bi_integrity = NULL; | 115 | bio->bi_integrity = NULL; |
150 | } | 116 | } |
@@ -747,13 +713,14 @@ EXPORT_SYMBOL(bio_integrity_clone); | |||
747 | 713 | ||
748 | int bioset_integrity_create(struct bio_set *bs, int pool_size) | 714 | int bioset_integrity_create(struct bio_set *bs, int pool_size) |
749 | { | 715 | { |
750 | unsigned int max_slab = vecs_to_idx(BIO_MAX_PAGES); | ||
751 | |||
752 | if (bs->bio_integrity_pool) | 716 | if (bs->bio_integrity_pool) |
753 | return 0; | 717 | return 0; |
754 | 718 | ||
755 | bs->bio_integrity_pool = | 719 | bs->bio_integrity_pool = mempool_create_slab_pool(pool_size, bip_slab); |
756 | mempool_create_slab_pool(pool_size, bip_slab[max_slab].slab); | 720 | |
721 | bs->bvec_integrity_pool = biovec_create_pool(bs, pool_size); | ||
722 | if (!bs->bvec_integrity_pool) | ||
723 | return -1; | ||
757 | 724 | ||
758 | if (!bs->bio_integrity_pool) | 725 | if (!bs->bio_integrity_pool) |
759 | return -1; | 726 | return -1; |
@@ -766,13 +733,14 @@ void bioset_integrity_free(struct bio_set *bs) | |||
766 | { | 733 | { |
767 | if (bs->bio_integrity_pool) | 734 | if (bs->bio_integrity_pool) |
768 | mempool_destroy(bs->bio_integrity_pool); | 735 | mempool_destroy(bs->bio_integrity_pool); |
736 | |||
737 | if (bs->bvec_integrity_pool) | ||
738 | mempool_destroy(bs->bio_integrity_pool); | ||
769 | } | 739 | } |
770 | EXPORT_SYMBOL(bioset_integrity_free); | 740 | EXPORT_SYMBOL(bioset_integrity_free); |
771 | 741 | ||
772 | void __init bio_integrity_init(void) | 742 | void __init bio_integrity_init(void) |
773 | { | 743 | { |
774 | unsigned int i; | ||
775 | |||
776 | /* | 744 | /* |
777 | * kintegrityd won't block much but may burn a lot of CPU cycles. | 745 | * kintegrityd won't block much but may burn a lot of CPU cycles. |
778 | * Make it highpri CPU intensive wq with max concurrency of 1. | 746 | * Make it highpri CPU intensive wq with max concurrency of 1. |
@@ -782,14 +750,10 @@ void __init bio_integrity_init(void) | |||
782 | if (!kintegrityd_wq) | 750 | if (!kintegrityd_wq) |
783 | panic("Failed to create kintegrityd\n"); | 751 | panic("Failed to create kintegrityd\n"); |
784 | 752 | ||
785 | for (i = 0 ; i < BIOVEC_NR_POOLS ; i++) { | 753 | bip_slab = kmem_cache_create("bio_integrity_payload", |
786 | unsigned int size; | 754 | sizeof(struct bio_integrity_payload) + |
787 | 755 | sizeof(struct bio_vec) * BIP_INLINE_VECS, | |
788 | size = sizeof(struct bio_integrity_payload) | 756 | 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL); |
789 | + bip_slab[i].nr_vecs * sizeof(struct bio_vec); | 757 | if (!bip_slab) |
790 | 758 | panic("Failed to create slab\n"); | |
791 | bip_slab[i].slab = | ||
792 | kmem_cache_create(bip_slab[i].name, size, 0, | ||
793 | SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL); | ||
794 | } | ||
795 | } | 759 | } |