author     Herbert Xu <herbert@gondor.apana.org.au>    2015-03-23 09:50:27 -0400
committer  David S. Miller <davem@davemloft.net>       2015-03-23 22:07:52 -0400
commit     b9ecfdaa1090b5988422eaf5348ea1954d2d7219
tree       c2721bb2c9be54c153869b0ed3b68bbee0bfc70a /lib/rhashtable.c
parent     b824478b2145be78ac19e1cf44e2b9036c7a9608
rhashtable: Allow GFP_ATOMIC bucket table allocation
This patch adds the ability to allocate the bucket table with GFP_ATOMIC
instead of GFP_KERNEL. This is needed when we perform an immediate
rehash during insertion.
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Acked-by: Thomas Graf <tgraf@suug.ch>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'lib/rhashtable.c')
 lib/rhashtable.c | 26 +++++++++++++++-----------
 1 file changed, 15 insertions(+), 11 deletions(-)
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 5e04403e25f5..220a11a13d40 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -58,7 +58,8 @@ EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held);
 #endif
 
 
-static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl)
+static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl,
+			      gfp_t gfp)
 {
 	unsigned int i, size;
 #if defined(CONFIG_PROVE_LOCKING)
@@ -75,12 +76,13 @@ static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl)
 
 	if (sizeof(spinlock_t) != 0) {
 #ifdef CONFIG_NUMA
-		if (size * sizeof(spinlock_t) > PAGE_SIZE)
+		if (size * sizeof(spinlock_t) > PAGE_SIZE &&
+		    gfp == GFP_KERNEL)
 			tbl->locks = vmalloc(size * sizeof(spinlock_t));
 		else
 #endif
 			tbl->locks = kmalloc_array(size, sizeof(spinlock_t),
-						   GFP_KERNEL);
+						   gfp);
 		if (!tbl->locks)
 			return -ENOMEM;
 		for (i = 0; i < size; i++)
@@ -105,23 +107,25 @@ static void bucket_table_free_rcu(struct rcu_head *head)
 }
 
 static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
-					       size_t nbuckets)
+					       size_t nbuckets,
+					       gfp_t gfp)
 {
 	struct bucket_table *tbl = NULL;
 	size_t size;
 	int i;
 
 	size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);
-	if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER))
-		tbl = kzalloc(size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
-	if (tbl == NULL)
+	if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER) ||
+	    gfp != GFP_KERNEL)
+		tbl = kzalloc(size, gfp | __GFP_NOWARN | __GFP_NORETRY);
+	if (tbl == NULL && gfp == GFP_KERNEL)
 		tbl = vzalloc(size);
 	if (tbl == NULL)
 		return NULL;
 
 	tbl->size = nbuckets;
 
-	if (alloc_bucket_locks(ht, tbl) < 0) {
+	if (alloc_bucket_locks(ht, tbl, gfp) < 0) {
 		bucket_table_free(tbl);
 		return NULL;
 	}
@@ -288,7 +292,7 @@ static int rhashtable_expand(struct rhashtable *ht)
 
 	old_tbl = rhashtable_last_table(ht, old_tbl);
 
-	new_tbl = bucket_table_alloc(ht, old_tbl->size * 2);
+	new_tbl = bucket_table_alloc(ht, old_tbl->size * 2, GFP_KERNEL);
 	if (new_tbl == NULL)
 		return -ENOMEM;
 
@@ -332,7 +336,7 @@ static int rhashtable_shrink(struct rhashtable *ht)
 	if (rht_dereference(old_tbl->future_tbl, ht))
 		return -EEXIST;
 
-	new_tbl = bucket_table_alloc(ht, size);
+	new_tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
 	if (new_tbl == NULL)
 		return -ENOMEM;
 
@@ -689,7 +693,7 @@ int rhashtable_init(struct rhashtable *ht,
 		}
 	}
 
-	tbl = bucket_table_alloc(ht, size);
+	tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
 	if (tbl == NULL)
 		return -ENOMEM;
 
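
For context, here is a minimal sketch of how the new gfp parameter could be
used by a caller that cannot sleep, such as an immediate rehash performed from
the insertion path. This caller is not part of this patch; the helper name and
growth factor below are assumptions for illustration only. The gfp == GFP_KERNEL
checks added above exist precisely so that an atomic caller can never end up in
vmalloc()/vzalloc(), both of which may sleep.

/* Illustrative sketch, not part of this change: a non-sleeping caller
 * requests the replacement table with GFP_ATOMIC.  With gfp != GFP_KERNEL,
 * bucket_table_alloc() takes the kzalloc(gfp) path for the table and
 * kmalloc_array(..., gfp) for the bucket locks, and never falls back to
 * the vzalloc()/vmalloc() paths.
 */
static struct bucket_table *example_grow_atomic(struct rhashtable *ht,
						struct bucket_table *old_tbl)
{
	return bucket_table_alloc(ht, old_tbl->size * 2, GFP_ATOMIC);
}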