| author | Trond Myklebust <trondmy@gmail.com> | 2018-10-01 10:41:52 -0400 |
|---|---|---|
| committer | J. Bruce Fields <bfields@redhat.com> | 2018-10-29 16:58:04 -0400 |
| commit | 1863d77f15da0addcd293a1719fa5d3ef8cde3ca (patch) | |
| tree | 992c15ae2c740f4422ba9e29653389c856c3986a | /net/sunrpc/cache.c |
| parent | d48cf356a13073853f19be6ca5ebbecfc2762ebe (diff) | |
SUNRPC: Replace the cache_detail->hash_lock with a regular spinlock
Now that the reader functions are all RCU protected, use a regular
spinlock rather than a reader/writer lock.
Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
Signed-off-by: J. Bruce Fields <bfields@redhat.com>
Diffstat (limited to 'net/sunrpc/cache.c')
-rw-r--r--  net/sunrpc/cache.c | 46
1 file changed, 23 insertions(+), 23 deletions(-)
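
The change itself is mechanical: every write_lock()/write_unlock() on cache_detail->hash_lock becomes spin_lock()/spin_unlock(), and rwlock_init() becomes spin_lock_init(). What makes this safe is the pattern established by the parent commit: lookups walk the hash chains under rcu_read_lock() alone and never take hash_lock, so the lock's read side has no users left and only writer-vs-writer exclusion remains. The sketch below illustrates that pattern in isolation; it is not code from net/sunrpc/cache.c, and the demo_* names are invented for the example.

```c
/*
 * Illustrative sketch of the locking scheme this patch relies on
 * (not the actual net/sunrpc/cache.c code; demo_* names are made up).
 * Readers traverse the hash chain locklessly inside an RCU read-side
 * critical section; insert/remove paths serialize against each other
 * with a plain spinlock and use the _rcu list helpers so concurrent
 * readers always see a consistent list.
 */
#include <linux/spinlock.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct demo_entry {
	struct hlist_node	cache_list;
	int			key;
	struct rcu_head		rcu;
};

/* Writer-only lock: this is what the old rwlock collapses into. */
static DEFINE_SPINLOCK(demo_hash_lock);
static struct hlist_head demo_table[16];

/* Reader: no hash_lock at all, just rcu_read_lock(). */
static struct demo_entry *demo_lookup(int key)
{
	struct demo_entry *e;

	rcu_read_lock();
	hlist_for_each_entry_rcu(e, &demo_table[key & 15], cache_list) {
		if (e->key == key) {
			rcu_read_unlock();
			return e;	/* real code takes a reference first */
		}
	}
	rcu_read_unlock();
	return NULL;
}

/* Writers: mutual exclusion against other writers only. */
static void demo_insert(struct demo_entry *e)
{
	spin_lock(&demo_hash_lock);
	hlist_add_head_rcu(&e->cache_list, &demo_table[e->key & 15]);
	spin_unlock(&demo_hash_lock);
}

static void demo_remove(struct demo_entry *e)
{
	spin_lock(&demo_hash_lock);
	hlist_del_init_rcu(&e->cache_list);
	spin_unlock(&demo_hash_lock);
	kfree_rcu(e, rcu);	/* free only after existing readers finish */
}
```

With no readers contending for the lock, a plain spinlock is sufficient and cheaper than an rwlock for the remaining update paths. In the real cache.c, entry lifetime is additionally managed with reference counts (cache_get()/cache_put()) on top of the RCU-safe list operations, as visible in the diff below.
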
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index 593cf8607414..f96345b1180e 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -92,7 +92,7 @@ static struct cache_head *sunrpc_cache_add_entry(struct cache_detail *detail,
 	cache_init(new, detail);
 	detail->init(new, key);
 
-	write_lock(&detail->hash_lock);
+	spin_lock(&detail->hash_lock);
 
 	/* check if entry appeared while we slept */
 	hlist_for_each_entry_rcu(tmp, head, cache_list) {
@@ -104,7 +104,7 @@ static struct cache_head *sunrpc_cache_add_entry(struct cache_detail *detail,
 				break;
 			}
 			cache_get(tmp);
-			write_unlock(&detail->hash_lock);
+			spin_unlock(&detail->hash_lock);
 			cache_put(new, detail);
 			return tmp;
 		}
@@ -113,7 +113,7 @@ static struct cache_head *sunrpc_cache_add_entry(struct cache_detail *detail,
 	hlist_add_head_rcu(&new->cache_list, head);
 	detail->entries++;
 	cache_get(new);
-	write_unlock(&detail->hash_lock);
+	spin_unlock(&detail->hash_lock);
 
 	if (freeme)
 		cache_put(freeme, detail);
@@ -167,18 +167,18 @@ struct cache_head *sunrpc_cache_update(struct cache_detail *detail,
 	struct cache_head *tmp;
 
 	if (!test_bit(CACHE_VALID, &old->flags)) {
-		write_lock(&detail->hash_lock);
+		spin_lock(&detail->hash_lock);
 		if (!test_bit(CACHE_VALID, &old->flags)) {
 			if (test_bit(CACHE_NEGATIVE, &new->flags))
 				set_bit(CACHE_NEGATIVE, &old->flags);
 			else
 				detail->update(old, new);
 			cache_fresh_locked(old, new->expiry_time, detail);
-			write_unlock(&detail->hash_lock);
+			spin_unlock(&detail->hash_lock);
 			cache_fresh_unlocked(old, detail);
 			return old;
 		}
-		write_unlock(&detail->hash_lock);
+		spin_unlock(&detail->hash_lock);
 	}
 	/* We need to insert a new entry */
 	tmp = detail->alloc();
@@ -189,7 +189,7 @@ struct cache_head *sunrpc_cache_update(struct cache_detail *detail,
 	cache_init(tmp, detail);
 	detail->init(tmp, old);
 
-	write_lock(&detail->hash_lock);
+	spin_lock(&detail->hash_lock);
 	if (test_bit(CACHE_NEGATIVE, &new->flags))
 		set_bit(CACHE_NEGATIVE, &tmp->flags);
 	else
@@ -199,7 +199,7 @@ struct cache_head *sunrpc_cache_update(struct cache_detail *detail,
 	cache_get(tmp);
 	cache_fresh_locked(tmp, new->expiry_time, detail);
 	cache_fresh_locked(old, 0, detail);
-	write_unlock(&detail->hash_lock);
+	spin_unlock(&detail->hash_lock);
 	cache_fresh_unlocked(tmp, detail);
 	cache_fresh_unlocked(old, detail);
 	cache_put(old, detail);
@@ -239,7 +239,7 @@ static int try_to_negate_entry(struct cache_detail *detail, struct cache_head *h
 {
 	int rv;
 
-	write_lock(&detail->hash_lock);
+	spin_lock(&detail->hash_lock);
 	rv = cache_is_valid(h);
 	if (rv == -EAGAIN) {
 		set_bit(CACHE_NEGATIVE, &h->flags);
@@ -247,7 +247,7 @@ static int try_to_negate_entry(struct cache_detail *detail, struct cache_head *h
 				   detail);
 		rv = -ENOENT;
 	}
-	write_unlock(&detail->hash_lock);
+	spin_unlock(&detail->hash_lock);
 	cache_fresh_unlocked(h, detail);
 	return rv;
 }
@@ -357,7 +357,7 @@ static struct delayed_work cache_cleaner;
 
 void sunrpc_init_cache_detail(struct cache_detail *cd)
 {
-	rwlock_init(&cd->hash_lock);
+	spin_lock_init(&cd->hash_lock);
 	INIT_LIST_HEAD(&cd->queue);
 	spin_lock(&cache_list_lock);
 	cd->nextcheck = 0;
@@ -377,11 +377,11 @@ void sunrpc_destroy_cache_detail(struct cache_detail *cd)
 {
 	cache_purge(cd);
 	spin_lock(&cache_list_lock);
-	write_lock(&cd->hash_lock);
+	spin_lock(&cd->hash_lock);
 	if (current_detail == cd)
 		current_detail = NULL;
 	list_del_init(&cd->others);
-	write_unlock(&cd->hash_lock);
+	spin_unlock(&cd->hash_lock);
 	spin_unlock(&cache_list_lock);
 	if (list_empty(&cache_list)) {
 		/* module must be being unloaded so its safe to kill the worker */
@@ -438,7 +438,7 @@ static int cache_clean(void)
 		struct hlist_head *head;
 		struct hlist_node *tmp;
 
-		write_lock(&current_detail->hash_lock);
+		spin_lock(&current_detail->hash_lock);
 
 		/* Ok, now to clean this strand */
 
@@ -455,7 +455,7 @@ static int cache_clean(void)
 			break;
 		}
 
-		write_unlock(&current_detail->hash_lock);
+		spin_unlock(&current_detail->hash_lock);
 		d = current_detail;
 		if (!ch)
 			current_index ++;
@@ -510,9 +510,9 @@ void cache_purge(struct cache_detail *detail)
 	struct hlist_node *tmp = NULL;
 	int i = 0;
 
-	write_lock(&detail->hash_lock);
+	spin_lock(&detail->hash_lock);
 	if (!detail->entries) {
-		write_unlock(&detail->hash_lock);
+		spin_unlock(&detail->hash_lock);
 		return;
 	}
 
@@ -524,13 +524,13 @@ void cache_purge(struct cache_detail *detail)
 			detail->entries--;
 
 			set_bit(CACHE_CLEANED, &ch->flags);
-			write_unlock(&detail->hash_lock);
+			spin_unlock(&detail->hash_lock);
 			cache_fresh_unlocked(ch, detail);
 			cache_put(ch, detail);
-			write_lock(&detail->hash_lock);
+			spin_lock(&detail->hash_lock);
 		}
 	}
-	write_unlock(&detail->hash_lock);
+	spin_unlock(&detail->hash_lock);
 }
 EXPORT_SYMBOL_GPL(cache_purge);
 
@@ -1873,13 +1873,13 @@ EXPORT_SYMBOL_GPL(sunrpc_cache_unregister_pipefs);
 
 void sunrpc_cache_unhash(struct cache_detail *cd, struct cache_head *h)
 {
-	write_lock(&cd->hash_lock);
+	spin_lock(&cd->hash_lock);
 	if (!hlist_unhashed(&h->cache_list)){
 		hlist_del_init_rcu(&h->cache_list);
 		cd->entries--;
-		write_unlock(&cd->hash_lock);
+		spin_unlock(&cd->hash_lock);
 		cache_put(h, cd);
 	} else
-		write_unlock(&cd->hash_lock);
+		spin_unlock(&cd->hash_lock);
 }
 EXPORT_SYMBOL_GPL(sunrpc_cache_unhash);