author     Shan Wei <davidshan@tencent.com>        2012-11-12 10:52:01 -0500
committer  David S. Miller <davem@davemloft.net>   2012-11-19 18:59:44 -0500
commit     ae4b46e9d7128d2d76e6857fe0b9fc240e8ac695 (patch)
tree       b24b980aa18e64b07398ddb1c3ac34418e7f7a6c
parent     1f743b07652f11100bee004e261b9931632beac1 (diff)
net: rds: use this_cpu_* per-cpu helper
Signed-off-by: Shan Wei <davidshan@tencent.com>
Reviewed-by: Christoph Lameter <cl@linux.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--  net/rds/ib.h      |  2 +-
-rw-r--r--  net/rds/ib_recv.c | 24 +++++++++++++-----------
2 files changed, 14 insertions(+), 12 deletions(-)
diff --git a/net/rds/ib.h b/net/rds/ib.h
index 8d2b3d5a7c21..7280ab8810c2 100644
--- a/net/rds/ib.h
+++ b/net/rds/ib.h
@@ -50,7 +50,7 @@ struct rds_ib_cache_head {
 };
 
 struct rds_ib_refill_cache {
-	struct rds_ib_cache_head *percpu;
+	struct rds_ib_cache_head __percpu *percpu;
 	struct list_head *xfer;
 	struct list_head *ready;
 };
diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c
index 8d194912c695..8c5bc857f04d 100644
--- a/net/rds/ib_recv.c
+++ b/net/rds/ib_recv.c
@@ -418,20 +418,21 @@ static void rds_ib_recv_cache_put(struct list_head *new_item,
 				 struct rds_ib_refill_cache *cache)
 {
 	unsigned long flags;
-	struct rds_ib_cache_head *chp;
 	struct list_head *old;
+	struct list_head __percpu *chpfirst;
 
 	local_irq_save(flags);
 
-	chp = per_cpu_ptr(cache->percpu, smp_processor_id());
-	if (!chp->first)
+	chpfirst = __this_cpu_read(cache->percpu->first);
+	if (!chpfirst)
 		INIT_LIST_HEAD(new_item);
 	else /* put on front */
-		list_add_tail(new_item, chp->first);
-	chp->first = new_item;
-	chp->count++;
+		list_add_tail(new_item, chpfirst);
 
-	if (chp->count < RDS_IB_RECYCLE_BATCH_COUNT)
+	__this_cpu_write(chpfirst, new_item);
+	__this_cpu_inc(cache->percpu->count);
+
+	if (__this_cpu_read(cache->percpu->count) < RDS_IB_RECYCLE_BATCH_COUNT)
 		goto end;
 
 	/*
@@ -443,12 +444,13 @@ static void rds_ib_recv_cache_put(struct list_head *new_item,
 	do {
 		old = xchg(&cache->xfer, NULL);
 		if (old)
-			list_splice_entire_tail(old, chp->first);
-		old = cmpxchg(&cache->xfer, NULL, chp->first);
+			list_splice_entire_tail(old, chpfirst);
+		old = cmpxchg(&cache->xfer, NULL, chpfirst);
 	} while (old);
 
-	chp->first = NULL;
-	chp->count = 0;
+
+	__this_cpu_write(chpfirst, NULL);
+	__this_cpu_write(cache->percpu->count, 0);
 end:
 	local_irq_restore(flags);
 }
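
For context, the conversion replaces the open-coded per_cpu_ptr(cache->percpu, smp_processor_id()) dereferences with the this_cpu_* family of accessors, which resolve the current CPU's slot in a single operation. The double-underscore forms (__this_cpu_read and friends) are the variants that assume preemption is already excluded, which holds here because the caller runs under local_irq_save(). The __percpu annotation added in ib.h lets sparse warn about plain dereferences of the dynamically allocated per-cpu pointer. The following is a minimal sketch of the two styles under those assumptions, not code from the patch; the names demo_head, demo_cache, demo_put_old and demo_put_new are made up for illustration.

/*
 * Sketch only: contrasts manual per-CPU pointer arithmetic with the
 * __this_cpu_* helpers for dynamically allocated per-cpu data.
 */
#include <linux/list.h>
#include <linux/percpu.h>
#include <linux/smp.h>
#include <linux/irqflags.h>

struct demo_head {
	struct list_head	*first;
	unsigned long		count;
};

struct demo_cache {
	struct demo_head __percpu *percpu;	/* from alloc_percpu(struct demo_head) */
};

/* Old style: look up this CPU's slot by hand, then dereference it. */
static void demo_put_old(struct demo_cache *cache, struct list_head *new_item)
{
	unsigned long flags;
	struct demo_head *chp;

	local_irq_save(flags);
	chp = per_cpu_ptr(cache->percpu, smp_processor_id());
	chp->first = new_item;
	chp->count++;
	local_irq_restore(flags);
}

/*
 * New style: let the __this_cpu_* operations add the per-cpu offset.
 * The __ variants are safe here because interrupts are disabled, so the
 * task cannot migrate to another CPU between the accesses.
 */
static void demo_put_new(struct demo_cache *cache, struct list_head *new_item)
{
	unsigned long flags;

	local_irq_save(flags);
	__this_cpu_write(cache->percpu->first, new_item);
	__this_cpu_inc(cache->percpu->count);
	local_irq_restore(flags);
}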