author		Andy Grover <andy.grover@oracle.com>	2009-04-01 04:20:20 -0400
committer	David S. Miller <davem@davemloft.net>	2009-04-02 03:52:22 -0400
commit		8cbd9606a6367c221a7bbcc47f3ab1a8c31b6437
tree		9ff2adf88be32383fd59dc133f1cd6670b9ee815 /net/rds/ib_recv.c
parent		745cbccac3fe8cead529a1b3358e1e86a1505bfa
RDS: Use spinlock to protect 64b value update on 32b archs
We have a 64-bit value that needs to be set atomically.
This is easy and quick on all 64-bit archs, and can also be done
on x86/32 with set_64bit() (which uses cmpxchg8b). However, other
32-bit archs don't have this.

I had actually changed this to the current state in preparation for
mainline, because the old way (using a spinlock on 32-bit) resulted in
unsightly #ifdefs in the code. But obviously, being correct takes
precedence.
Signed-off-by: Andy Grover <andy.grover@oracle.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
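
For illustration, here is a minimal userspace sketch of the pattern the !KERNEL_HAS_ATOMIC64 branch below adopts. A pthread mutex stands in for the kernel spinlock, and the struct and function names (ack_state, set_ack, get_ack) are invented for this example, not taken from the patch:

/* Compile with: cc -pthread sketch.c */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

struct ack_state {
        pthread_mutex_t lock;   /* stands in for ic->i_ack_lock */
        uint64_t ack_next;      /* stands in for ic->i_ack_next */
};

static struct ack_state state = { .lock = PTHREAD_MUTEX_INITIALIZER };

/* Both 32-bit halves of the store happen under the lock. */
static void set_ack(struct ack_state *s, uint64_t seq)
{
        pthread_mutex_lock(&s->lock);
        s->ack_next = seq;
        pthread_mutex_unlock(&s->lock);
}

/* A reader can never observe a half-written ("torn") value. */
static uint64_t get_ack(struct ack_state *s)
{
        uint64_t seq;

        pthread_mutex_lock(&s->lock);
        seq = s->ack_next;
        pthread_mutex_unlock(&s->lock);
        return seq;
}

int main(void)
{
        set_ack(&state, 0x100000001ULL);
        printf("ack_next = %#llx\n", (unsigned long long)get_ack(&state));
        return 0;
}

Without the lock, a 32-bit machine performs the u64 store as two 32-bit stores, so a concurrent reader could see one new half and one stale half; that torn value is the bug the spinlock path prevents.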
Diffstat (limited to 'net/rds/ib_recv.c')
-rw-r--r--	net/rds/ib_recv.c | 37 +++++++++++++++++++++++++++++++++++++++++--
1 file changed, 35 insertions(+), 2 deletions(-)
diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c
index 5061b5502162..36d931573ff4 100644
--- a/net/rds/ib_recv.c
+++ b/net/rds/ib_recv.c
@@ -395,10 +395,37 @@ void rds_ib_recv_init_ack(struct rds_ib_connection *ic)
  * room for it beyond the ring size. Send completion notices its special
  * wr_id and avoids working with the ring in that case.
  */
+#ifndef KERNEL_HAS_ATOMIC64
 static void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq,
                            int ack_required)
 {
-        rds_ib_set_64bit(&ic->i_ack_next, seq);
+        unsigned long flags;
+
+        spin_lock_irqsave(&ic->i_ack_lock, flags);
+        ic->i_ack_next = seq;
+        if (ack_required)
+                set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
+        spin_unlock_irqrestore(&ic->i_ack_lock, flags);
+}
+
+static u64 rds_ib_get_ack(struct rds_ib_connection *ic)
+{
+        unsigned long flags;
+        u64 seq;
+
+        clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
+
+        spin_lock_irqsave(&ic->i_ack_lock, flags);
+        seq = ic->i_ack_next;
+        spin_unlock_irqrestore(&ic->i_ack_lock, flags);
+
+        return seq;
+}
+#else
+static void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq,
+                           int ack_required)
+{
+        atomic64_set(&ic->i_ack_next, seq);
         if (ack_required) {
                 smp_mb__before_clear_bit();
                 set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
@@ -410,8 +437,10 @@ static u64 rds_ib_get_ack(struct rds_ib_connection *ic)
         clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
         smp_mb__after_clear_bit();
 
-        return ic->i_ack_next;
+        return atomic64_read(&ic->i_ack_next);
 }
+#endif
+
 
 static void rds_ib_send_ack(struct rds_ib_connection *ic, unsigned int adv_credits)
 {
@@ -464,6 +493,10 @@ static void rds_ib_send_ack(struct rds_ib_connection *ic, unsigned int adv_credi
  * - i_ack_next, which is the last sequence number we received
  *
  * Potentially, send queue and receive queue handlers can run concurrently.
+ * It would be nice to not have to use a spinlock to synchronize things,
+ * but the one problem that rules this out is that 64bit updates are
+ * not atomic on all platforms. Things would be a lot simpler if
+ * we had atomic64 or maybe cmpxchg64 everywhere.
  *
  * Reconnecting complicates this picture just slightly. When we
  * reconnect, we may be seeing duplicate packets. The peer
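
The new comment above notes that things would be simpler with atomic64 or cmpxchg64 everywhere. As a rough userspace analogue of the KERNEL_HAS_ATOMIC64 branch, C11 atomics provide the same lock-free set/read that atomic64_set() and atomic64_read() give the kernel; the variable name here is invented for the sketch:

/* Compile with: cc -std=c11 sketch.c (32-bit targets may need -latomic) */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uint64_t ack_next;       /* analogue of atomic64_t i_ack_next */

int main(void)
{
        atomic_store(&ack_next, 0x100000001ULL);        /* ~ atomic64_set() */
        printf("ack_next = %#llx\n",
               (unsigned long long)atomic_load(&ack_next)); /* ~ atomic64_read() */
        return 0;
}

On 32-bit targets without a native 64-bit atomic, the C implementation may itself fall back to a lock internally, which is the same tradeoff this patch makes explicit in the kernel.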