Diffstat (limited to 'net/rds/iw_recv.c')
 net/rds/iw_recv.c | 37 +++++++++++++++++++++++++++++++++--
 1 file changed, 35 insertions(+), 2 deletions(-)
diff --git a/net/rds/iw_recv.c b/net/rds/iw_recv.c
index a1931f0027a2..fde470fa50d5 100644
--- a/net/rds/iw_recv.c
+++ b/net/rds/iw_recv.c
@@ -395,10 +395,37 @@ void rds_iw_recv_init_ack(struct rds_iw_connection *ic)
  * room for it beyond the ring size. Send completion notices its special
  * wr_id and avoids working with the ring in that case.
  */
+#ifndef KERNEL_HAS_ATOMIC64
 static void rds_iw_set_ack(struct rds_iw_connection *ic, u64 seq,
 				int ack_required)
 {
-	rds_iw_set_64bit(&ic->i_ack_next, seq);
+	unsigned long flags;
+
+	spin_lock_irqsave(&ic->i_ack_lock, flags);
+	ic->i_ack_next = seq;
+	if (ack_required)
+		set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
+	spin_unlock_irqrestore(&ic->i_ack_lock, flags);
+}
+
+static u64 rds_iw_get_ack(struct rds_iw_connection *ic)
+{
+	unsigned long flags;
+	u64 seq;
+
+	clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
+
+	spin_lock_irqsave(&ic->i_ack_lock, flags);
+	seq = ic->i_ack_next;
+	spin_unlock_irqrestore(&ic->i_ack_lock, flags);
+
+	return seq;
+}
+#else
+static void rds_iw_set_ack(struct rds_iw_connection *ic, u64 seq,
+				int ack_required)
+{
+	atomic64_set(&ic->i_ack_next, seq);
 	if (ack_required) {
 		smp_mb__before_clear_bit();
 		set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
@@ -410,8 +437,10 @@ static u64 rds_iw_get_ack(struct rds_iw_connection *ic)
 	clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
 	smp_mb__after_clear_bit();
 
-	return ic->i_ack_next;
+	return atomic64_read(&ic->i_ack_next);
 }
+#endif
+
 
 static void rds_iw_send_ack(struct rds_iw_connection *ic, unsigned int adv_credits)
 {
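Taken together, the two hunks above select an implementation at compile time: architectures without native 64-bit atomics (no KERNEL_HAS_ATOMIC64) protect the plain u64 ack sequence with a spinlock, while the rest use atomic64_t operations with explicit barriers around the IB_ACK_REQUESTED bit. The sketch below is a minimal userspace analogue of that pattern, assuming C11 <stdatomic.h> and pthreads; ack_state, ack_set(), ack_get() and the HAVE_ATOMIC64 switch are hypothetical stand-ins for the kernel's rds_iw_connection fields and KERNEL_HAS_ATOMIC64, not code from this patch.

	/* cc -std=c11 -pthread demo.c  (add -DHAVE_ATOMIC64 for the lock-free path) */
	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#ifndef HAVE_ATOMIC64			/* stands in for KERNEL_HAS_ATOMIC64 */
	struct ack_state {
		pthread_mutex_t lock;		/* plays the role of ic->i_ack_lock */
		uint64_t next;			/* plain u64; may tear if read unlocked */
		atomic_bool requested;		/* plays the role of IB_ACK_REQUESTED */
	};

	static void ack_set(struct ack_state *s, uint64_t seq, bool required)
	{
		pthread_mutex_lock(&s->lock);
		s->next = seq;			/* 64-bit write is safe under the lock */
		if (required)
			atomic_store(&s->requested, true);
		pthread_mutex_unlock(&s->lock);
	}

	static uint64_t ack_get(struct ack_state *s)
	{
		uint64_t seq;

		/* Clear the flag before reading, as the patch does, so a racing
		 * ack_set() re-raises it instead of the request being lost. */
		atomic_store(&s->requested, false);
		pthread_mutex_lock(&s->lock);
		seq = s->next;
		pthread_mutex_unlock(&s->lock);
		return seq;
	}
	#else
	struct ack_state {
		_Atomic uint64_t next;		/* native 64-bit atomic, no lock needed */
		atomic_bool requested;
	};

	static void ack_set(struct ack_state *s, uint64_t seq, bool required)
	{
		atomic_store(&s->next, seq);	/* seq_cst store: ordered before the flag */
		if (required)
			atomic_store(&s->requested, true);
	}

	static uint64_t ack_get(struct ack_state *s)
	{
		atomic_store(&s->requested, false);	/* seq_cst: flag cleared first */
		return atomic_load(&s->next);
	}
	#endif

	int main(void)
	{
		struct ack_state s = { .requested = false };
	#ifndef HAVE_ATOMIC64
		pthread_mutex_init(&s.lock, NULL);
	#endif
		ack_set(&s, 42, true);
		printf("ack %llu, requested=%d\n",
		       (unsigned long long)ack_get(&s),
		       (int)atomic_load(&s.requested));	/* flag already cleared by get */
		return 0;
	}

As in the patch, both variants keep the same two invariants: the 64-bit value is never read half-updated, and clearing the request flag before reading the value closes the window where a concurrent setter's request could vanish.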
@@ -464,6 +493,10 @@ static void rds_iw_send_ack(struct rds_iw_connection *ic, unsigned int adv_credi
  * - i_ack_next, which is the last sequence number we received
  *
  * Potentially, send queue and receive queue handlers can run concurrently.
+ * It would be nice to not have to use a spinlock to synchronize things,
+ * but the one problem that rules this out is that 64bit updates are
+ * not atomic on all platforms. Things would be a lot simpler if
+ * we had atomic64 or maybe cmpxchg64 everywhere.
 *
 * Reconnecting complicates this picture just slightly. When we
 * reconnect, we may be seeing duplicate packets. The peer
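The comment added by this last hunk records the rationale for the whole patch: a plain 64-bit store is not one atomic operation on every platform. As a hypothetical illustration (not from the patch), on a 32-bit machine the compiler typically splits a u64 store into two 32-bit stores, so an unlocked reader running between them can observe a value that was never written; the variable names below are illustrative only:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t old = 0x00000000ffffffffull;	/* previous i_ack_next      */
		uint64_t new = 0x0000000100000000ull;	/* next sequence number     */

		/* One possible torn result: the writer has stored the low half
		 * of 'new' but not yet the high half, so a concurrent reader
		 * sees new's low bits combined with old's high bits. */
		uint64_t torn = (new & 0x00000000ffffffffull) |
				(old & 0xffffffff00000000ull);

		printf("old  = 0x%016llx\n", (unsigned long long)old);
		printf("new  = 0x%016llx\n", (unsigned long long)new);
		printf("torn = 0x%016llx\n", (unsigned long long)torn); /* zero! */
		return 0;
	}

Which halves mix depends on store order and timing, but either combination is a sequence number that was never sent; holding the spinlock across the read and write (or using a true atomic64_set()/atomic64_read() pair) closes that window.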