aboutsummaryrefslogtreecommitdiffstats
path: root/net/rds
diff options
context:
space:
mode:
authorAndy Grover <andy.grover@oracle.com>2010-03-11 08:49:59 -0500
committerDavid S. Miller <davem@davemloft.net>2010-03-17 00:16:55 -0400
commit2e7b3b994529d4760231a45a6b88950187bda877 (patch)
treed073317ace5a9ef948a826aed32e8b35f21e2193 /net/rds
parent8e82376e5f72bb576504c8c6117685e56c1b97db (diff)
RDS: Fix congestion issues for loopback
We have two kinds of loopback: software (via loop transport) and hardware (via IB). sw is used for 127.0.0.1, and doesn't support rdma ops. hw is used for sends to local device IPs, and supports rdma. Both are used in different cases. For both of these, when there is a congestion map update, we want to call rds_cong_map_updated() but not actually send anything -- since loopback local and foreign congestion maps point to the same spot, they're already in sync. The old code never called sw loop's xmit_cong_map(), so rds_cong_map_updated() wasn't being called for it. sw loop ports would not work right with the congestion monitor. Fixing that meant that hw loopback now would send congestion maps to itself. This is also undesirable (racy), so we check for this case in the ib-specific xmit code. Signed-off-by: Andy Grover <andy.grover@oracle.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/rds')
-rw-r--r--net/rds/cong.c2
-rw-r--r--net/rds/ib_send.c7
-rw-r--r--net/rds/loop.c7
3 files changed, 7 insertions, 9 deletions
diff --git a/net/rds/cong.c b/net/rds/cong.c
index 6d06cac2649c..dd2711df640b 100644
--- a/net/rds/cong.c
+++ b/net/rds/cong.c
@@ -218,8 +218,6 @@ void rds_cong_queue_updates(struct rds_cong_map *map)
218 spin_lock_irqsave(&rds_cong_lock, flags); 218 spin_lock_irqsave(&rds_cong_lock, flags);
219 219
220 list_for_each_entry(conn, &map->m_conn_list, c_map_item) { 220 list_for_each_entry(conn, &map->m_conn_list, c_map_item) {
221 if (conn->c_loopback)
222 continue;
223 if (!test_and_set_bit(0, &conn->c_map_queued)) { 221 if (!test_and_set_bit(0, &conn->c_map_queued)) {
224 rds_stats_inc(s_cong_update_queued); 222 rds_stats_inc(s_cong_update_queued);
225 queue_delayed_work(rds_wq, &conn->c_send_w, 0); 223 queue_delayed_work(rds_wq, &conn->c_send_w, 0);
diff --git a/net/rds/ib_send.c b/net/rds/ib_send.c
index f380c3f01256..c18228aec779 100644
--- a/net/rds/ib_send.c
+++ b/net/rds/ib_send.c
@@ -482,6 +482,13 @@ int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
482 BUG_ON(off % RDS_FRAG_SIZE); 482 BUG_ON(off % RDS_FRAG_SIZE);
483 BUG_ON(hdr_off != 0 && hdr_off != sizeof(struct rds_header)); 483 BUG_ON(hdr_off != 0 && hdr_off != sizeof(struct rds_header));
484 484
485 /* Do not send cong updates to IB loopback */
486 if (conn->c_loopback
487 && rm->m_inc.i_hdr.h_flags & RDS_FLAG_CONG_BITMAP) {
488 rds_cong_map_updated(conn->c_fcong, ~(u64) 0);
489 return sizeof(struct rds_header) + RDS_CONG_MAP_BYTES;
490 }
491
485 /* FIXME we may overallocate here */ 492 /* FIXME we may overallocate here */
486 if (be32_to_cpu(rm->m_inc.i_hdr.h_len) == 0) 493 if (be32_to_cpu(rm->m_inc.i_hdr.h_len) == 0)
487 i = 1; 494 i = 1;
diff --git a/net/rds/loop.c b/net/rds/loop.c
index 4a61997f554d..93a45f1ce61f 100644
--- a/net/rds/loop.c
+++ b/net/rds/loop.c
@@ -80,16 +80,9 @@ static int rds_loop_xmit_cong_map(struct rds_connection *conn,
80 struct rds_cong_map *map, 80 struct rds_cong_map *map,
81 unsigned long offset) 81 unsigned long offset)
82{ 82{
83 unsigned long i;
84
85 BUG_ON(offset); 83 BUG_ON(offset);
86 BUG_ON(map != conn->c_lcong); 84 BUG_ON(map != conn->c_lcong);
87 85
88 for (i = 0; i < RDS_CONG_MAP_PAGES; i++) {
89 memcpy((void *)conn->c_fcong->m_page_addrs[i],
90 (void *)map->m_page_addrs[i], PAGE_SIZE);
91 }
92
93 rds_cong_map_updated(conn->c_fcong, ~(u64) 0); 86 rds_cong_map_updated(conn->c_fcong, ~(u64) 0);
94 87
95 return sizeof(struct rds_header) + RDS_CONG_MAP_BYTES; 88 return sizeof(struct rds_header) + RDS_CONG_MAP_BYTES;