author     Andy Grover <andy.grover@oracle.com>    2009-07-17 09:13:24 -0400
committer  David S. Miller <davem@davemloft.net>   2009-07-20 11:03:03 -0400
commit     02a6a2592e41d27644d647f3bce23598649961bc (patch)
tree       7455566e6f3ff13279bb5949f06e398dbb8cff1a /net/rds
parent     9ddbcfa098bae757d3760dd1dbf2847a0bd5a525 (diff)
RDS/IB: Handle connections using RDS 3.0 wire protocol
The big differences between RDS 3.0 and 3.1 are protocol-level flow control, and with 3.1 the header is in front of the data. The header always ends up in the header buffer, and the data goes in the data page. In 3.0 our "header" is a trailer, and will end up either in the data page, the header buffer, or split across the two.

Since 3.1 is backwards-compatible with 3.0, we need to continue to support these cases. This patch does that -- if using RDS 3.0 wire protocol, it will copy the header from wherever it ended up into the header buffer.

Signed-off-by: Andy Grover <andy.grover@oracle.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
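For a rough picture of those three placements, here is a minimal user-space model (not part of the patch; the 4096-byte fragment and 48-byte header are stand-in sizes, and classify() is a hypothetical helper). A 3.0 trailer header lands wherever the payload leaves room in the data page, spilling the rest into the posted header buffer:

/* User-space model only: where does a 3.0 trailer header land? */
#include <stdio.h>

#define FRAG_SIZE 4096u   /* stand-in fragment (data page) size */
#define HDR_SIZE  48u     /* stand-in for sizeof(struct rds_header) */

static void classify(unsigned int data_len)
{
        /* header bytes that still fit in the data page after the payload */
        unsigned int in_data_page = FRAG_SIZE - data_len;

        if (in_data_page > HDR_SIZE)
                in_data_page = HDR_SIZE;

        /* remaining header bytes spill into the header buffer */
        if (in_data_page == 0)
                printf("data_len %4u: case 1, header entirely in header buffer\n", data_len);
        else if (in_data_page == HDR_SIZE)
                printf("data_len %4u: case 2, header entirely in data page\n", data_len);
        else
                printf("data_len %4u: case 3, split: %u bytes in data page, %u in header buffer\n",
                       data_len, in_data_page, HDR_SIZE - in_data_page);
}

int main(void)
{
        classify(4096);   /* full fragment    -> case 1 */
        classify(1000);   /* short payload    -> case 2 */
        classify(4070);   /* near-full payload -> case 3 */
        return 0;
}

A full fragment pushes the whole trailer into the header buffer, a short payload keeps it inside the data page, and a near-full payload splits it across the two; the new rds_ib_get_header() below copies it back into one place.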
Diffstat (limited to 'net/rds')
-rw-r--r--   net/rds/ib.h        12
-rw-r--r--   net/rds/ib_cm.c      9
-rw-r--r--   net/rds/ib_recv.c   43
3 files changed, 58 insertions, 6 deletions
diff --git a/net/rds/ib.h b/net/rds/ib.h
index 420afb95ca1a..c0de7af6cf60 100644
--- a/net/rds/ib.h
+++ b/net/rds/ib.h
@@ -358,17 +358,25 @@ extern ctl_table rds_ib_sysctl_table[];
 /*
  * Helper functions for getting/setting the header and data SGEs in
  * RDS packets (not RDMA)
+ *
+ * From version 3.1 onwards, header is in front of data in the sge.
  */
 static inline struct ib_sge *
 rds_ib_header_sge(struct rds_ib_connection *ic, struct ib_sge *sge)
 {
-        return &sge[0];
+        if (ic->conn->c_version > RDS_PROTOCOL_3_0)
+                return &sge[0];
+        else
+                return &sge[1];
 }
 
 static inline struct ib_sge *
 rds_ib_data_sge(struct rds_ib_connection *ic, struct ib_sge *sge)
 {
-        return &sge[1];
+        if (ic->conn->c_version > RDS_PROTOCOL_3_0)
+                return &sge[1];
+        else
+                return &sge[0];
 }
 
 #endif
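These two inline helpers carry the whole ordering change: callers keep filling a two-element sge array and simply ask which slot is the header and which is the data. A small user-space model of that selection, under the assumption that protocol versions compare as (major << 8) | minor and with a stand-in sge type (the real helpers take struct rds_ib_connection and struct ib_sge):

/* User-space model of the sge-index selection; types and constants are stand-ins. */
#include <assert.h>

#define RDS_PROTOCOL_3_0 0x0300   /* assumed (major << 8) | minor encoding */
#define RDS_PROTOCOL_3_1 0x0301

struct sge { int which; };

static struct sge *header_sge(unsigned int version, struct sge *sge)
{
        return version > RDS_PROTOCOL_3_0 ? &sge[0] : &sge[1];
}

static struct sge *data_sge(unsigned int version, struct sge *sge)
{
        return version > RDS_PROTOCOL_3_0 ? &sge[1] : &sge[0];
}

int main(void)
{
        struct sge sge[2];

        /* 3.1+: header first, then data */
        assert(header_sge(RDS_PROTOCOL_3_1, sge) == &sge[0]);
        assert(data_sge(RDS_PROTOCOL_3_1, sge) == &sge[1]);

        /* 3.0: data page first, header buffer last (the header arrives as a trailer) */
        assert(header_sge(RDS_PROTOCOL_3_0, sge) == &sge[1]);
        assert(data_sge(RDS_PROTOCOL_3_0, sge) == &sge[0]);
        return 0;
}

Keeping the decision inside these two helpers lets the receive-refill path stay version-agnostic and just ask for "the header sge" and "the data sge".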
diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
index 0964ac533ec8..1eb0c291a0b4 100644
--- a/net/rds/ib_cm.c
+++ b/net/rds/ib_cm.c
@@ -101,10 +101,13 @@ void rds_ib_cm_connect_complete(struct rds_connection *conn, struct rdma_cm_even
         if (event->param.conn.private_data_len >= sizeof(*dp)) {
                 dp = event->param.conn.private_data;
 
-                rds_ib_set_protocol(conn,
+                /* make sure it isn't empty data */
+                if (dp->dp_protocol_major) {
+                        rds_ib_set_protocol(conn,
                                 RDS_PROTOCOL(dp->dp_protocol_major,
                                 dp->dp_protocol_minor));
-                rds_ib_set_flow_control(conn, be32_to_cpu(dp->dp_credit));
+                        rds_ib_set_flow_control(conn, be32_to_cpu(dp->dp_credit));
+                }
         }
 
         printk(KERN_NOTICE "RDS/IB: connected to %pI4 version %u.%u%s\n",
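The dp_protocol_major test guards against private data that is present but zeroed. A sketch of the intended outcome (hypothetical negotiate() helper and dp_sketch struct, not kernel code; the assumption is that skipping rds_ib_set_protocol() simply leaves the connection at its existing default, i.e. RDS 3.0 for an old peer):

/* User-space sketch of the negotiation intent; struct and helper are hypothetical. */
#include <stdint.h>
#include <stdio.h>

#define RDS_PROTOCOL(maj, min) (((maj) << 8) | (min))   /* assumed encoding */

struct dp_sketch {
        uint8_t  dp_protocol_major;
        uint8_t  dp_protocol_minor;
        uint32_t dp_credit;
};

static unsigned int negotiate(unsigned int current_version, const struct dp_sketch *dp)
{
        if (dp && dp->dp_protocol_major)        /* "make sure it isn't empty data" */
                return RDS_PROTOCOL(dp->dp_protocol_major, dp->dp_protocol_minor);
        return current_version;                 /* keep the existing default, e.g. 3.0 */
}

int main(void)
{
        struct dp_sketch v31   = { 3, 1, 0 };
        struct dp_sketch empty = { 0, 0, 0 };

        printf("peer advertises 3.1 -> 0x%x\n", negotiate(RDS_PROTOCOL(3, 0), &v31));
        printf("zeroed private data -> 0x%x\n", negotiate(RDS_PROTOCOL(3, 0), &empty));
        return 0;
}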
diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c
index 5709bad28329..28bdcdcdbfe9 100644
--- a/net/rds/ib_recv.c
+++ b/net/rds/ib_recv.c
@@ -555,6 +555,47 @@ u64 rds_ib_piggyb_ack(struct rds_ib_connection *ic)
         return rds_ib_get_ack(ic);
 }
 
+static struct rds_header *rds_ib_get_header(struct rds_connection *conn,
+                                            struct rds_ib_recv_work *recv,
+                                            u32 data_len)
+{
+        struct rds_ib_connection *ic = conn->c_transport_data;
+        void *hdr_buff = &ic->i_recv_hdrs[recv - ic->i_recvs];
+        void *addr;
+        u32 misplaced_hdr_bytes;
+
+        /*
+         * Support header at the front (RDS 3.1+) as well as header-at-end.
+         *
+         * Cases:
+         * 1) header all in header buff (great!)
+         * 2) header all in data page (copy all to header buff)
+         * 3) header split across hdr buf + data page
+         *    (move bit in hdr buff to end before copying other bit from data page)
+         */
+        if (conn->c_version > RDS_PROTOCOL_3_0 || data_len == RDS_FRAG_SIZE)
+                return hdr_buff;
+
+        if (data_len <= (RDS_FRAG_SIZE - sizeof(struct rds_header))) {
+                addr = kmap_atomic(recv->r_frag->f_page, KM_SOFTIRQ0);
+                memcpy(hdr_buff,
+                       addr + recv->r_frag->f_offset + data_len,
+                       sizeof(struct rds_header));
+                kunmap_atomic(addr, KM_SOFTIRQ0);
+                return hdr_buff;
+        }
+
+        misplaced_hdr_bytes = (sizeof(struct rds_header) - (RDS_FRAG_SIZE - data_len));
+
+        memmove(hdr_buff + misplaced_hdr_bytes, hdr_buff, misplaced_hdr_bytes);
+
+        addr = kmap_atomic(recv->r_frag->f_page, KM_SOFTIRQ0);
+        memcpy(hdr_buff, addr + recv->r_frag->f_offset + data_len,
+               sizeof(struct rds_header) - misplaced_hdr_bytes);
+        kunmap_atomic(addr, KM_SOFTIRQ0);
+        return hdr_buff;
+}
+
 /*
  * It's kind of lame that we're copying from the posted receive pages into
  * long-lived bitmaps. We could have posted the bitmaps and rdma written into
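To see the split case (3) concretely, here is a user-space model that scatters a 3.0 wire message the same way the receive SGEs do for old peers (data page first, then header buffer) and rebuilds one contiguous header. It models the goal of rds_ib_get_header() rather than transcribing its in-place fix-up, and the 4096/48 sizes are stand-ins:

/* User-space model of the split-header case; sizes and layout are stand-ins. */
#include <assert.h>
#include <stdio.h>
#include <string.h>

#define FRAG_SIZE 4096u   /* stand-in fragment (data page) size */
#define HDR_SIZE  48u     /* stand-in header size */

int main(void)
{
        unsigned char wire[FRAG_SIZE + HDR_SIZE];   /* 3.0 message: payload, then trailer header */
        unsigned char data_page[FRAG_SIZE];
        unsigned char hdr_buff[HDR_SIZE];
        unsigned char header[HDR_SIZE];
        unsigned int data_len = FRAG_SIZE - 26;     /* near-full payload -> split header */
        unsigned int total = data_len + HDR_SIZE;
        unsigned int in_data_page, misplaced, i;

        for (i = 0; i < total; i++)
                wire[i] = (unsigned char)(i * 7);

        /* Scatter like the receive WR does for 3.0: data page first, header buffer second. */
        memcpy(data_page, wire, FRAG_SIZE);
        memcpy(hdr_buff, wire + FRAG_SIZE, total - FRAG_SIZE);

        in_data_page = FRAG_SIZE - data_len;        /* front of the header, stuck in the data page */
        misplaced = HDR_SIZE - in_data_page;        /* tail of the header, in the header buffer */

        /* Rebuild one contiguous header: front from the data page, tail from the header buffer. */
        memcpy(header, data_page + data_len, in_data_page);
        memcpy(header + in_data_page, hdr_buff, misplaced);

        assert(memcmp(header, wire + data_len, HDR_SIZE) == 0);
        printf("case 3: %u header bytes in data page, %u in header buffer\n",
               in_data_page, misplaced);
        return 0;
}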
@@ -667,7 +708,7 @@ static void rds_ib_process_recv(struct rds_connection *conn,
         }
         byte_len -= sizeof(struct rds_header);
 
-        ihdr = &ic->i_recv_hdrs[recv - ic->i_recvs];
+        ihdr = rds_ib_get_header(conn, recv, byte_len);
 
         /* Validate the checksum. */
         if (!rds_message_verify_checksum(ihdr)) {