author    Andy Grover <andy.grover@oracle.com>    2009-07-17 09:13:24 -0400
committer David S. Miller <davem@davemloft.net>   2009-07-20 11:03:03 -0400
commit    02a6a2592e41d27644d647f3bce23598649961bc (patch)
tree      7455566e6f3ff13279bb5949f06e398dbb8cff1a /net/rds/ib_recv.c
parent    9ddbcfa098bae757d3760dd1dbf2847a0bd5a525 (diff)
RDS/IB: Handle connections using RDS 3.0 wire protocol
The big differences between RDS 3.0 and 3.1 are protocol-level flow
control, and with 3.1 the header is in front of the data. The header
always ends up in the header buffer, and the data goes in the data
page. In 3.0 our "header" is a trailer, and will end up either in the
data page, the header buffer, or split across the two. Since 3.1 is
backwards-compatible with 3.0, we need to continue to support these
cases. This patch does that -- if using the RDS 3.0 wire protocol, it
will copy the header from wherever it ended up into the header buffer.

Signed-off-by: Andy Grover <andy.grover@oracle.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
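For reference, a small standalone sketch (not part of the patch) of the three
header-placement cases that the new rds_ib_get_header() has to distinguish on
an RDS 3.0 connection, where the "header" is a trailer behind the data.
frag_size and hdr_size below are illustrative stand-ins for RDS_FRAG_SIZE and
sizeof(struct rds_header); the concrete values in main() are assumptions made
only for this example.

/*
 * Sketch of the RDS 3.0 trailer-placement cases (assumed sizes, not
 * taken from the patch).
 */
#include <stdio.h>

static void classify(unsigned int data_len, unsigned int frag_size,
		     unsigned int hdr_size)
{
	if (data_len == frag_size) {
		/* Case 1: no room behind the data, so the trailer landed
		 * entirely in the separate header buffer. */
		printf("data_len %u: header already in header buffer\n",
		       data_len);
	} else if (data_len <= frag_size - hdr_size) {
		/* Case 2: the whole trailer fit in the data page right
		 * after the payload; copy hdr_size bytes into the header
		 * buffer. */
		printf("data_len %u: copy all %u header bytes from data page\n",
		       data_len, hdr_size);
	} else {
		/* Case 3: the trailer straddles the page boundary.  The
		 * last 'misplaced' bytes overflowed into the header buffer
		 * (and must be moved to its end), while the first
		 * hdr_size - misplaced bytes sit at the tail of the data
		 * page. */
		unsigned int misplaced = hdr_size - (frag_size - data_len);

		printf("data_len %u: %u bytes already in header buffer, %u bytes to copy from data page\n",
		       data_len, misplaced, hdr_size - misplaced);
	}
}

int main(void)
{
	const unsigned int frag_size = 4096;	/* assumed fragment size */
	const unsigned int hdr_size = 48;	/* assumed header size */

	classify(4096, frag_size, hdr_size);	/* case 1 */
	classify(1024, frag_size, hdr_size);	/* case 2 */
	classify(4080, frag_size, hdr_size);	/* case 3 */
	return 0;
}

With these assumed sizes, a 4080-byte payload leaves 16 header bytes in the
data page and 32 in the header buffer; that split is what the memmove()/
memcpy() pair in the patch below reassembles. (Case 1 in the patch also
covers RDS 3.1+ connections, where the header is always delivered up front.)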
Diffstat (limited to 'net/rds/ib_recv.c')
-rw-r--r--  net/rds/ib_recv.c  43
1 file changed, 42 insertions(+), 1 deletion(-)
diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c
index 5709bad28329..28bdcdcdbfe9 100644
--- a/net/rds/ib_recv.c
+++ b/net/rds/ib_recv.c
@@ -555,6 +555,47 @@ u64 rds_ib_piggyb_ack(struct rds_ib_connection *ic)
 	return rds_ib_get_ack(ic);
 }
 
+static struct rds_header *rds_ib_get_header(struct rds_connection *conn,
+					    struct rds_ib_recv_work *recv,
+					    u32 data_len)
+{
+	struct rds_ib_connection *ic = conn->c_transport_data;
+	void *hdr_buff = &ic->i_recv_hdrs[recv - ic->i_recvs];
+	void *addr;
+	u32 misplaced_hdr_bytes;
+
+	/*
+	 * Support header at the front (RDS 3.1+) as well as header-at-end.
+	 *
+	 * Cases:
+	 * 1) header all in header buff (great!)
+	 * 2) header all in data page (copy all to header buff)
+	 * 3) header split across hdr buf + data page
+	 *    (move bit in hdr buff to end before copying other bit from data page)
+	 */
+	if (conn->c_version > RDS_PROTOCOL_3_0 || data_len == RDS_FRAG_SIZE)
+		return hdr_buff;
+
+	if (data_len <= (RDS_FRAG_SIZE - sizeof(struct rds_header))) {
+		addr = kmap_atomic(recv->r_frag->f_page, KM_SOFTIRQ0);
+		memcpy(hdr_buff,
+		       addr + recv->r_frag->f_offset + data_len,
+		       sizeof(struct rds_header));
+		kunmap_atomic(addr, KM_SOFTIRQ0);
+		return hdr_buff;
+	}
+
+	misplaced_hdr_bytes = (sizeof(struct rds_header) - (RDS_FRAG_SIZE - data_len));
+
+	memmove(hdr_buff + misplaced_hdr_bytes, hdr_buff, misplaced_hdr_bytes);
+
+	addr = kmap_atomic(recv->r_frag->f_page, KM_SOFTIRQ0);
+	memcpy(hdr_buff, addr + recv->r_frag->f_offset + data_len,
+	       sizeof(struct rds_header) - misplaced_hdr_bytes);
+	kunmap_atomic(addr, KM_SOFTIRQ0);
+	return hdr_buff;
+}
+
 /*
  * It's kind of lame that we're copying from the posted receive pages into
  * long-lived bitmaps. We could have posted the bitmaps and rdma written into
@@ -667,7 +708,7 @@ static void rds_ib_process_recv(struct rds_connection *conn,
 	}
 	byte_len -= sizeof(struct rds_header);
 
-	ihdr = &ic->i_recv_hdrs[recv - ic->i_recvs];
+	ihdr = rds_ib_get_header(conn, recv, byte_len);
 
 	/* Validate the checksum. */
 	if (!rds_message_verify_checksum(ihdr)) {