author	Shirley Ma <mashirle@us.ibm.com>	2008-04-23 14:55:45 -0400
committer	Roland Dreier <rolandd@cisco.com>	2008-04-23 14:55:45 -0400
commit	bc7b3a36ba02e4053ca38653e6a753082d9add03 (patch)
tree	28fe1daa7cab6c7cab71bbc7af22ee6ca7746323 /drivers/infiniband/ulp
parent	bc5698f3ecc9587e1edb343a2878f8d228c49e0e (diff)
IPoIB: Handle 4K IB MTU for UD (datagram) mode
This patch enables IPoIB to use 4K UD messages (when the underlying device and fabric support a 4K MTU) by using two scatter buffers when PAGE_SIZE is less than or equal to the HCA IB MTU size. The first buffer holds the IPoIB header plus the GRH header, and the second buffer holds the IPoIB payload, which is 4K-4 bytes.

Signed-off-by: Shirley Ma <xma@us.ibm.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
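For reference, here is a minimal user-space sketch (not part of the patch) of the sizing decision the patch introduces: with a 4K IB MTU, the GRH plus IPoIB header no longer fits in a single page together with the payload, so the receive WR switches to two scatter/gather entries. The PAGE_SIZE value and the main() driver below are assumptions for illustration; the macros mirror those added to ipoib.h.

/*
 * Illustrative sketch only: shows how the IPOIB_UD_* helpers decide
 * between one contiguous receive buffer and a two-entry scatter list.
 * PAGE_SIZE and main() are assumptions for this example; in the kernel
 * these values come from the mm headers and the IPoIB driver itself.
 */
#include <stdio.h>

#define PAGE_SIZE	4096	/* assumed for illustration */
#define IB_GRH_BYTES	40
#define IPOIB_ENCAP_LEN	4

#define IPOIB_UD_HEAD_SIZE		(IB_GRH_BYTES + IPOIB_ENCAP_LEN)
#define IPOIB_UD_MTU(ib_mtu)		((ib_mtu) - IPOIB_ENCAP_LEN)
#define IPOIB_UD_BUF_SIZE(ib_mtu)	((ib_mtu) + IB_GRH_BYTES)

static int ipoib_ud_need_sg(unsigned int ib_mtu)
{
	return IPOIB_UD_BUF_SIZE(ib_mtu) > PAGE_SIZE;
}

int main(void)
{
	unsigned int mtus[] = { 2048, 4096 };	/* 2K and 4K IB MTUs */

	for (int i = 0; i < 2; ++i) {
		unsigned int mtu = mtus[i];

		if (ipoib_ud_need_sg(mtu))
			/* two SG entries: GRH + IPoIB header, then one full page */
			printf("IB MTU %u: sg[0]=%u bytes, sg[1]=%u bytes, IP MTU %u\n",
			       mtu, IPOIB_UD_HEAD_SIZE, PAGE_SIZE, IPOIB_UD_MTU(mtu));
		else
			/* one contiguous buffer covers GRH + payload */
			printf("IB MTU %u: single buffer of %u bytes, IP MTU %u\n",
			       mtu, IPOIB_UD_BUF_SIZE(mtu), IPOIB_UD_MTU(mtu));
	}
	return 0;
}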
Diffstat (limited to 'drivers/infiniband/ulp')
-rw-r--r--	drivers/infiniband/ulp/ipoib/ipoib.h	20
-rw-r--r--	drivers/infiniband/ulp/ipoib/ipoib_ib.c	125
-rw-r--r--	drivers/infiniband/ulp/ipoib/ipoib_main.c	19
-rw-r--r--	drivers/infiniband/ulp/ipoib/ipoib_multicast.c	3
-rw-r--r--	drivers/infiniband/ulp/ipoib/ipoib_verbs.c	15
-rw-r--r--	drivers/infiniband/ulp/ipoib/ipoib_vlan.c	1
6 files changed, 134 insertions, 49 deletions
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index 73b2b176ad0e..f1f142dc64b1 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -56,11 +56,11 @@
 /* constants */
 
 enum {
-	IPOIB_PACKET_SIZE	  = 2048,
-	IPOIB_BUF_SIZE		  = IPOIB_PACKET_SIZE + IB_GRH_BYTES,
-
 	IPOIB_ENCAP_LEN		  = 4,
 
+	IPOIB_UD_HEAD_SIZE	  = IB_GRH_BYTES + IPOIB_ENCAP_LEN,
+	IPOIB_UD_RX_SG		  = 2, /* max buffer needed for 4K mtu */
+
 	IPOIB_CM_MTU		  = 0x10000 - 0x10, /* padding to align header to 16 */
 	IPOIB_CM_BUF_SIZE	  = IPOIB_CM_MTU + IPOIB_ENCAP_LEN,
 	IPOIB_CM_HEAD_SIZE	  = IPOIB_CM_BUF_SIZE % PAGE_SIZE,
@@ -139,7 +139,7 @@ struct ipoib_mcast {
 
 struct ipoib_rx_buf {
 	struct sk_buff *skb;
-	u64		mapping;
+	u64		mapping[IPOIB_UD_RX_SG];
 };
 
 struct ipoib_tx_buf {
@@ -294,6 +294,7 @@ struct ipoib_dev_priv {
 
 	unsigned int admin_mtu;
 	unsigned int mcast_mtu;
+	unsigned int max_ib_mtu;
 
 	struct ipoib_rx_buf *rx_ring;
 
@@ -305,6 +306,9 @@ struct ipoib_dev_priv {
 	struct ib_send_wr    tx_wr;
 	unsigned             tx_outstanding;
 
+	struct ib_recv_wr    rx_wr;
+	struct ib_sge        rx_sge[IPOIB_UD_RX_SG];
+
 	struct ib_wc ibwc[IPOIB_NUM_WC];
 
 	struct list_head dead_ahs;
@@ -366,6 +370,14 @@ struct ipoib_neigh {
 	struct list_head    list;
 };
 
+#define IPOIB_UD_MTU(ib_mtu)		(ib_mtu - IPOIB_ENCAP_LEN)
+#define IPOIB_UD_BUF_SIZE(ib_mtu)	(ib_mtu + IB_GRH_BYTES)
+
+static inline int ipoib_ud_need_sg(unsigned int ib_mtu)
+{
+	return IPOIB_UD_BUF_SIZE(ib_mtu) > PAGE_SIZE;
+}
+
 /*
  * We stash a pointer to our private neighbour information after our
  * hardware address in neigh->ha.  The ALIGN() expression here makes
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 0205eb7c1bd3..7cf1fa7074ab 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -89,28 +89,59 @@ void ipoib_free_ah(struct kref *kref)
 	spin_unlock_irqrestore(&priv->lock, flags);
 }
 
+static void ipoib_ud_dma_unmap_rx(struct ipoib_dev_priv *priv,
+				  u64 mapping[IPOIB_UD_RX_SG])
+{
+	if (ipoib_ud_need_sg(priv->max_ib_mtu)) {
+		ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_UD_HEAD_SIZE,
+				    DMA_FROM_DEVICE);
+		ib_dma_unmap_page(priv->ca, mapping[1], PAGE_SIZE,
+				  DMA_FROM_DEVICE);
+	} else
+		ib_dma_unmap_single(priv->ca, mapping[0],
+				    IPOIB_UD_BUF_SIZE(priv->max_ib_mtu),
+				    DMA_FROM_DEVICE);
+}
+
+static void ipoib_ud_skb_put_frags(struct ipoib_dev_priv *priv,
+				   struct sk_buff *skb,
+				   unsigned int length)
+{
+	if (ipoib_ud_need_sg(priv->max_ib_mtu)) {
+		skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
+		unsigned int size;
+		/*
+		 * There is only two buffers needed for max_payload = 4K,
+		 * first buf size is IPOIB_UD_HEAD_SIZE
+		 */
+		skb->tail += IPOIB_UD_HEAD_SIZE;
+		skb->len  += length;
+
+		size = length - IPOIB_UD_HEAD_SIZE;
+
+		frag->size     = size;
+		skb->data_len += size;
+		skb->truesize += size;
+	} else
+		skb_put(skb, length);
+
+}
+
 static int ipoib_ib_post_receive(struct net_device *dev, int id)
 {
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
-	struct ib_sge list;
-	struct ib_recv_wr param;
 	struct ib_recv_wr *bad_wr;
 	int ret;
 
-	list.addr     = priv->rx_ring[id].mapping;
-	list.length   = IPOIB_BUF_SIZE;
-	list.lkey     = priv->mr->lkey;
+	priv->rx_wr.wr_id    = id | IPOIB_OP_RECV;
+	priv->rx_sge[0].addr = priv->rx_ring[id].mapping[0];
+	priv->rx_sge[1].addr = priv->rx_ring[id].mapping[1];
 
-	param.next    = NULL;
-	param.wr_id   = id | IPOIB_OP_RECV;
-	param.sg_list = &list;
-	param.num_sge = 1;
 
-	ret = ib_post_recv(priv->qp, &param, &bad_wr);
+	ret = ib_post_recv(priv->qp, &priv->rx_wr, &bad_wr);
 	if (unlikely(ret)) {
 		ipoib_warn(priv, "receive failed for buf %d (%d)\n", id, ret);
-		ib_dma_unmap_single(priv->ca, priv->rx_ring[id].mapping,
-				    IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
+		ipoib_ud_dma_unmap_rx(priv, priv->rx_ring[id].mapping);
 		dev_kfree_skb_any(priv->rx_ring[id].skb);
 		priv->rx_ring[id].skb = NULL;
 	}
@@ -118,15 +149,21 @@ static int ipoib_ib_post_receive(struct net_device *dev, int id)
 	return ret;
 }
 
-static int ipoib_alloc_rx_skb(struct net_device *dev, int id)
+static struct sk_buff *ipoib_alloc_rx_skb(struct net_device *dev, int id)
 {
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
 	struct sk_buff *skb;
-	u64 addr;
+	int buf_size;
+	u64 *mapping;
 
-	skb = dev_alloc_skb(IPOIB_BUF_SIZE + 4);
-	if (!skb)
-		return -ENOMEM;
+	if (ipoib_ud_need_sg(priv->max_ib_mtu))
+		buf_size = IPOIB_UD_HEAD_SIZE;
+	else
+		buf_size = IPOIB_UD_BUF_SIZE(priv->max_ib_mtu);
+
+	skb = dev_alloc_skb(buf_size + 4);
+	if (unlikely(!skb))
+		return NULL;
 
 	/*
 	 * IB will leave a 40 byte gap for a GRH and IPoIB adds a 4 byte
@@ -135,17 +172,32 @@ static int ipoib_alloc_rx_skb(struct net_device *dev, int id)
 	 */
 	skb_reserve(skb, 4);
 
-	addr = ib_dma_map_single(priv->ca, skb->data, IPOIB_BUF_SIZE,
-				 DMA_FROM_DEVICE);
-	if (unlikely(ib_dma_mapping_error(priv->ca, addr))) {
-		dev_kfree_skb_any(skb);
-		return -EIO;
+	mapping = priv->rx_ring[id].mapping;
+	mapping[0] = ib_dma_map_single(priv->ca, skb->data, buf_size,
+				       DMA_FROM_DEVICE);
+	if (unlikely(ib_dma_mapping_error(priv->ca, mapping[0])))
+		goto error;
+
+	if (ipoib_ud_need_sg(priv->max_ib_mtu)) {
+		struct page *page = alloc_page(GFP_ATOMIC);
+		if (!page)
+			goto partial_error;
+		skb_fill_page_desc(skb, 0, page, 0, PAGE_SIZE);
+		mapping[1] =
+			ib_dma_map_page(priv->ca, skb_shinfo(skb)->frags[0].page,
+					0, PAGE_SIZE, DMA_FROM_DEVICE);
+		if (unlikely(ib_dma_mapping_error(priv->ca, mapping[1])))
+			goto partial_error;
 	}
 
 	priv->rx_ring[id].skb = skb;
-	priv->rx_ring[id].mapping = addr;
+	return skb;
 
-	return 0;
+partial_error:
+	ib_dma_unmap_single(priv->ca, mapping[0], buf_size, DMA_FROM_DEVICE);
+error:
+	dev_kfree_skb_any(skb);
+	return NULL;
 }
 
 static int ipoib_ib_post_receives(struct net_device *dev)
@@ -154,7 +206,7 @@ static int ipoib_ib_post_receives(struct net_device *dev)
 	int i;
 
 	for (i = 0; i < ipoib_recvq_size; ++i) {
-		if (ipoib_alloc_rx_skb(dev, i)) {
+		if (!ipoib_alloc_rx_skb(dev, i)) {
 			ipoib_warn(priv, "failed to allocate receive buffer %d\n", i);
 			return -ENOMEM;
 		}
@@ -172,7 +224,7 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
 	unsigned int wr_id = wc->wr_id & ~IPOIB_OP_RECV;
 	struct sk_buff *skb;
-	u64 addr;
+	u64 mapping[IPOIB_UD_RX_SG];
 
 	ipoib_dbg_data(priv, "recv completion: id %d, status: %d\n",
 		       wr_id, wc->status);
@@ -184,15 +236,13 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
 	}
 
 	skb = priv->rx_ring[wr_id].skb;
-	addr = priv->rx_ring[wr_id].mapping;
 
 	if (unlikely(wc->status != IB_WC_SUCCESS)) {
 		if (wc->status != IB_WC_WR_FLUSH_ERR)
 			ipoib_warn(priv, "failed recv event "
 				   "(status=%d, wrid=%d vend_err %x)\n",
 				   wc->status, wr_id, wc->vendor_err);
-		ib_dma_unmap_single(priv->ca, addr,
-				    IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
+		ipoib_ud_dma_unmap_rx(priv, priv->rx_ring[wr_id].mapping);
 		dev_kfree_skb_any(skb);
 		priv->rx_ring[wr_id].skb = NULL;
 		return;
@@ -205,11 +255,14 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
 	if (wc->slid == priv->local_lid && wc->src_qp == priv->qp->qp_num)
 		goto repost;
 
+	memcpy(mapping, priv->rx_ring[wr_id].mapping,
+	       IPOIB_UD_RX_SG * sizeof *mapping);
+
 	/*
 	 * If we can't allocate a new RX buffer, dump
 	 * this packet and reuse the old buffer.
 	 */
-	if (unlikely(ipoib_alloc_rx_skb(dev, wr_id))) {
+	if (unlikely(!ipoib_alloc_rx_skb(dev, wr_id))) {
 		++dev->stats.rx_dropped;
 		goto repost;
 	}
@@ -217,9 +270,9 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
 	ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n",
 		       wc->byte_len, wc->slid);
 
-	ib_dma_unmap_single(priv->ca, addr, IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
+	ipoib_ud_dma_unmap_rx(priv, mapping);
+	ipoib_ud_skb_put_frags(priv, skb, wc->byte_len);
 
-	skb_put(skb, wc->byte_len);
 	skb_pull(skb, IB_GRH_BYTES);
 
 	skb->protocol = ((struct ipoib_header *) skb->data)->proto;
@@ -733,10 +786,8 @@ int ipoib_ib_dev_stop(struct net_device *dev, int flush)
 			rx_req = &priv->rx_ring[i];
 			if (!rx_req->skb)
 				continue;
-			ib_dma_unmap_single(priv->ca,
-					    rx_req->mapping,
-					    IPOIB_BUF_SIZE,
-					    DMA_FROM_DEVICE);
+			ipoib_ud_dma_unmap_rx(priv,
+					      priv->rx_ring[i].mapping);
 			dev_kfree_skb_any(rx_req->skb);
 			rx_req->skb = NULL;
 		}
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index bd07f02cf02b..7a4ed9d3d844 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -195,7 +195,7 @@ static int ipoib_change_mtu(struct net_device *dev, int new_mtu)
 		return 0;
 	}
 
-	if (new_mtu > IPOIB_PACKET_SIZE - IPOIB_ENCAP_LEN)
+	if (new_mtu > IPOIB_UD_MTU(priv->max_ib_mtu))
 		return -EINVAL;
 
 	priv->admin_mtu = new_mtu;
@@ -971,10 +971,6 @@ static void ipoib_setup(struct net_device *dev)
 			      NETIF_F_LLTX |
 			      NETIF_F_HIGHDMA);
 
-	/* MTU will be reset when mcast join happens */
-	dev->mtu = IPOIB_PACKET_SIZE - IPOIB_ENCAP_LEN;
-	priv->mcast_mtu = priv->admin_mtu = dev->mtu;
-
 	memcpy(dev->broadcast, ipv4_bcast_addr, INFINIBAND_ALEN);
 
 	netif_carrier_off(dev);
@@ -1107,6 +1103,7 @@ static struct net_device *ipoib_add_port(const char *format,
 {
 	struct ipoib_dev_priv *priv;
 	struct ib_device_attr *device_attr;
+	struct ib_port_attr attr;
 	int result = -ENOMEM;
 
 	priv = ipoib_intf_alloc(format);
@@ -1115,6 +1112,18 @@ static struct net_device *ipoib_add_port(const char *format,
 
 	SET_NETDEV_DEV(priv->dev, hca->dma_device);
 
+	if (!ib_query_port(hca, port, &attr))
+		priv->max_ib_mtu = ib_mtu_enum_to_int(attr.max_mtu);
+	else {
+		printk(KERN_WARNING "%s: ib_query_port %d failed\n",
+		       hca->name, port);
+		goto device_init_failed;
+	}
+
+	/* MTU will be reset when mcast join happens */
+	priv->dev->mtu  = IPOIB_UD_MTU(priv->max_ib_mtu);
+	priv->mcast_mtu = priv->admin_mtu = priv->dev->mtu;
+
 	result = ib_query_pkey(hca, port, 0, &priv->pkey);
 	if (result) {
 		printk(KERN_WARNING "%s: ib_query_pkey port %d failed (ret = %d)\n",
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index 31a53c5bcb13..d00a2c174aee 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -567,8 +567,7 @@ void ipoib_mcast_join_task(struct work_struct *work)
 		return;
 	}
 
-	priv->mcast_mtu = ib_mtu_enum_to_int(priv->broadcast->mcmember.mtu) -
-		IPOIB_ENCAP_LEN;
+	priv->mcast_mtu = IPOIB_UD_MTU(ib_mtu_enum_to_int(priv->broadcast->mcmember.mtu));
 
 	if (!ipoib_cm_admin_enabled(dev))
 		dev->mtu = min(priv->mcast_mtu, priv->admin_mtu);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
index 8a20e3742c43..07c03f178a49 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
@@ -150,7 +150,7 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
 			.max_send_wr  = ipoib_sendq_size,
 			.max_recv_wr  = ipoib_recvq_size,
 			.max_send_sge = 1,
-			.max_recv_sge = 1
+			.max_recv_sge = IPOIB_UD_RX_SG
 		},
 		.sq_sig_type = IB_SIGNAL_ALL_WR,
 		.qp_type     = IB_QPT_UD
@@ -215,6 +215,19 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
 	priv->tx_wr.sg_list	= priv->tx_sge;
 	priv->tx_wr.send_flags	= IB_SEND_SIGNALED;
 
+	priv->rx_sge[0].lkey = priv->mr->lkey;
+	if (ipoib_ud_need_sg(priv->max_ib_mtu)) {
+		priv->rx_sge[0].length = IPOIB_UD_HEAD_SIZE;
+		priv->rx_sge[1].length = PAGE_SIZE;
+		priv->rx_sge[1].lkey = priv->mr->lkey;
+		priv->rx_wr.num_sge = IPOIB_UD_RX_SG;
+	} else {
+		priv->rx_sge[0].length = IPOIB_UD_BUF_SIZE(priv->max_ib_mtu);
+		priv->rx_wr.num_sge = 1;
+	}
+	priv->rx_wr.next = NULL;
+	priv->rx_wr.sg_list = priv->rx_sge;
+
 	return 0;
 
 out_free_cq:
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
index 293f5b892e3f..431fdeaa2dc4 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
@@ -89,6 +89,7 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey)
 		goto err;
 	}
 
+	priv->max_ib_mtu = ppriv->max_ib_mtu;
 	set_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags);
 
 	priv->pkey = pkey;