author	Roland Dreier <rolandd@cisco.com>	2006-09-22 18:22:52 -0400
committer	Roland Dreier <rolandd@cisco.com>	2006-09-22 18:22:52 -0400
commit	2439a6e65ff09729c3b4215f134dc5cd4e8a30c0 (patch)
tree	a8c85db68ed2bc15be60e9a713ac7a73c5c40796 /drivers/infiniband
parent	d81110285f7f6c07a0ce8f99a5ff158a647cd649 (diff)
IPoIB: Refactor completion handling
Split up ipoib_ib_handle_wc() into ipoib_ib_handle_rx_wc() and
ipoib_ib_handle_tx_wc() to make the code easier to read.  This will
also help implement NAPI in the future.

Signed-off-by: Roland Dreier <rolandd@cisco.com>
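[Editor's note] The NAPI remark refers to a follow-on change, not this patch: once receive completions have a dedicated handler, a ->poll() routine can drain the CQ in batches instead of handling each completion from the event handler. Below is a rough sketch of that direction, written against the old two-argument ->poll()/dev->quota NAPI interface of this era; ipoib_poll() and IPOIB_NUM_WC are illustrative names and are not introduced by this commit.

/*
 * Illustrative sketch only -- not part of this commit.  Shows how the
 * split RX/TX handlers could be driven from an old-style NAPI poll
 * routine.  For simplicity, both RX and TX completions are counted
 * against the budget here.
 */
#define IPOIB_NUM_WC 4			/* assumed per-iteration CQ batch size */

static int ipoib_poll(struct net_device *dev, int *budget)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int max = min(*budget, dev->quota);
	int done = 0;
	int t, n, i;

	while (max) {
		struct ib_wc wc[IPOIB_NUM_WC];

		t = min(max, IPOIB_NUM_WC);
		n = ib_poll_cq(priv->cq, t, wc);

		for (i = 0; i < n; ++i) {
			if (wc[i].wr_id & IPOIB_OP_RECV)
				ipoib_ib_handle_rx_wc(dev, &wc[i]);
			else
				ipoib_ib_handle_tx_wc(dev, &wc[i]);
		}

		done += n;
		max  -= n;
		if (n < t)
			break;
	}

	dev->quota -= done;
	*budget    -= done;

	if (max) {
		/* CQ drained: leave polling mode and re-arm the interrupt. */
		netif_rx_complete(dev);
		ib_req_notify_cq(priv->cq, IB_CQ_NEXT_COMP);
		return 0;
	}

	return 1;			/* budget exhausted, keep polling */
}

In such a scheme the netif_rx_ni() call in the RX path would also become netif_receive_skb(), since the handler then runs from softirq context, and a real implementation additionally has to handle completions that arrive between the last ib_poll_cq() and the ib_req_notify_cq() re-arm.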
Diffstat (limited to 'drivers/infiniband')
-rw-r--r--	drivers/infiniband/ulp/ipoib/ipoib_ib.c	188
1 file changed, 100 insertions, 88 deletions
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 5033666b1481..722177ea069b 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -169,117 +169,129 @@ static int ipoib_ib_post_receives(struct net_device *dev)
 	return 0;
 }
 
-static void ipoib_ib_handle_wc(struct net_device *dev,
-			       struct ib_wc *wc)
-{
-	struct ipoib_dev_priv *priv = netdev_priv(dev);
-	unsigned int wr_id = wc->wr_id;
-
-	ipoib_dbg_data(priv, "called: id %d, op %d, status: %d\n",
-		       wr_id, wc->opcode, wc->status);
-
-	if (wr_id & IPOIB_OP_RECV) {
-		wr_id &= ~IPOIB_OP_RECV;
-
-		if (wr_id < ipoib_recvq_size) {
-			struct sk_buff *skb  = priv->rx_ring[wr_id].skb;
-			dma_addr_t      addr = priv->rx_ring[wr_id].mapping;
-
-			if (unlikely(wc->status != IB_WC_SUCCESS)) {
-				if (wc->status != IB_WC_WR_FLUSH_ERR)
-					ipoib_warn(priv, "failed recv event "
-						   "(status=%d, wrid=%d vend_err %x)\n",
-						   wc->status, wr_id, wc->vendor_err);
-				dma_unmap_single(priv->ca->dma_device, addr,
-						 IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
-				dev_kfree_skb_any(skb);
-				priv->rx_ring[wr_id].skb = NULL;
-				return;
-			}
-
-			/*
-			 * If we can't allocate a new RX buffer, dump
-			 * this packet and reuse the old buffer.
-			 */
-			if (unlikely(ipoib_alloc_rx_skb(dev, wr_id))) {
-				++priv->stats.rx_dropped;
-				goto repost;
-			}
-
-			ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n",
-				       wc->byte_len, wc->slid);
-
-			dma_unmap_single(priv->ca->dma_device, addr,
-					 IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
-
-			skb_put(skb, wc->byte_len);
-			skb_pull(skb, IB_GRH_BYTES);
-
-			if (wc->slid != priv->local_lid ||
-			    wc->src_qp != priv->qp->qp_num) {
-				skb->protocol = ((struct ipoib_header *) skb->data)->proto;
-				skb->mac.raw = skb->data;
-				skb_pull(skb, IPOIB_ENCAP_LEN);
-
-				dev->last_rx = jiffies;
-				++priv->stats.rx_packets;
-				priv->stats.rx_bytes += skb->len;
-
-				skb->dev = dev;
-				/* XXX get correct PACKET_ type here */
-				skb->pkt_type = PACKET_HOST;
-				netif_rx_ni(skb);
-			} else {
-				ipoib_dbg_data(priv, "dropping loopback packet\n");
-				dev_kfree_skb_any(skb);
-			}
-
-		repost:
-			if (unlikely(ipoib_ib_post_receive(dev, wr_id)))
-				ipoib_warn(priv, "ipoib_ib_post_receive failed "
-					   "for buf %d\n", wr_id);
-		} else
-			ipoib_warn(priv, "completion event with wrid %d\n",
-				   wr_id);
-
-	} else {
-		struct ipoib_tx_buf *tx_req;
-		unsigned long flags;
-
-		if (wr_id >= ipoib_sendq_size) {
-			ipoib_warn(priv, "completion event with wrid %d (> %d)\n",
-				   wr_id, ipoib_sendq_size);
-			return;
-		}
-
-		ipoib_dbg_data(priv, "send complete, wrid %d\n", wr_id);
-
-		tx_req = &priv->tx_ring[wr_id];
-
-		dma_unmap_single(priv->ca->dma_device,
-				 pci_unmap_addr(tx_req, mapping),
-				 tx_req->skb->len,
-				 DMA_TO_DEVICE);
-
-		++priv->stats.tx_packets;
-		priv->stats.tx_bytes += tx_req->skb->len;
-
-		dev_kfree_skb_any(tx_req->skb);
-
-		spin_lock_irqsave(&priv->tx_lock, flags);
-		++priv->tx_tail;
-		if (netif_queue_stopped(dev) &&
-		    test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags) &&
-		    priv->tx_head - priv->tx_tail <= ipoib_sendq_size >> 1)
-			netif_wake_queue(dev);
-		spin_unlock_irqrestore(&priv->tx_lock, flags);
-
-		if (wc->status != IB_WC_SUCCESS &&
-		    wc->status != IB_WC_WR_FLUSH_ERR)
-			ipoib_warn(priv, "failed send event "
-				   "(status=%d, wrid=%d vend_err %x)\n",
-				   wc->status, wr_id, wc->vendor_err);
-	}
-}
+static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
+{
+	struct ipoib_dev_priv *priv = netdev_priv(dev);
+	unsigned int wr_id = wc->wr_id & ~IPOIB_OP_RECV;
+	struct sk_buff *skb;
+	dma_addr_t addr;
+
+	ipoib_dbg_data(priv, "recv completion: id %d, op %d, status: %d\n",
+		       wr_id, wc->opcode, wc->status);
+
+	if (unlikely(wr_id >= ipoib_recvq_size)) {
+		ipoib_warn(priv, "recv completion event with wrid %d (> %d)\n",
+			   wr_id, ipoib_recvq_size);
+		return;
+	}
+
+	skb  = priv->rx_ring[wr_id].skb;
+	addr = priv->rx_ring[wr_id].mapping;
+
+	if (unlikely(wc->status != IB_WC_SUCCESS)) {
+		if (wc->status != IB_WC_WR_FLUSH_ERR)
+			ipoib_warn(priv, "failed recv event "
+				   "(status=%d, wrid=%d vend_err %x)\n",
+				   wc->status, wr_id, wc->vendor_err);
+		dma_unmap_single(priv->ca->dma_device, addr,
+				 IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
+		dev_kfree_skb_any(skb);
+		priv->rx_ring[wr_id].skb = NULL;
+		return;
+	}
+
+	/*
+	 * If we can't allocate a new RX buffer, dump
+	 * this packet and reuse the old buffer.
+	 */
+	if (unlikely(ipoib_alloc_rx_skb(dev, wr_id))) {
+		++priv->stats.rx_dropped;
+		goto repost;
+	}
+
+	ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n",
+		       wc->byte_len, wc->slid);
+
+	dma_unmap_single(priv->ca->dma_device, addr,
+			 IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
+
+	skb_put(skb, wc->byte_len);
+	skb_pull(skb, IB_GRH_BYTES);
+
+	if (wc->slid != priv->local_lid ||
+	    wc->src_qp != priv->qp->qp_num) {
+		skb->protocol = ((struct ipoib_header *) skb->data)->proto;
+		skb->mac.raw = skb->data;
+		skb_pull(skb, IPOIB_ENCAP_LEN);
+
+		dev->last_rx = jiffies;
+		++priv->stats.rx_packets;
+		priv->stats.rx_bytes += skb->len;
+
+		skb->dev = dev;
+		/* XXX get correct PACKET_ type here */
+		skb->pkt_type = PACKET_HOST;
+		netif_rx_ni(skb);
+	} else {
+		ipoib_dbg_data(priv, "dropping loopback packet\n");
+		dev_kfree_skb_any(skb);
+	}
+
+repost:
+	if (unlikely(ipoib_ib_post_receive(dev, wr_id)))
+		ipoib_warn(priv, "ipoib_ib_post_receive failed "
+			   "for buf %d\n", wr_id);
+}
+
+static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
+{
+	struct ipoib_dev_priv *priv = netdev_priv(dev);
+	unsigned int wr_id = wc->wr_id;
+	struct ipoib_tx_buf *tx_req;
+	unsigned long flags;
+
+	ipoib_dbg_data(priv, "send completion: id %d, op %d, status: %d\n",
+		       wr_id, wc->opcode, wc->status);
+
+	if (unlikely(wr_id >= ipoib_sendq_size)) {
+		ipoib_warn(priv, "send completion event with wrid %d (> %d)\n",
+			   wr_id, ipoib_sendq_size);
+		return;
+	}
+
+	tx_req = &priv->tx_ring[wr_id];
+
+	dma_unmap_single(priv->ca->dma_device,
+			 pci_unmap_addr(tx_req, mapping),
+			 tx_req->skb->len,
+			 DMA_TO_DEVICE);
+
+	++priv->stats.tx_packets;
+	priv->stats.tx_bytes += tx_req->skb->len;
+
+	dev_kfree_skb_any(tx_req->skb);
+
+	spin_lock_irqsave(&priv->tx_lock, flags);
+	++priv->tx_tail;
+	if (netif_queue_stopped(dev) &&
+	    test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags) &&
+	    priv->tx_head - priv->tx_tail <= ipoib_sendq_size >> 1)
+		netif_wake_queue(dev);
+	spin_unlock_irqrestore(&priv->tx_lock, flags);
+
+	if (wc->status != IB_WC_SUCCESS &&
+	    wc->status != IB_WC_WR_FLUSH_ERR)
+		ipoib_warn(priv, "failed send event "
+			   "(status=%d, wrid=%d vend_err %x)\n",
+			   wc->status, wr_id, wc->vendor_err);
+}
+
+static void ipoib_ib_handle_wc(struct net_device *dev, struct ib_wc *wc)
+{
+	if (wc->wr_id & IPOIB_OP_RECV)
+		ipoib_ib_handle_rx_wc(dev, wc);
+	else
+		ipoib_ib_handle_tx_wc(dev, wc);
+}
 
 void ipoib_ib_completion(struct ib_cq *cq, void *dev_ptr)