author     Ralph Campbell <ralph.campbell@qlogic.com>  2006-12-12 17:30:48 -0500
committer  Roland Dreier <rolandd@cisco.com>           2006-12-12 17:30:48 -0500
commit     37ccf9df974f55e99bf21278133b065cbbcf3f79 (patch)
tree       4298f7759b810aa8671f1f92ae38a2669b8b62e9 /drivers
parent     1527106ff8cf6afb15f68c8820605a0d32263173 (diff)
IPoIB: Use the new verbs DMA mapping functions
Convert IPoIB to use the new DMA mapping functions for kernel verbs consumers.

Signed-off-by: Ralph Campbell <ralph.campbell@qlogic.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
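
For orientation, a minimal sketch of the verbs DMA mapping pattern this patch
converts IPoIB to: the ib_dma_* helpers take the struct ib_device itself
(priv->ca in the hunks below) rather than its dma_device, and they return and
accept plain u64 addresses, which is why the mapping fields in ipoib.h change
type. The function name and buffer handling here are illustrative only and are
not part of the patch.

#include <linux/errno.h>
#include <linux/dma-mapping.h>
#include <rdma/ib_verbs.h>

/*
 * Illustrative consumer of the kernel verbs DMA helpers: map a buffer,
 * check the mapping, and unmap it again.  A real consumer would build an
 * ib_sge from addr/len and post a work request between map and unmap.
 */
static int example_verbs_dma_map(struct ib_device *ca, void *buf, size_t len)
{
	u64 addr;

	addr = ib_dma_map_single(ca, buf, len, DMA_TO_DEVICE);
	if (unlikely(ib_dma_mapping_error(ca, addr)))
		return -EIO;

	/* ... post and complete the send here ... */

	ib_dma_unmap_single(ca, addr, len, DMA_TO_DEVICE);
	return 0;
}

Returning u64 rather than dma_addr_t lets low-level drivers interpose their
own mapping behind these helpers, which is what motivates the field type
changes in ipoib.h.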
Diffstat (limited to 'drivers')
 -rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib.h     |  4
 -rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_ib.c  | 75
 2 files changed, 38 insertions(+), 41 deletions(-)
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index 99547996aba2..07deee8f81ce 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -105,12 +105,12 @@ struct ipoib_mcast;
 
 struct ipoib_rx_buf {
 	struct sk_buff *skb;
-	dma_addr_t	mapping;
+	u64		mapping;
 };
 
 struct ipoib_tx_buf {
 	struct sk_buff *skb;
-	DECLARE_PCI_UNMAP_ADDR(mapping)
+	u64		mapping;
 };
 
 /*
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index f10fba5d3265..59d9594ed6d9 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -109,9 +109,8 @@ static int ipoib_ib_post_receive(struct net_device *dev, int id)
 	ret = ib_post_recv(priv->qp, &param, &bad_wr);
 	if (unlikely(ret)) {
 		ipoib_warn(priv, "receive failed for buf %d (%d)\n", id, ret);
-		dma_unmap_single(priv->ca->dma_device,
-				 priv->rx_ring[id].mapping,
-				 IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
+		ib_dma_unmap_single(priv->ca, priv->rx_ring[id].mapping,
+				    IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
 		dev_kfree_skb_any(priv->rx_ring[id].skb);
 		priv->rx_ring[id].skb = NULL;
 	}
@@ -123,7 +122,7 @@ static int ipoib_alloc_rx_skb(struct net_device *dev, int id)
 {
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
 	struct sk_buff *skb;
-	dma_addr_t addr;
+	u64 addr;
 
 	skb = dev_alloc_skb(IPOIB_BUF_SIZE + 4);
 	if (!skb)
@@ -136,10 +135,9 @@ static int ipoib_alloc_rx_skb(struct net_device *dev, int id)
 	 */
 	skb_reserve(skb, 4);
 
-	addr = dma_map_single(priv->ca->dma_device,
-			      skb->data, IPOIB_BUF_SIZE,
-			      DMA_FROM_DEVICE);
-	if (unlikely(dma_mapping_error(addr))) {
+	addr = ib_dma_map_single(priv->ca, skb->data, IPOIB_BUF_SIZE,
+				 DMA_FROM_DEVICE);
+	if (unlikely(ib_dma_mapping_error(priv->ca, addr))) {
 		dev_kfree_skb_any(skb);
 		return -EIO;
 	}
@@ -174,7 +172,7 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
 	unsigned int wr_id = wc->wr_id & ~IPOIB_OP_RECV;
 	struct sk_buff *skb;
-	dma_addr_t addr;
+	u64 addr;
 
 	ipoib_dbg_data(priv, "recv completion: id %d, op %d, status: %d\n",
 		       wr_id, wc->opcode, wc->status);
@@ -193,8 +191,8 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
 			ipoib_warn(priv, "failed recv event "
 				   "(status=%d, wrid=%d vend_err %x)\n",
 				   wc->status, wr_id, wc->vendor_err);
-		dma_unmap_single(priv->ca->dma_device, addr,
-				 IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
+		ib_dma_unmap_single(priv->ca, addr,
+				    IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
 		dev_kfree_skb_any(skb);
 		priv->rx_ring[wr_id].skb = NULL;
 		return;
@@ -212,8 +210,7 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
 	ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n",
 		       wc->byte_len, wc->slid);
 
-	dma_unmap_single(priv->ca->dma_device, addr,
-			 IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
+	ib_dma_unmap_single(priv->ca, addr, IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
 
 	skb_put(skb, wc->byte_len);
 	skb_pull(skb, IB_GRH_BYTES);
@@ -261,10 +258,8 @@ static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
 
 	tx_req = &priv->tx_ring[wr_id];
 
-	dma_unmap_single(priv->ca->dma_device,
-			 pci_unmap_addr(tx_req, mapping),
-			 tx_req->skb->len,
-			 DMA_TO_DEVICE);
+	ib_dma_unmap_single(priv->ca, tx_req->mapping,
+			    tx_req->skb->len, DMA_TO_DEVICE);
 
 	++priv->stats.tx_packets;
 	priv->stats.tx_bytes += tx_req->skb->len;
@@ -311,7 +306,7 @@ void ipoib_ib_completion(struct ib_cq *cq, void *dev_ptr)
 static inline int post_send(struct ipoib_dev_priv *priv,
 			    unsigned int wr_id,
 			    struct ib_ah *address, u32 qpn,
-			    dma_addr_t addr, int len)
+			    u64 addr, int len)
 {
 	struct ib_send_wr *bad_wr;
 
@@ -330,7 +325,7 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb,
 {
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
 	struct ipoib_tx_buf *tx_req;
-	dma_addr_t addr;
+	u64 addr;
 
 	if (unlikely(skb->len > dev->mtu + INFINIBAND_ALEN)) {
 		ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
@@ -353,21 +348,20 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb,
 	 */
 	tx_req = &priv->tx_ring[priv->tx_head & (ipoib_sendq_size - 1)];
 	tx_req->skb = skb;
-	addr = dma_map_single(priv->ca->dma_device, skb->data, skb->len,
-			      DMA_TO_DEVICE);
-	if (unlikely(dma_mapping_error(addr))) {
+	addr = ib_dma_map_single(priv->ca, skb->data, skb->len,
+				 DMA_TO_DEVICE);
+	if (unlikely(ib_dma_mapping_error(priv->ca, addr))) {
 		++priv->stats.tx_errors;
 		dev_kfree_skb_any(skb);
 		return;
 	}
-	pci_unmap_addr_set(tx_req, mapping, addr);
+	tx_req->mapping = addr;
 
 	if (unlikely(post_send(priv, priv->tx_head & (ipoib_sendq_size - 1),
 			       address->ah, qpn, addr, skb->len))) {
 		ipoib_warn(priv, "post_send failed\n");
 		++priv->stats.tx_errors;
-		dma_unmap_single(priv->ca->dma_device, addr, skb->len,
-				 DMA_TO_DEVICE);
+		ib_dma_unmap_single(priv->ca, addr, skb->len, DMA_TO_DEVICE);
 		dev_kfree_skb_any(skb);
 	} else {
 		dev->trans_start = jiffies;
@@ -538,24 +532,27 @@ int ipoib_ib_dev_stop(struct net_device *dev)
 			while ((int) priv->tx_tail - (int) priv->tx_head < 0) {
 				tx_req = &priv->tx_ring[priv->tx_tail &
 							(ipoib_sendq_size - 1)];
-				dma_unmap_single(priv->ca->dma_device,
-						 pci_unmap_addr(tx_req, mapping),
-						 tx_req->skb->len,
-						 DMA_TO_DEVICE);
+				ib_dma_unmap_single(priv->ca,
+						    tx_req->mapping,
+						    tx_req->skb->len,
+						    DMA_TO_DEVICE);
 				dev_kfree_skb_any(tx_req->skb);
 				++priv->tx_tail;
 			}
 
-			for (i = 0; i < ipoib_recvq_size; ++i)
-				if (priv->rx_ring[i].skb) {
-					dma_unmap_single(priv->ca->dma_device,
-							 pci_unmap_addr(&priv->rx_ring[i],
-									mapping),
-							 IPOIB_BUF_SIZE,
-							 DMA_FROM_DEVICE);
-					dev_kfree_skb_any(priv->rx_ring[i].skb);
-					priv->rx_ring[i].skb = NULL;
-				}
+			for (i = 0; i < ipoib_recvq_size; ++i) {
+				struct ipoib_rx_buf *rx_req;
+
+				rx_req = &priv->rx_ring[i];
+				if (!rx_req->skb)
+					continue;
+				ib_dma_unmap_single(priv->ca,
+						    rx_req->mapping,
+						    IPOIB_BUF_SIZE,
+						    DMA_FROM_DEVICE);
+				dev_kfree_skb_any(rx_req->skb);
+				rx_req->skb = NULL;
+			}
 
 			goto timeout;
 		}