Diffstat (limited to 'drivers/infiniband/ulp/ipoib/ipoib_ib.c')
 drivers/infiniband/ulp/ipoib/ipoib_ib.c | 22 +++++++++++-----------
 1 file changed, 11 insertions(+), 11 deletions(-)
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index ed65202878d8..a54da42849ae 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -161,7 +161,7 @@ static int ipoib_ib_post_receives(struct net_device *dev)
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
 	int i;
 
-	for (i = 0; i < IPOIB_RX_RING_SIZE; ++i) {
+	for (i = 0; i < ipoib_recvq_size; ++i) {
 		if (ipoib_alloc_rx_skb(dev, i)) {
 			ipoib_warn(priv, "failed to allocate receive buffer %d\n", i);
 			return -ENOMEM;
@@ -187,7 +187,7 @@ static void ipoib_ib_handle_wc(struct net_device *dev,
 	if (wr_id & IPOIB_OP_RECV) {
 		wr_id &= ~IPOIB_OP_RECV;
 
-		if (wr_id < IPOIB_RX_RING_SIZE) {
+		if (wr_id < ipoib_recvq_size) {
 			struct sk_buff *skb = priv->rx_ring[wr_id].skb;
 			dma_addr_t addr = priv->rx_ring[wr_id].mapping;
 
@@ -252,9 +252,9 @@ static void ipoib_ib_handle_wc(struct net_device *dev,
 		struct ipoib_tx_buf *tx_req;
 		unsigned long flags;
 
-		if (wr_id >= IPOIB_TX_RING_SIZE) {
+		if (wr_id >= ipoib_sendq_size) {
 			ipoib_warn(priv, "completion event with wrid %d (> %d)\n",
-				   wr_id, IPOIB_TX_RING_SIZE);
+				   wr_id, ipoib_sendq_size);
 			return;
 		}
 
@@ -275,7 +275,7 @@ static void ipoib_ib_handle_wc(struct net_device *dev,
 		spin_lock_irqsave(&priv->tx_lock, flags);
 		++priv->tx_tail;
 		if (netif_queue_stopped(dev) &&
-		    priv->tx_head - priv->tx_tail <= IPOIB_TX_RING_SIZE / 2)
+		    priv->tx_head - priv->tx_tail <= ipoib_sendq_size >> 1)
 			netif_wake_queue(dev);
 		spin_unlock_irqrestore(&priv->tx_lock, flags);
 
@@ -344,13 +344,13 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb,
 	 * means we have to make sure everything is properly recorded and
 	 * our state is consistent before we call post_send().
 	 */
-	tx_req = &priv->tx_ring[priv->tx_head & (IPOIB_TX_RING_SIZE - 1)];
+	tx_req = &priv->tx_ring[priv->tx_head & (ipoib_sendq_size - 1)];
 	tx_req->skb = skb;
 	addr = dma_map_single(priv->ca->dma_device, skb->data, skb->len,
 			      DMA_TO_DEVICE);
 	pci_unmap_addr_set(tx_req, mapping, addr);
 
-	if (unlikely(post_send(priv, priv->tx_head & (IPOIB_TX_RING_SIZE - 1),
+	if (unlikely(post_send(priv, priv->tx_head & (ipoib_sendq_size - 1),
 			       address->ah, qpn, addr, skb->len))) {
 		ipoib_warn(priv, "post_send failed\n");
 		++priv->stats.tx_errors;
@@ -363,7 +363,7 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb,
 		address->last_send = priv->tx_head;
 		++priv->tx_head;
 
-		if (priv->tx_head - priv->tx_tail == IPOIB_TX_RING_SIZE) {
+		if (priv->tx_head - priv->tx_tail == ipoib_sendq_size) {
 			ipoib_dbg(priv, "TX ring full, stopping kernel net queue\n");
 			netif_stop_queue(dev);
 		}
@@ -488,7 +488,7 @@ static int recvs_pending(struct net_device *dev)
 	int pending = 0;
 	int i;
 
-	for (i = 0; i < IPOIB_RX_RING_SIZE; ++i)
+	for (i = 0; i < ipoib_recvq_size; ++i)
 		if (priv->rx_ring[i].skb)
 			++pending;
 
@@ -527,7 +527,7 @@ int ipoib_ib_dev_stop(struct net_device *dev)
 			 */
 			while ((int) priv->tx_tail - (int) priv->tx_head < 0) {
 				tx_req = &priv->tx_ring[priv->tx_tail &
-							(IPOIB_TX_RING_SIZE - 1)];
+							(ipoib_sendq_size - 1)];
 				dma_unmap_single(priv->ca->dma_device,
 						 pci_unmap_addr(tx_req, mapping),
 						 tx_req->skb->len,
@@ -536,7 +536,7 @@ int ipoib_ib_dev_stop(struct net_device *dev)
 				++priv->tx_tail;
 			}
 
-			for (i = 0; i < IPOIB_RX_RING_SIZE; ++i)
+			for (i = 0; i < ipoib_recvq_size; ++i)
 				if (priv->rx_ring[i].skb) {
 					dma_unmap_single(priv->ca->dma_device,
 							 pci_unmap_addr(&priv->rx_ring[i],
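
Note: the variables ipoib_recvq_size and ipoib_sendq_size referenced above are not defined in ipoib_ib.c; this diff assumes they are declared elsewhere in the driver (conventionally ipoib_main.c). The following is only a sketch of how such runtime-tunable queue sizes are typically exposed as module parameters; the parameter names, defaults, and permissions below are illustrative assumptions, not part of this diff.

/*
 * Illustrative sketch only -- not part of this diff.  The ring-size
 * variables are assumed to live in ipoib_main.c and to be exposed as
 * read-only module parameters; names and defaults here are examples.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>

int ipoib_sendq_size __read_mostly = 64;	/* assumed default */
int ipoib_recvq_size __read_mostly = 128;	/* assumed default */

module_param_named(send_queue_size, ipoib_sendq_size, int, 0444);
MODULE_PARM_DESC(send_queue_size, "Number of descriptors in send queue");
module_param_named(recv_queue_size, ipoib_recvq_size, int, 0444);
MODULE_PARM_DESC(recv_queue_size, "Number of descriptors in receive queue");

Because ipoib_ib.c indexes both rings with head & (size - 1), whatever the real definitions look like, module init must clamp the requested values and round them up to a power of two (e.g. with roundup_pow_of_two()) before the rings are allocated. With 0444 permissions the effective sizes can be read back at runtime under /sys/module/<module>/parameters/.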