Diffstat (limited to 'drivers/net/mlx4/en_rx.c')
-rw-r--r--  drivers/net/mlx4/en_rx.c | 96 ------------------------------------
 1 file changed, 0 insertions(+), 96 deletions(-)
diff --git a/drivers/net/mlx4/en_rx.c b/drivers/net/mlx4/en_rx.c
index 5a14899c1e25..91bdfdfd431f 100644
--- a/drivers/net/mlx4/en_rx.c
+++ b/drivers/net/mlx4/en_rx.c
@@ -269,31 +269,6 @@ reduce_rings:
 	return 0;
 }
 
-static int mlx4_en_fill_rx_buf(struct net_device *dev,
-			       struct mlx4_en_rx_ring *ring)
-{
-	struct mlx4_en_priv *priv = netdev_priv(dev);
-	int num = 0;
-	int err;
-
-	while ((u32) (ring->prod - ring->cons) < ring->actual_size) {
-		err = mlx4_en_prepare_rx_desc(priv, ring, ring->prod &
-					      ring->size_mask);
-		if (err) {
-			if (netif_msg_rx_err(priv))
-				en_warn(priv, "Failed preparing rx descriptor\n");
-			priv->port_stats.rx_alloc_failed++;
-			break;
-		}
-		++num;
-		++ring->prod;
-	}
-	if ((u32) (ring->prod - ring->cons) == ring->actual_size)
-		ring->full = 1;
-
-	return num;
-}
-
 static void mlx4_en_free_rx_buf(struct mlx4_en_priv *priv,
 				struct mlx4_en_rx_ring *ring)
 {
@@ -312,42 +287,6 @@ static void mlx4_en_free_rx_buf(struct mlx4_en_priv *priv,
 	}
 }
 
-
-void mlx4_en_rx_refill(struct work_struct *work)
-{
-	struct delayed_work *delay = to_delayed_work(work);
-	struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv,
-						 refill_task);
-	struct mlx4_en_dev *mdev = priv->mdev;
-	struct net_device *dev = priv->dev;
-	struct mlx4_en_rx_ring *ring;
-	int need_refill = 0;
-	int i;
-
-	mutex_lock(&mdev->state_lock);
-	if (!mdev->device_up || !priv->port_up)
-		goto out;
-
-	/* We only get here if there are no receive buffers, so we can't race
-	 * with Rx interrupts while filling buffers */
-	for (i = 0; i < priv->rx_ring_num; i++) {
-		ring = &priv->rx_ring[i];
-		if (ring->need_refill) {
-			if (mlx4_en_fill_rx_buf(dev, ring)) {
-				ring->need_refill = 0;
-				mlx4_en_update_rx_prod_db(ring);
-			} else
-				need_refill = 1;
-		}
-	}
-	if (need_refill)
-		queue_delayed_work(mdev->workqueue, &priv->refill_task, HZ);
-
-out:
-	mutex_unlock(&mdev->state_lock);
-}
-
-
 int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
 			   struct mlx4_en_rx_ring *ring, u32 size, u16 stride)
 {
@@ -457,9 +396,6 @@ int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
 			ring_ind--;
 			goto err_allocator;
 		}
-
-		/* Fill Rx buffers */
-		ring->full = 0;
 	}
 	err = mlx4_en_fill_rx_buffers(priv);
 	if (err)
@@ -647,33 +583,6 @@ static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
 	return skb;
 }
 
-static void mlx4_en_copy_desc(struct mlx4_en_priv *priv,
-			      struct mlx4_en_rx_ring *ring,
-			      int from, int to, int num)
-{
-	struct skb_frag_struct *skb_frags_from;
-	struct skb_frag_struct *skb_frags_to;
-	struct mlx4_en_rx_desc *rx_desc_from;
-	struct mlx4_en_rx_desc *rx_desc_to;
-	int from_index, to_index;
-	int nr, i;
-
-	for (i = 0; i < num; i++) {
-		from_index = (from + i) & ring->size_mask;
-		to_index = (to + i) & ring->size_mask;
-		skb_frags_from = ring->rx_info + (from_index << priv->log_rx_info);
-		skb_frags_to = ring->rx_info + (to_index << priv->log_rx_info);
-		rx_desc_from = ring->buf + (from_index << ring->log_stride);
-		rx_desc_to = ring->buf + (to_index << ring->log_stride);
-
-		for (nr = 0; nr < priv->num_frags; nr++) {
-			skb_frags_to[nr].page = skb_frags_from[nr].page;
-			skb_frags_to[nr].page_offset = skb_frags_from[nr].page_offset;
-			rx_desc_to->data[nr].addr = rx_desc_from->data[nr].addr;
-		}
-	}
-}
-
 
 int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget)
 {
@@ -821,11 +730,6 @@ out:
 	wmb(); /* ensure HW sees CQ consumer before we post new buffers */
 	ring->cons = cq->mcq.cons_index;
 	ring->prod += polled; /* Polled descriptors were realocated in place */
-	if (unlikely(!ring->full)) {
-		mlx4_en_copy_desc(priv, ring, ring->cons - polled,
-				  ring->prod - polled, polled);
-		mlx4_en_fill_rx_buf(dev, ring);
-	}
 	mlx4_en_update_rx_prod_db(ring);
 	return polled;
 }