author		Yevgeny Petrilin <yevgenyp@mellanox.co.il>	2009-05-23 23:17:11 -0400
committer	David S. Miller <davem@davemloft.net>	2009-05-25 03:35:53 -0400
commit		38aab07c14adbf3c7257793d764a91923341e96a (patch)
tree		ac27575621410056b4ec970db6b233a50600d15b /drivers/net/mlx4
parent		8e29291650ee53a8609d9cc3a303dcbe9aa9b542 (diff)
mlx4_en: Fix partial rings feature
In case of allocation failure, the actual ring size is rounded down to the
nearest power of 2. The remaining descriptors are freed, the CQ and SRQ are
allocated with the actual size, and the ring's size mask is updated
accordingly.

Signed-off-by: Yevgeny Petrilin <yevgenyp@mellanox.co.il>
Signed-off-by: David S. Miller <davem@davemloft.net>
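The power-of-two requirement comes from how the driver indexes its rings:
descriptor slots are selected with "index = counter & ring->size_mask" rather
than a modulo, which is only correct when the ring size is a power of two. A
minimal user-space sketch of the shrink step follows; the stand-alone
rounddown_pow_of_two() below is an illustrative reimplementation of the kernel
macro of the same name, and the sizes are made-up example values:

#include <stdio.h>

/* Illustrative stand-in for the kernel's rounddown_pow_of_two():
 * the largest power of two <= n, for nonzero n. */
static unsigned int rounddown_pow_of_two(unsigned int n)
{
	unsigned int p = 1;

	while (p * 2 <= n)
		p *= 2;
	return p;
}

int main(void)
{
	/* Example: only 300 of the requested 1024 RX descriptors could
	 * be allocated, so the ring shrinks to 256. */
	unsigned int actual_size = 300;
	unsigned int new_size = rounddown_pow_of_two(actual_size);
	unsigned int size_mask = new_size - 1;

	printf("ring size %u -> %u, size_mask 0x%x\n",
	       actual_size, new_size, size_mask);

	/* With a power-of-two size, "counter & size_mask" equals
	 * "counter % new_size", so the producer/consumer counters can
	 * wrap without corrupting the ring index. */
	return 0;
}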
Diffstat (limited to 'drivers/net/mlx4')
-rw-r--r--	drivers/net/mlx4/en_cq.c	3
-rw-r--r--	drivers/net/mlx4/en_netdev.c	23
-rw-r--r--	drivers/net/mlx4/en_rx.c	71
3 files changed, 58 insertions(+), 39 deletions(-)
diff --git a/drivers/net/mlx4/en_cq.c b/drivers/net/mlx4/en_cq.c
index a276125b709b..21786ad4455e 100644
--- a/drivers/net/mlx4/en_cq.c
+++ b/drivers/net/mlx4/en_cq.c
@@ -89,6 +89,9 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
 	*cq->mcq.arm_db = 0;
 	memset(cq->buf, 0, cq->buf_size);
 
+	if (!cq->is_tx)
+		cq->size = priv->rx_ring[cq->ring].actual_size;
+
 	err = mlx4_cq_alloc(mdev->dev, cq->size, &cq->wqres.mtt, &mdev->priv_uar,
 			    cq->wqres.db.dma, &cq->mcq, cq->vector, cq->is_tx);
 	if (err)
diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c
index be487fa8d9a7..0cd185a2e089 100644
--- a/drivers/net/mlx4/en_netdev.c
+++ b/drivers/net/mlx4/en_netdev.c
@@ -556,7 +556,6 @@ int mlx4_en_start_port(struct net_device *dev)
 	struct mlx4_en_dev *mdev = priv->mdev;
 	struct mlx4_en_cq *cq;
 	struct mlx4_en_tx_ring *tx_ring;
-	struct mlx4_en_rx_ring *rx_ring;
 	int rx_index = 0;
 	int tx_index = 0;
 	int err = 0;
@@ -572,10 +571,15 @@ int mlx4_en_start_port(struct net_device *dev)
 	dev->mtu = min(dev->mtu, priv->max_mtu);
 	mlx4_en_calc_rx_buf(dev);
 	mlx4_dbg(DRV, priv, "Rx buf size:%d\n", priv->rx_skb_size);
+
 	/* Configure rx cq's and rings */
+	err = mlx4_en_activate_rx_rings(priv);
+	if (err) {
+		mlx4_err(mdev, "Failed to activate RX rings\n");
+		return err;
+	}
 	for (i = 0; i < priv->rx_ring_num; i++) {
 		cq = &priv->rx_cq[i];
-		rx_ring = &priv->rx_ring[i];
 
 		err = mlx4_en_activate_cq(priv, cq);
 		if (err) {
@@ -591,20 +595,14 @@ int mlx4_en_start_port(struct net_device *dev)
 			goto cq_err;
 		}
 		mlx4_en_arm_cq(priv, cq);
-
+		priv->rx_ring[i].cqn = cq->mcq.cqn;
 		++rx_index;
 	}
 
-	err = mlx4_en_activate_rx_rings(priv);
-	if (err) {
-		mlx4_err(mdev, "Failed to activate RX rings\n");
-		goto cq_err;
-	}
-
 	err = mlx4_en_config_rss_steer(priv);
 	if (err) {
 		mlx4_err(mdev, "Failed configuring rss steering\n");
-		goto rx_err;
+		goto cq_err;
 	}
 
 	/* Configure tx cq's and rings */
@@ -691,12 +689,11 @@ tx_err:
 	}
 
 	mlx4_en_release_rss_steer(priv);
-rx_err:
-	for (i = 0; i < priv->rx_ring_num; i++)
-		mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]);
 cq_err:
 	while (rx_index--)
 		mlx4_en_deactivate_cq(priv, &priv->rx_cq[rx_index]);
+	for (i = 0; i < priv->rx_ring_num; i++)
+		mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]);
 
 	return err; /* need to close devices */
 }
diff --git a/drivers/net/mlx4/en_rx.c b/drivers/net/mlx4/en_rx.c
index 9ee873e872b3..6bfab6e5ba1d 100644
--- a/drivers/net/mlx4/en_rx.c
+++ b/drivers/net/mlx4/en_rx.c
@@ -202,12 +202,35 @@ static inline void mlx4_en_update_rx_prod_db(struct mlx4_en_rx_ring *ring)
 	*ring->wqres.db.db = cpu_to_be32(ring->prod & 0xffff);
 }
 
+static void mlx4_en_free_rx_desc(struct mlx4_en_priv *priv,
+				 struct mlx4_en_rx_ring *ring,
+				 int index)
+{
+	struct mlx4_en_dev *mdev = priv->mdev;
+	struct skb_frag_struct *skb_frags;
+	struct mlx4_en_rx_desc *rx_desc = ring->buf + (index << ring->log_stride);
+	dma_addr_t dma;
+	int nr;
+
+	skb_frags = ring->rx_info + (index << priv->log_rx_info);
+	for (nr = 0; nr < priv->num_frags; nr++) {
+		mlx4_dbg(DRV, priv, "Freeing fragment:%d\n", nr);
+		dma = be64_to_cpu(rx_desc->data[nr].addr);
+
+		mlx4_dbg(DRV, priv, "Unmaping buffer at dma:0x%llx\n", (u64) dma);
+		pci_unmap_single(mdev->pdev, dma, skb_frags[nr].size,
+				 PCI_DMA_FROMDEVICE);
+		put_page(skb_frags[nr].page);
+	}
+}
+
 static int mlx4_en_fill_rx_buffers(struct mlx4_en_priv *priv)
 {
 	struct mlx4_en_dev *mdev = priv->mdev;
 	struct mlx4_en_rx_ring *ring;
 	int ring_ind;
 	int buf_ind;
+	int new_size;
 
 	for (buf_ind = 0; buf_ind < priv->prof->rx_ring_size; buf_ind++) {
 		for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
@@ -220,18 +243,30 @@ static int mlx4_en_fill_rx_buffers(struct mlx4_en_priv *priv)
220 "enough rx buffers\n"); 243 "enough rx buffers\n");
221 return -ENOMEM; 244 return -ENOMEM;
222 } else { 245 } else {
223 if (netif_msg_rx_err(priv)) 246 new_size = rounddown_pow_of_two(ring->actual_size);
224 mlx4_warn(mdev, 247 mlx4_warn(mdev, "Only %d buffers allocated "
225 "Only %d buffers allocated\n", 248 "reducing ring size to %d",
226 ring->actual_size); 249 ring->actual_size, new_size);
227 goto out; 250 goto reduce_rings;
228 } 251 }
229 } 252 }
230 ring->actual_size++; 253 ring->actual_size++;
231 ring->prod++; 254 ring->prod++;
232 } 255 }
233 } 256 }
234out: 257 return 0;
258
259reduce_rings:
260 for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
261 ring = &priv->rx_ring[ring_ind];
262 while (ring->actual_size > new_size) {
263 ring->actual_size--;
264 ring->prod--;
265 mlx4_en_free_rx_desc(priv, ring, ring->actual_size);
266 }
267 ring->size_mask = ring->actual_size - 1;
268 }
269
235 return 0; 270 return 0;
236} 271}
237 272
@@ -255,7 +290,7 @@ static int mlx4_en_fill_rx_buf(struct net_device *dev,
 		++num;
 		++ring->prod;
 	}
-	if ((u32) (ring->prod - ring->cons) == ring->size)
+	if ((u32) (ring->prod - ring->cons) == ring->actual_size)
 		ring->full = 1;
 
 	return num;
@@ -264,33 +299,17 @@ static int mlx4_en_fill_rx_buf(struct net_device *dev,
 static void mlx4_en_free_rx_buf(struct mlx4_en_priv *priv,
 				struct mlx4_en_rx_ring *ring)
 {
-	struct mlx4_en_dev *mdev = priv->mdev;
-	struct skb_frag_struct *skb_frags;
-	struct mlx4_en_rx_desc *rx_desc;
-	dma_addr_t dma;
 	int index;
-	int nr;
 
 	mlx4_dbg(DRV, priv, "Freeing Rx buf - cons:%d prod:%d\n",
 			ring->cons, ring->prod);
 
 	/* Unmap and free Rx buffers */
-	BUG_ON((u32) (ring->prod - ring->cons) > ring->size);
+	BUG_ON((u32) (ring->prod - ring->cons) > ring->actual_size);
 	while (ring->cons != ring->prod) {
 		index = ring->cons & ring->size_mask;
-		rx_desc = ring->buf + (index << ring->log_stride);
-		skb_frags = ring->rx_info + (index << priv->log_rx_info);
 		mlx4_dbg(DRV, priv, "Processing descriptor:%d\n", index);
-
-		for (nr = 0; nr < priv->num_frags; nr++) {
-			mlx4_dbg(DRV, priv, "Freeing fragment:%d\n", nr);
-			dma = be64_to_cpu(rx_desc->data[nr].addr);
-
-			mlx4_dbg(DRV, priv, "Unmaping buffer at dma:0x%llx\n", (u64) dma);
-			pci_unmap_single(mdev->pdev, dma, skb_frags[nr].size,
-					 PCI_DMA_FROMDEVICE);
-			put_page(skb_frags[nr].page);
-		}
+		mlx4_en_free_rx_desc(priv, ring, index);
 		++ring->cons;
 	}
 }
@@ -454,7 +473,7 @@ int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
 		mlx4_en_update_rx_prod_db(ring);
 
 		/* Configure SRQ representing the ring */
-		ring->srq.max = ring->size;
+		ring->srq.max = ring->actual_size;
 		ring->srq.max_gs = max_gs;
 		ring->srq.wqe_shift = ilog2(ring->stride);
 