author    Yevgeny Petrilin <yevgenyp@mellanox.co.il>  2009-05-23 23:17:11 -0400
committer David S. Miller <davem@davemloft.net>       2009-05-25 03:35:53 -0400
commit    38aab07c14adbf3c7257793d764a91923341e96a (patch)
tree      ac27575621410056b4ec970db6b233a50600d15b /drivers/net/mlx4/en_rx.c
parent    8e29291650ee53a8609d9cc3a303dcbe9aa9b542 (diff)
mlx4_en: Fix partial rings feature
In case of allocation failure, the actual ring size is rounded down to the
nearest power of 2. The remaining descriptors are freed. The CQ and SRQ are
allocated with the actual size, and the mask is updated.

Signed-off-by: Yevgeny Petrilin <yevgenyp@mellanox.co.il>
Signed-off-by: David S. Miller <davem@davemloft.net>
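As a sanity check of the arithmetic above, here is a minimal userspace sketch
of the shrink-and-remask step, not the driver code itself: struct ring and
free_desc() are hypothetical stand-ins for the mlx4_en equivalents, and
rounddown_pow2() reimplements the kernel's rounddown_pow_of_two(). Only the
power-of-two rounding and the size_mask update mirror what the patch does.

/* Minimal sketch: shrink a partially allocated ring to the nearest
 * power of two and refresh its index mask. */
#include <stdio.h>

struct ring {				/* stand-in for mlx4_en_rx_ring */
	unsigned int actual_size;	/* descriptors successfully allocated */
	unsigned int prod;		/* producer index */
	unsigned int size_mask;		/* valid only for power-of-two sizes */
};

/* Same result as the kernel's rounddown_pow_of_two() for n > 0. */
static unsigned int rounddown_pow2(unsigned int n)
{
	while (n & (n - 1))
		n &= n - 1;		/* clear the lowest set bit */
	return n;
}

static void free_desc(unsigned int index)
{
	/* stand-in for the per-descriptor unmap + put_page work */
	printf("freeing descriptor %u\n", index);
}

static void reduce_ring(struct ring *r)
{
	unsigned int new_size = rounddown_pow2(r->actual_size);

	while (r->actual_size > new_size) {
		r->actual_size--;
		r->prod--;
		free_desc(r->actual_size);
	}
	/* index = counter & size_mask only works for power-of-two sizes */
	r->size_mask = r->actual_size - 1;
}

int main(void)
{
	struct ring r = { .actual_size = 300, .prod = 300, .size_mask = 0 };

	reduce_ring(&r);		/* frees descriptors 299..256 */
	printf("size=%u mask=0x%x\n", r.actual_size, r.size_mask);
	return 0;
}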
Diffstat (limited to 'drivers/net/mlx4/en_rx.c')
-rw-r--r--  drivers/net/mlx4/en_rx.c | 71
1 file changed, 45 insertions(+), 26 deletions(-)
diff --git a/drivers/net/mlx4/en_rx.c b/drivers/net/mlx4/en_rx.c
index 9ee873e872b3..6bfab6e5ba1d 100644
--- a/drivers/net/mlx4/en_rx.c
+++ b/drivers/net/mlx4/en_rx.c
@@ -202,12 +202,35 @@ static inline void mlx4_en_update_rx_prod_db(struct mlx4_en_rx_ring *ring)
 	*ring->wqres.db.db = cpu_to_be32(ring->prod & 0xffff);
 }
 
+static void mlx4_en_free_rx_desc(struct mlx4_en_priv *priv,
+				 struct mlx4_en_rx_ring *ring,
+				 int index)
+{
+	struct mlx4_en_dev *mdev = priv->mdev;
+	struct skb_frag_struct *skb_frags;
+	struct mlx4_en_rx_desc *rx_desc = ring->buf + (index << ring->log_stride);
+	dma_addr_t dma;
+	int nr;
+
+	skb_frags = ring->rx_info + (index << priv->log_rx_info);
+	for (nr = 0; nr < priv->num_frags; nr++) {
+		mlx4_dbg(DRV, priv, "Freeing fragment:%d\n", nr);
+		dma = be64_to_cpu(rx_desc->data[nr].addr);
+
+		mlx4_dbg(DRV, priv, "Unmaping buffer at dma:0x%llx\n", (u64) dma);
+		pci_unmap_single(mdev->pdev, dma, skb_frags[nr].size,
+				 PCI_DMA_FROMDEVICE);
+		put_page(skb_frags[nr].page);
+	}
+}
+
 static int mlx4_en_fill_rx_buffers(struct mlx4_en_priv *priv)
 {
 	struct mlx4_en_dev *mdev = priv->mdev;
 	struct mlx4_en_rx_ring *ring;
 	int ring_ind;
 	int buf_ind;
+	int new_size;
 
 	for (buf_ind = 0; buf_ind < priv->prof->rx_ring_size; buf_ind++) {
 		for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
@@ -220,18 +243,30 @@ static int mlx4_en_fill_rx_buffers(struct mlx4_en_priv *priv)
 						       "enough rx buffers\n");
 					return -ENOMEM;
 				} else {
-					if (netif_msg_rx_err(priv))
-						mlx4_warn(mdev,
-							  "Only %d buffers allocated\n",
-							  ring->actual_size);
-					goto out;
+					new_size = rounddown_pow_of_two(ring->actual_size);
+					mlx4_warn(mdev, "Only %d buffers allocated "
+							"reducing ring size to %d",
+						  ring->actual_size, new_size);
+					goto reduce_rings;
 				}
 			}
 			ring->actual_size++;
 			ring->prod++;
 		}
 	}
-out:
+	return 0;
+
+reduce_rings:
+	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
+		ring = &priv->rx_ring[ring_ind];
+		while (ring->actual_size > new_size) {
+			ring->actual_size--;
+			ring->prod--;
+			mlx4_en_free_rx_desc(priv, ring, ring->actual_size);
+		}
+		ring->size_mask = ring->actual_size - 1;
+	}
+
 	return 0;
 }
 
@@ -255,7 +290,7 @@ static int mlx4_en_fill_rx_buf(struct net_device *dev,
 		++num;
 		++ring->prod;
 	}
-	if ((u32) (ring->prod - ring->cons) == ring->size)
+	if ((u32) (ring->prod - ring->cons) == ring->actual_size)
 		ring->full = 1;
 
 	return num;
@@ -264,33 +299,17 @@ static int mlx4_en_fill_rx_buf(struct net_device *dev,
 static void mlx4_en_free_rx_buf(struct mlx4_en_priv *priv,
 				struct mlx4_en_rx_ring *ring)
 {
-	struct mlx4_en_dev *mdev = priv->mdev;
-	struct skb_frag_struct *skb_frags;
-	struct mlx4_en_rx_desc *rx_desc;
-	dma_addr_t dma;
 	int index;
-	int nr;
 
 	mlx4_dbg(DRV, priv, "Freeing Rx buf - cons:%d prod:%d\n",
 		 ring->cons, ring->prod);
 
 	/* Unmap and free Rx buffers */
-	BUG_ON((u32) (ring->prod - ring->cons) > ring->size);
+	BUG_ON((u32) (ring->prod - ring->cons) > ring->actual_size);
 	while (ring->cons != ring->prod) {
 		index = ring->cons & ring->size_mask;
-		rx_desc = ring->buf + (index << ring->log_stride);
-		skb_frags = ring->rx_info + (index << priv->log_rx_info);
 		mlx4_dbg(DRV, priv, "Processing descriptor:%d\n", index);
-
-		for (nr = 0; nr < priv->num_frags; nr++) {
-			mlx4_dbg(DRV, priv, "Freeing fragment:%d\n", nr);
-			dma = be64_to_cpu(rx_desc->data[nr].addr);
-
-			mlx4_dbg(DRV, priv, "Unmaping buffer at dma:0x%llx\n", (u64) dma);
-			pci_unmap_single(mdev->pdev, dma, skb_frags[nr].size,
-					 PCI_DMA_FROMDEVICE);
-			put_page(skb_frags[nr].page);
-		}
+		mlx4_en_free_rx_desc(priv, ring, index);
 		++ring->cons;
 	}
 }
@@ -454,7 +473,7 @@ int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
 	mlx4_en_update_rx_prod_db(ring);
 
 	/* Configure SRQ representing the ring */
-	ring->srq.max = ring->size;
+	ring->srq.max = ring->actual_size;
 	ring->srq.max_gs = max_gs;
 	ring->srq.wqe_shift = ilog2(ring->stride);
 