author    Sergei Shtylyov <sergei.shtylyov@cogentembedded.com>  2015-07-21 18:31:59 -0400
committer David S. Miller <davem@davemloft.net>                 2015-07-22 01:34:27 -0400
commit    d8b48911fd249bc1a3431a9515619403c96d6af3 (patch)
tree      8c2c420f08b034b03499ac9d8a0dd27d73f0a6ab
parent    a46fa260f6f5e8f80a725b28e4aee5a04d1bd79e (diff)
ravb: fix ring memory allocation
The driver is written as if it could adapt to a low-memory situation by allocating fewer RX skbs and TX aligned buffers than the respective RX/TX ring sizes. In reality, though, the driver would malfunction in this case. Stop being overly smart and just fail in such a situation -- this is achieved by moving the memory allocation from ravb_ring_format() to ravb_ring_init(). We leave the dma_map_single() calls in place but make their failure non-fatal by marking the corresponding RX descriptors with a zero data size, which should prevent DMA to invalid addresses.

Signed-off-by: Sergei Shtylyov <sergei.shtylyov@cogentembedded.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
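For context, the two ideas in the fix can be shown outside the kernel. Below is a minimal userspace C sketch, not the driver code: fake_map(), ring_init(), and ring_format() are hypothetical stand-ins for dma_map_single(), ravb_ring_init(), and ravb_ring_format(). It illustrates the shape the patch moves to: buffer allocation happens up front and fails hard, while a per-descriptor mapping failure is non-fatal and merely marks the descriptor with a zero data size that the receive path then skips.

#include <stdio.h>
#include <stdlib.h>

#define RING_SIZE 4
#define BUF_SIZE  1536

struct desc {
	void     *dptr;  /* buffer address ("DMA address" stand-in) */
	unsigned  ds_cc; /* data size; 0 marks a failed mapping */
};

static struct desc ring[RING_SIZE];
static void *bufs[RING_SIZE];

/* Hypothetical stand-in for dma_map_single(); fails for one slot. */
static void *fake_map(void *buf, int i)
{
	return i == 2 ? NULL : buf;
}

/* Like the patched ravb_ring_init(): allocate everything, fail hard. */
static int ring_init(void)
{
	int i;

	for (i = 0; i < RING_SIZE; i++) {
		bufs[i] = malloc(BUF_SIZE);
		if (!bufs[i])
			return -1;	/* caller unwinds; no short ring */
	}
	return 0;
}

/* Like the patched ravb_ring_format(): a mapping failure is non-fatal,
 * the descriptor just gets a zero data size.
 */
static void ring_format(void)
{
	int i;

	for (i = 0; i < RING_SIZE; i++) {
		void *mapped = fake_map(bufs[i], i);

		ring[i].dptr  = mapped;
		ring[i].ds_cc = mapped ? BUF_SIZE : 0;
	}
}

int main(void)
{
	int i;

	if (ring_init())
		return 1;
	ring_format();

	/* Like the patched ravb_rx(): skip 0-byte descriptors. */
	for (i = 0; i < RING_SIZE; i++) {
		if (!ring[i].ds_cc)
			continue;
		printf("desc %d usable, %u bytes\n", i, ring[i].ds_cc);
	}
	for (i = 0; i < RING_SIZE; i++)
		free(bufs[i]);
	return 0;
}

The design choice this mirrors: since the hardware never gets a non-zero length for a failed mapping, it has no reason to DMA into the unmapped buffer, so the ring can stay fully populated instead of being silently truncated.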
 drivers/net/ethernet/renesas/ravb_main.c | 59 ++++++++++++++++++++++++++++++++++-------------------------
 1 file changed, 34 insertions(+), 25 deletions(-)
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index d08c250e843e..78849dd4ef8e 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -228,9 +228,7 @@ static void ravb_ring_format(struct net_device *ndev, int q)
 	struct ravb_desc *desc = NULL;
 	int rx_ring_size = sizeof(*rx_desc) * priv->num_rx_ring[q];
 	int tx_ring_size = sizeof(*tx_desc) * priv->num_tx_ring[q];
-	struct sk_buff *skb;
 	dma_addr_t dma_addr;
-	void *buffer;
 	int i;
 
 	priv->cur_rx[q] = 0;
@@ -241,41 +239,28 @@ static void ravb_ring_format(struct net_device *ndev, int q)
 	memset(priv->rx_ring[q], 0, rx_ring_size);
 	/* Build RX ring buffer */
 	for (i = 0; i < priv->num_rx_ring[q]; i++) {
-		priv->rx_skb[q][i] = NULL;
-		skb = netdev_alloc_skb(ndev, PKT_BUF_SZ + RAVB_ALIGN - 1);
-		if (!skb)
-			break;
-		ravb_set_buffer_align(skb);
 		/* RX descriptor */
 		rx_desc = &priv->rx_ring[q][i];
 		/* The size of the buffer should be on 16-byte boundary. */
 		rx_desc->ds_cc = cpu_to_le16(ALIGN(PKT_BUF_SZ, 16));
-		dma_addr = dma_map_single(&ndev->dev, skb->data,
+		dma_addr = dma_map_single(&ndev->dev, priv->rx_skb[q][i]->data,
 					  ALIGN(PKT_BUF_SZ, 16),
 					  DMA_FROM_DEVICE);
-		if (dma_mapping_error(&ndev->dev, dma_addr)) {
-			dev_kfree_skb(skb);
-			break;
-		}
-		priv->rx_skb[q][i] = skb;
+		/* We just set the data size to 0 for a failed mapping which
+		 * should prevent DMA from happening...
+		 */
+		if (dma_mapping_error(&ndev->dev, dma_addr))
+			rx_desc->ds_cc = cpu_to_le16(0);
 		rx_desc->dptr = cpu_to_le32(dma_addr);
 		rx_desc->die_dt = DT_FEMPTY;
 	}
 	rx_desc = &priv->rx_ring[q][i];
 	rx_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]);
 	rx_desc->die_dt = DT_LINKFIX; /* type */
-	priv->dirty_rx[q] = (u32)(i - priv->num_rx_ring[q]);
 
 	memset(priv->tx_ring[q], 0, tx_ring_size);
 	/* Build TX ring buffer */
 	for (i = 0; i < priv->num_tx_ring[q]; i++) {
-		priv->tx_skb[q][i] = NULL;
-		priv->tx_buffers[q][i] = NULL;
-		buffer = kmalloc(PKT_BUF_SZ + RAVB_ALIGN - 1, GFP_KERNEL);
-		if (!buffer)
-			break;
-		/* Aligned TX buffer */
-		priv->tx_buffers[q][i] = buffer;
 		tx_desc = &priv->tx_ring[q][i];
 		tx_desc->die_dt = DT_EEMPTY;
 	}
@@ -298,7 +283,10 @@ static void ravb_ring_format(struct net_device *ndev, int q)
 static int ravb_ring_init(struct net_device *ndev, int q)
 {
 	struct ravb_private *priv = netdev_priv(ndev);
+	struct sk_buff *skb;
 	int ring_size;
+	void *buffer;
+	int i;
 
 	/* Allocate RX and TX skb rings */
 	priv->rx_skb[q] = kcalloc(priv->num_rx_ring[q],
@@ -308,12 +296,28 @@ static int ravb_ring_init(struct net_device *ndev, int q)
 	if (!priv->rx_skb[q] || !priv->tx_skb[q])
 		goto error;
 
+	for (i = 0; i < priv->num_rx_ring[q]; i++) {
+		skb = netdev_alloc_skb(ndev, PKT_BUF_SZ + RAVB_ALIGN - 1);
+		if (!skb)
+			goto error;
+		ravb_set_buffer_align(skb);
+		priv->rx_skb[q][i] = skb;
+	}
+
 	/* Allocate rings for the aligned buffers */
 	priv->tx_buffers[q] = kcalloc(priv->num_tx_ring[q],
 				      sizeof(*priv->tx_buffers[q]), GFP_KERNEL);
 	if (!priv->tx_buffers[q])
 		goto error;
 
+	for (i = 0; i < priv->num_tx_ring[q]; i++) {
+		buffer = kmalloc(PKT_BUF_SZ + RAVB_ALIGN - 1, GFP_KERNEL);
+		if (!buffer)
+			goto error;
+		/* Aligned TX buffer */
+		priv->tx_buffers[q][i] = buffer;
+	}
+
 	/* Allocate all RX descriptors. */
 	ring_size = sizeof(struct ravb_ex_rx_desc) * (priv->num_rx_ring[q] + 1);
 	priv->rx_ring[q] = dma_alloc_coherent(NULL, ring_size,
@@ -524,6 +528,10 @@ static bool ravb_rx(struct net_device *ndev, int *quota, int q)
 		if (--boguscnt < 0)
 			break;
 
+		/* We use 0-byte descriptors to mark the DMA mapping errors */
+		if (!pkt_len)
+			continue;
+
 		if (desc_status & MSC_MC)
 			stats->multicast++;
 
@@ -587,10 +595,11 @@ static bool ravb_rx(struct net_device *ndev, int *quota, int q)
 					       le16_to_cpu(desc->ds_cc),
 					       DMA_FROM_DEVICE);
 			skb_checksum_none_assert(skb);
-			if (dma_mapping_error(&ndev->dev, dma_addr)) {
-				dev_kfree_skb_any(skb);
-				break;
-			}
+			/* We just set the data size to 0 for a failed mapping
+			 * which should prevent DMA from happening...
+			 */
+			if (dma_mapping_error(&ndev->dev, dma_addr))
+				desc->ds_cc = cpu_to_le16(0);
 			desc->dptr = cpu_to_le32(dma_addr);
 			priv->rx_skb[q][entry] = skb;
 		}