author		Alexander Kochetkov <al.kochet@gmail.com>	2017-12-15 12:20:06 -0500
committer	David S. Miller <davem@davemloft.net>	2017-12-19 13:24:23 -0500
commit		e688822d035b494071ecbadcccbd6f3325fb0f59 (patch)
tree		ff923bd3c48d2f90919f102f76b99ef7272b5974
parent		7352e252b5bf40d59342494a70354a2d436fd0cd (diff)
net: arc_emac: fix arc_emac_rx() error paths
arc_emac_rx() has some issues found by code review.

In case netdev_alloc_skb_ip_align() or dma_map_single() fails, the RX FIFO entry will not be returned to the EMAC. In case dma_map_single() fails, the previously allocated skb is lost to the driver; at the same time, the address of the newly allocated skb will not be provided to the EMAC.

Signed-off-by: Alexander Kochetkov <al.kochet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
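For illustration, the error handling this patch introduces boils down to the following ordering inside the RX loop (a condensed sketch using the driver's own identifiers, not the full loop body; see the diff below for the complete change):

	skb = netdev_alloc_skb_ip_align(ndev, EMAC_BUFFER_SIZE);
	if (unlikely(!skb)) {
		/* no replacement buffer: give the descriptor back to the
		 * EMAC untouched so the RX FIFO keeps no holes
		 */
		rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE);
		stats->rx_errors++;
		stats->rx_dropped++;
		continue;
	}

	addr = dma_map_single(&ndev->dev, (void *)skb->data,
			      EMAC_BUFFER_SIZE, DMA_FROM_DEVICE);
	if (dma_mapping_error(&ndev->dev, addr)) {
		/* mapping failed: free the new skb, return the descriptor
		 * to the EMAC and keep the old skb for the next pass
		 */
		dev_kfree_skb(skb);
		rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE);
		stats->rx_errors++;
		stats->rx_dropped++;
		continue;
	}

	/* both steps succeeded: only now unmap the old skb, hand it to
	 * netif_receive_skb() and install the freshly mapped replacement
	 */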
-rw-r--r--	drivers/net/ethernet/arc/emac_main.c	53
1 file changed, 31 insertions(+), 22 deletions(-)
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c
index 3241af1ce718..5b422be56165 100644
--- a/drivers/net/ethernet/arc/emac_main.c
+++ b/drivers/net/ethernet/arc/emac_main.c
@@ -210,39 +210,48 @@ static int arc_emac_rx(struct net_device *ndev, int budget)
 			continue;
 		}
 
-		pktlen = info & LEN_MASK;
-		stats->rx_packets++;
-		stats->rx_bytes += pktlen;
-		skb = rx_buff->skb;
-		skb_put(skb, pktlen);
-		skb->dev = ndev;
-		skb->protocol = eth_type_trans(skb, ndev);
-
-		dma_unmap_single(&ndev->dev, dma_unmap_addr(rx_buff, addr),
-				 dma_unmap_len(rx_buff, len), DMA_FROM_DEVICE);
-
-		/* Prepare the BD for next cycle */
-		rx_buff->skb = netdev_alloc_skb_ip_align(ndev,
-							 EMAC_BUFFER_SIZE);
-		if (unlikely(!rx_buff->skb)) {
+		/* Prepare the BD for next cycle. netif_receive_skb()
+		 * only if new skb was allocated and mapped to avoid holes
+		 * in the RX fifo.
+		 */
+		skb = netdev_alloc_skb_ip_align(ndev, EMAC_BUFFER_SIZE);
+		if (unlikely(!skb)) {
+			if (net_ratelimit())
+				netdev_err(ndev, "cannot allocate skb\n");
+			/* Return ownership to EMAC */
+			rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE);
 			stats->rx_errors++;
-			/* Because receive_skb is below, increment rx_dropped */
 			stats->rx_dropped++;
 			continue;
 		}
 
-		/* receive_skb only if new skb was allocated to avoid holes */
-		netif_receive_skb(skb);
-
-		addr = dma_map_single(&ndev->dev, (void *)rx_buff->skb->data,
+		addr = dma_map_single(&ndev->dev, (void *)skb->data,
 				      EMAC_BUFFER_SIZE, DMA_FROM_DEVICE);
 		if (dma_mapping_error(&ndev->dev, addr)) {
 			if (net_ratelimit())
-				netdev_err(ndev, "cannot dma map\n");
-			dev_kfree_skb(rx_buff->skb);
+				netdev_err(ndev, "cannot map dma buffer\n");
+			dev_kfree_skb(skb);
+			/* Return ownership to EMAC */
+			rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE);
 			stats->rx_errors++;
+			stats->rx_dropped++;
 			continue;
 		}
+
+		/* unmap previosly mapped skb */
+		dma_unmap_single(&ndev->dev, dma_unmap_addr(rx_buff, addr),
+				 dma_unmap_len(rx_buff, len), DMA_FROM_DEVICE);
+
+		pktlen = info & LEN_MASK;
+		stats->rx_packets++;
+		stats->rx_bytes += pktlen;
+		skb_put(rx_buff->skb, pktlen);
+		rx_buff->skb->dev = ndev;
+		rx_buff->skb->protocol = eth_type_trans(rx_buff->skb, ndev);
+
+		netif_receive_skb(rx_buff->skb);
+
+		rx_buff->skb = skb;
 		dma_unmap_addr_set(rx_buff, addr, addr);
 		dma_unmap_len_set(rx_buff, len, EMAC_BUFFER_SIZE);
 