about summary refs log tree commit diff stats
path: root/drivers/net/ethernet/nvidia/forcedeth.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/net/ethernet/nvidia/forcedeth.c')
-rw-r--r-- drivers/net/ethernet/nvidia/forcedeth.c | 35
1 files changed, 35 insertions, 0 deletions
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index 653487dc7b52..87fa5919c455 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -1821,6 +1821,11 @@ static int nv_alloc_rx(struct net_device *dev)
1821 skb->data, 1821 skb->data,
1822 skb_tailroom(skb), 1822 skb_tailroom(skb),
1823 PCI_DMA_FROMDEVICE); 1823 PCI_DMA_FROMDEVICE);
1824 if (pci_dma_mapping_error(np->pci_dev,
1825 np->put_rx_ctx->dma)) {
1826 kfree_skb(skb);
1827 goto packet_dropped;
1828 }
1824 np->put_rx_ctx->dma_len = skb_tailroom(skb); 1829 np->put_rx_ctx->dma_len = skb_tailroom(skb);
1825 np->put_rx.orig->buf = cpu_to_le32(np->put_rx_ctx->dma); 1830 np->put_rx.orig->buf = cpu_to_le32(np->put_rx_ctx->dma);
1826 wmb(); 1831 wmb();
@@ -1830,6 +1835,7 @@ static int nv_alloc_rx(struct net_device *dev)
1830 if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx)) 1835 if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
1831 np->put_rx_ctx = np->first_rx_ctx; 1836 np->put_rx_ctx = np->first_rx_ctx;
1832 } else { 1837 } else {
1838packet_dropped:
1833 u64_stats_update_begin(&np->swstats_rx_syncp); 1839 u64_stats_update_begin(&np->swstats_rx_syncp);
1834 np->stat_rx_dropped++; 1840 np->stat_rx_dropped++;
1835 u64_stats_update_end(&np->swstats_rx_syncp); 1841 u64_stats_update_end(&np->swstats_rx_syncp);
@@ -1856,6 +1862,11 @@ static int nv_alloc_rx_optimized(struct net_device *dev)
1856 skb->data, 1862 skb->data,
1857 skb_tailroom(skb), 1863 skb_tailroom(skb),
1858 PCI_DMA_FROMDEVICE); 1864 PCI_DMA_FROMDEVICE);
1865 if (pci_dma_mapping_error(np->pci_dev,
1866 np->put_rx_ctx->dma)) {
1867 kfree_skb(skb);
1868 goto packet_dropped;
1869 }
1859 np->put_rx_ctx->dma_len = skb_tailroom(skb); 1870 np->put_rx_ctx->dma_len = skb_tailroom(skb);
1860 np->put_rx.ex->bufhigh = cpu_to_le32(dma_high(np->put_rx_ctx->dma)); 1871 np->put_rx.ex->bufhigh = cpu_to_le32(dma_high(np->put_rx_ctx->dma));
1861 np->put_rx.ex->buflow = cpu_to_le32(dma_low(np->put_rx_ctx->dma)); 1872 np->put_rx.ex->buflow = cpu_to_le32(dma_low(np->put_rx_ctx->dma));
@@ -1866,6 +1877,7 @@ static int nv_alloc_rx_optimized(struct net_device *dev)
1866 if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx)) 1877 if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
1867 np->put_rx_ctx = np->first_rx_ctx; 1878 np->put_rx_ctx = np->first_rx_ctx;
1868 } else { 1879 } else {
1880packet_dropped:
1869 u64_stats_update_begin(&np->swstats_rx_syncp); 1881 u64_stats_update_begin(&np->swstats_rx_syncp);
1870 np->stat_rx_dropped++; 1882 np->stat_rx_dropped++;
1871 u64_stats_update_end(&np->swstats_rx_syncp); 1883 u64_stats_update_end(&np->swstats_rx_syncp);
@@ -2217,6 +2229,15 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
2217 bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size; 2229 bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
2218 np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt, 2230 np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt,
2219 PCI_DMA_TODEVICE); 2231 PCI_DMA_TODEVICE);
2232 if (pci_dma_mapping_error(np->pci_dev,
2233 np->put_tx_ctx->dma)) {
2234 /* on DMA mapping error - drop the packet */
2235 kfree_skb(skb);
2236 u64_stats_update_begin(&np->swstats_tx_syncp);
2237 np->stat_tx_dropped++;
2238 u64_stats_update_end(&np->swstats_tx_syncp);
2239 return NETDEV_TX_OK;
2240 }
2220 np->put_tx_ctx->dma_len = bcnt; 2241 np->put_tx_ctx->dma_len = bcnt;
2221 np->put_tx_ctx->dma_single = 1; 2242 np->put_tx_ctx->dma_single = 1;
2222 put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma); 2243 put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma);
@@ -2337,6 +2358,15 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
2337 bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size; 2358 bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
2338 np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt, 2359 np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt,
2339 PCI_DMA_TODEVICE); 2360 PCI_DMA_TODEVICE);
2361 if (pci_dma_mapping_error(np->pci_dev,
2362 np->put_tx_ctx->dma)) {
2363 /* on DMA mapping error - drop the packet */
2364 kfree_skb(skb);
2365 u64_stats_update_begin(&np->swstats_tx_syncp);
2366 np->stat_tx_dropped++;
2367 u64_stats_update_end(&np->swstats_tx_syncp);
2368 return NETDEV_TX_OK;
2369 }
2340 np->put_tx_ctx->dma_len = bcnt; 2370 np->put_tx_ctx->dma_len = bcnt;
2341 np->put_tx_ctx->dma_single = 1; 2371 np->put_tx_ctx->dma_single = 1;
2342 put_tx->bufhigh = cpu_to_le32(dma_high(np->put_tx_ctx->dma)); 2372 put_tx->bufhigh = cpu_to_le32(dma_high(np->put_tx_ctx->dma));
@@ -5003,6 +5033,11 @@ static int nv_loopback_test(struct net_device *dev)
5003 test_dma_addr = pci_map_single(np->pci_dev, tx_skb->data, 5033 test_dma_addr = pci_map_single(np->pci_dev, tx_skb->data,
5004 skb_tailroom(tx_skb), 5034 skb_tailroom(tx_skb),
5005 PCI_DMA_FROMDEVICE); 5035 PCI_DMA_FROMDEVICE);
5036 if (pci_dma_mapping_error(np->pci_dev,
5037 test_dma_addr)) {
5038 dev_kfree_skb_any(tx_skb);
5039 goto out;
5040 }
5006 pkt_data = skb_put(tx_skb, pkt_len); 5041 pkt_data = skb_put(tx_skb, pkt_len);
5007 for (i = 0; i < pkt_len; i++) 5042 for (i = 0; i < pkt_len; i++)
5008 pkt_data[i] = (u8)(i & 0xff); 5043 pkt_data[i] = (u8)(i & 0xff);