|           |                                                                    |                           |
|-----------|--------------------------------------------------------------------|---------------------------|
| author    | David S. Miller <davem@davemloft.net>                              | 2013-01-15 15:05:59 -0500 |
| committer | David S. Miller <davem@davemloft.net>                              | 2013-01-15 15:05:59 -0500 |
| commit    | 4b87f922598acf91eee18f71688a33f54f57bcde (patch)                   |                           |
| tree      | 9cdfe30c6b96c47093da5392ed82d147290cd64c /drivers/net/ethernet/nvidia |                        |
| parent    | 55eb555d9674e2ebe9d4de0146602f96ff18e7d6 (diff)                    |                           |
| parent    | daf3ec688e057f6060fb9bb0819feac7a8bbf45c (diff)                    |                           |
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Conflicts:
Documentation/networking/ip-sysctl.txt
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
Both conflicts were simply overlapping context.
A build fix for qlcnic is in here too, simply removing the added
devinit annotations which no longer exist.
Signed-off-by: David S. Miller <davem@davemloft.net>
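For context on the build fix mentioned above: the 3.8 cycle removed the __devinit/__devexit section annotations from the kernel, so annotations that had just been added in the net tree no longer exist and are simply deleted again. A generic before/after illustration follows; the `example_probe()` declaration is hypothetical and is not the qlcnic code itself (that diff is not shown on this page):

```c
#include <linux/pci.h>

/* Hypothetical probe declaration, for illustration only.
 *
 * Before the annotation removal it would have been written as:
 *
 *     static int __devinit example_probe(struct pci_dev *pdev,
 *                                        const struct pci_device_id *ent);
 *
 * With __devinit gone from the tree, the build fix amounts to deleting
 * the marker and nothing else:
 */
static int example_probe(struct pci_dev *pdev,
			 const struct pci_device_id *ent);
```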
Diffstat (limited to 'drivers/net/ethernet/nvidia')
-rw-r--r-- | drivers/net/ethernet/nvidia/forcedeth.c | 35
1 file changed, 35 insertions, 0 deletions
```diff
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index ceb1617645c8..0b8de12bcbca 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -1821,6 +1821,11 @@ static int nv_alloc_rx(struct net_device *dev)
 							     skb->data,
 							     skb_tailroom(skb),
 							     PCI_DMA_FROMDEVICE);
+			if (pci_dma_mapping_error(np->pci_dev,
+						  np->put_rx_ctx->dma)) {
+				kfree_skb(skb);
+				goto packet_dropped;
+			}
 			np->put_rx_ctx->dma_len = skb_tailroom(skb);
 			np->put_rx.orig->buf = cpu_to_le32(np->put_rx_ctx->dma);
 			wmb();
@@ -1830,6 +1835,7 @@ static int nv_alloc_rx(struct net_device *dev)
 			if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
 				np->put_rx_ctx = np->first_rx_ctx;
 		} else {
+packet_dropped:
 			u64_stats_update_begin(&np->swstats_rx_syncp);
 			np->stat_rx_dropped++;
 			u64_stats_update_end(&np->swstats_rx_syncp);
@@ -1856,6 +1862,11 @@ static int nv_alloc_rx_optimized(struct net_device *dev)
 							     skb->data,
 							     skb_tailroom(skb),
 							     PCI_DMA_FROMDEVICE);
+			if (pci_dma_mapping_error(np->pci_dev,
+						  np->put_rx_ctx->dma)) {
+				kfree_skb(skb);
+				goto packet_dropped;
+			}
 			np->put_rx_ctx->dma_len = skb_tailroom(skb);
 			np->put_rx.ex->bufhigh = cpu_to_le32(dma_high(np->put_rx_ctx->dma));
 			np->put_rx.ex->buflow = cpu_to_le32(dma_low(np->put_rx_ctx->dma));
@@ -1866,6 +1877,7 @@ static int nv_alloc_rx_optimized(struct net_device *dev)
 			if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
 				np->put_rx_ctx = np->first_rx_ctx;
 		} else {
+packet_dropped:
 			u64_stats_update_begin(&np->swstats_rx_syncp);
 			np->stat_rx_dropped++;
 			u64_stats_update_end(&np->swstats_rx_syncp);
@@ -2217,6 +2229,15 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
 		np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt,
 						PCI_DMA_TODEVICE);
+		if (pci_dma_mapping_error(np->pci_dev,
+					  np->put_tx_ctx->dma)) {
+			/* on DMA mapping error - drop the packet */
+			kfree_skb(skb);
+			u64_stats_update_begin(&np->swstats_tx_syncp);
+			np->stat_tx_dropped++;
+			u64_stats_update_end(&np->swstats_tx_syncp);
+			return NETDEV_TX_OK;
+		}
 		np->put_tx_ctx->dma_len = bcnt;
 		np->put_tx_ctx->dma_single = 1;
 		put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma);
@@ -2337,6 +2358,15 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
 		bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
 		np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt,
 						PCI_DMA_TODEVICE);
+		if (pci_dma_mapping_error(np->pci_dev,
+					  np->put_tx_ctx->dma)) {
+			/* on DMA mapping error - drop the packet */
+			kfree_skb(skb);
+			u64_stats_update_begin(&np->swstats_tx_syncp);
+			np->stat_tx_dropped++;
+			u64_stats_update_end(&np->swstats_tx_syncp);
+			return NETDEV_TX_OK;
+		}
 		np->put_tx_ctx->dma_len = bcnt;
 		np->put_tx_ctx->dma_single = 1;
 		put_tx->bufhigh = cpu_to_le32(dma_high(np->put_tx_ctx->dma));
@@ -5002,6 +5032,11 @@ static int nv_loopback_test(struct net_device *dev)
 	test_dma_addr = pci_map_single(np->pci_dev, tx_skb->data,
 				       skb_tailroom(tx_skb),
 				       PCI_DMA_FROMDEVICE);
+	if (pci_dma_mapping_error(np->pci_dev,
+				  test_dma_addr)) {
+		dev_kfree_skb_any(tx_skb);
+		goto out;
+	}
 	pkt_data = skb_put(tx_skb, pkt_len);
 	for (i = 0; i < pkt_len; i++)
 		pkt_data[i] = (u8)(i & 0xff);
```
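Every hunk above applies the same defensive pattern: after pci_map_single() the returned handle is checked with pci_dma_mapping_error(), and on failure the skb is freed and the packet accounted as dropped instead of an invalid bus address being written into a descriptor. A minimal sketch of that pattern outside the driver is shown below; the helper name `map_rx_buffer_checked` and its -ENOMEM return convention are illustrative, not part of forcedeth.

```c
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/skbuff.h>

/* Sketch of the error-checked mapping pattern this patch introduces.
 * "pdev" and "skb" are assumed to come from the caller; the real driver
 * additionally bumps its software drop counters under a u64_stats sync
 * (the packet_dropped label in the RX hunks above).
 */
static int map_rx_buffer_checked(struct pci_dev *pdev, struct sk_buff *skb,
				 dma_addr_t *dma)
{
	*dma = pci_map_single(pdev, skb->data, skb_tailroom(skb),
			      PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(pdev, *dma)) {
		/* Mapping failed (e.g. IOMMU or bounce-buffer exhaustion):
		 * free the skb and let the caller account the drop. */
		kfree_skb(skb);
		return -ENOMEM;
	}
	return 0;
}
```

Note that the TX hunks still return NETDEV_TX_OK after dropping: the skb has been consumed, and requeueing a packet whose mapping failed would only retry a transient resource problem.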