Diffstat (limited to 'drivers/net')
-rw-r--r-- | drivers/net/arm/ep93xx_eth.c       |   4
-rw-r--r-- | drivers/net/bnx2x_main.c           |  14
-rw-r--r-- | drivers/net/cassini.c              |  12
-rw-r--r-- | drivers/net/cxgb3/sge.c            |   2
-rw-r--r-- | drivers/net/e100.c                 |   2
-rw-r--r-- | drivers/net/e1000e/ethtool.c       |   4
-rw-r--r-- | drivers/net/e1000e/netdev.c        |  11
-rw-r--r-- | drivers/net/ibmveth.c              | 219
-rw-r--r-- | drivers/net/ibmveth.h              |   5
-rw-r--r-- | drivers/net/iseries_veth.c         |   4
-rw-r--r-- | drivers/net/mlx4/eq.c              |   2
-rw-r--r-- | drivers/net/pasemi_mac.c           |   6
-rw-r--r-- | drivers/net/ppp_generic.c          |   6
-rw-r--r-- | drivers/net/qla3xxx.c              |  12
-rw-r--r-- | drivers/net/s2io.c                 |  48
-rw-r--r-- | drivers/net/sfc/rx.c               |   4
-rw-r--r-- | drivers/net/sfc/tx.c               |   7
-rw-r--r-- | drivers/net/spider_net.c           |   4
-rw-r--r-- | drivers/net/tc35815.c              |   4
-rw-r--r-- | drivers/net/virtio_net.c           | 114
-rw-r--r-- | drivers/net/wireless/ath5k/base.c  |   4
21 files changed, 354 insertions(+), 134 deletions(-)
diff --git a/drivers/net/arm/ep93xx_eth.c b/drivers/net/arm/ep93xx_eth.c
index 7a14980f3472..18d3eeb7eab2 100644
--- a/drivers/net/arm/ep93xx_eth.c
+++ b/drivers/net/arm/ep93xx_eth.c
@@ -482,7 +482,7 @@ static int ep93xx_alloc_buffers(struct ep93xx_priv *ep) | |||
482 | goto err; | 482 | goto err; |
483 | 483 | ||
484 | d = dma_map_single(NULL, page, PAGE_SIZE, DMA_FROM_DEVICE); | 484 | d = dma_map_single(NULL, page, PAGE_SIZE, DMA_FROM_DEVICE); |
485 | if (dma_mapping_error(d)) { | 485 | if (dma_mapping_error(NULL, d)) { |
486 | free_page((unsigned long)page); | 486 | free_page((unsigned long)page); |
487 | goto err; | 487 | goto err; |
488 | } | 488 | } |
@@ -505,7 +505,7 @@ static int ep93xx_alloc_buffers(struct ep93xx_priv *ep) | |||
505 | goto err; | 505 | goto err; |
506 | 506 | ||
507 | d = dma_map_single(NULL, page, PAGE_SIZE, DMA_TO_DEVICE); | 507 | d = dma_map_single(NULL, page, PAGE_SIZE, DMA_TO_DEVICE); |
508 | if (dma_mapping_error(d)) { | 508 | if (dma_mapping_error(NULL, d)) { |
509 | free_page((unsigned long)page); | 509 | free_page((unsigned long)page); |
510 | goto err; | 510 | goto err; |
511 | } | 511 | } |
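Most of the hunks in this series follow the same pattern: dma_mapping_error() and pci_dma_mapping_error() now take the device (or PCI device) that performed the mapping as their first argument, so the error check can be routed through that device's DMA ops. A minimal sketch of the new calling convention, using a hypothetical helper rather than code from any of the drivers below:

/*
 * Illustrative sketch only -- example_map_rx_buffer() is a hypothetical
 * helper, not part of this patch. It shows the convention the hunks in
 * this series convert to: the mapping-error check takes the mapping
 * device as its first argument.
 */
static int example_map_rx_buffer(struct device *dev, void *buf, size_t len,
                                 dma_addr_t *addr_out)
{
        dma_addr_t addr = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);

        /* previously: dma_mapping_error(addr); now the device is passed too */
        if (dma_mapping_error(dev, addr))
                return -ENOMEM;

        *addr_out = addr;
        return 0;
}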
diff --git a/drivers/net/bnx2x_main.c b/drivers/net/bnx2x_main.c
index 0263bef9cc6d..af251a5df844 100644
--- a/drivers/net/bnx2x_main.c
+++ b/drivers/net/bnx2x_main.c
@@ -814,7 +814,7 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp, | |||
814 | } | 814 | } |
815 | 815 | ||
816 | /* release skb */ | 816 | /* release skb */ |
817 | BUG_TRAP(skb); | 817 | WARN_ON(!skb); |
818 | dev_kfree_skb(skb); | 818 | dev_kfree_skb(skb); |
819 | tx_buf->first_bd = 0; | 819 | tx_buf->first_bd = 0; |
820 | tx_buf->skb = NULL; | 820 | tx_buf->skb = NULL; |
@@ -837,9 +837,9 @@ static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp) | |||
837 | used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS; | 837 | used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS; |
838 | 838 | ||
839 | #ifdef BNX2X_STOP_ON_ERROR | 839 | #ifdef BNX2X_STOP_ON_ERROR |
840 | BUG_TRAP(used >= 0); | 840 | WARN_ON(used < 0); |
841 | BUG_TRAP(used <= fp->bp->tx_ring_size); | 841 | WARN_ON(used > fp->bp->tx_ring_size); |
842 | BUG_TRAP((fp->bp->tx_ring_size - used) <= MAX_TX_AVAIL); | 842 | WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL); |
843 | #endif | 843 | #endif |
844 | 844 | ||
845 | return (s16)(fp->bp->tx_ring_size) - used; | 845 | return (s16)(fp->bp->tx_ring_size) - used; |
@@ -1020,7 +1020,7 @@ static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp, | |||
1020 | 1020 | ||
1021 | mapping = pci_map_page(bp->pdev, page, 0, BCM_PAGE_SIZE*PAGES_PER_SGE, | 1021 | mapping = pci_map_page(bp->pdev, page, 0, BCM_PAGE_SIZE*PAGES_PER_SGE, |
1022 | PCI_DMA_FROMDEVICE); | 1022 | PCI_DMA_FROMDEVICE); |
1023 | if (unlikely(dma_mapping_error(mapping))) { | 1023 | if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { |
1024 | __free_pages(page, PAGES_PER_SGE_SHIFT); | 1024 | __free_pages(page, PAGES_PER_SGE_SHIFT); |
1025 | return -ENOMEM; | 1025 | return -ENOMEM; |
1026 | } | 1026 | } |
@@ -1048,7 +1048,7 @@ static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp, | |||
1048 | 1048 | ||
1049 | mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size, | 1049 | mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size, |
1050 | PCI_DMA_FROMDEVICE); | 1050 | PCI_DMA_FROMDEVICE); |
1051 | if (unlikely(dma_mapping_error(mapping))) { | 1051 | if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { |
1052 | dev_kfree_skb(skb); | 1052 | dev_kfree_skb(skb); |
1053 | return -ENOMEM; | 1053 | return -ENOMEM; |
1054 | } | 1054 | } |
@@ -4374,7 +4374,7 @@ static void bnx2x_init_rx_rings(struct bnx2x *bp) | |||
4374 | } | 4374 | } |
4375 | ring_prod = NEXT_RX_IDX(ring_prod); | 4375 | ring_prod = NEXT_RX_IDX(ring_prod); |
4376 | cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod); | 4376 | cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod); |
4377 | BUG_TRAP(ring_prod > i); | 4377 | WARN_ON(ring_prod <= i); |
4378 | } | 4378 | } |
4379 | 4379 | ||
4380 | fp->rx_bd_prod = ring_prod; | 4380 | fp->rx_bd_prod = ring_prod; |
diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c
index 83768df27806..f1936d51b458 100644
--- a/drivers/net/cassini.c
+++ b/drivers/net/cassini.c
@@ -576,6 +576,18 @@ static void cas_spare_recover(struct cas *cp, const gfp_t flags) | |||
576 | list_for_each_safe(elem, tmp, &list) { | 576 | list_for_each_safe(elem, tmp, &list) { |
577 | cas_page_t *page = list_entry(elem, cas_page_t, list); | 577 | cas_page_t *page = list_entry(elem, cas_page_t, list); |
578 | 578 | ||
579 | /* | ||
580 | * With the lockless pagecache, cassini buffering scheme gets | ||
581 | * slightly less accurate: we might find that a page has an | ||
582 | * elevated reference count here, due to a speculative ref, | ||
583 | * and skip it as in-use. Ideally we would be able to reclaim | ||
584 | * it. However this would be such a rare case, it doesn't | ||
585 | * matter too much as we should pick it up the next time round. | ||
586 | * | ||
587 | * Importantly, if we find that the page has a refcount of 1 | ||
588 | here (our refcount), then we know it is definitely not in use | ||
589 | * so we can reuse it. | ||
590 | */ | ||
579 | if (page_count(page->buffer) > 1) | 591 | if (page_count(page->buffer) > 1) |
580 | continue; | 592 | continue; |
581 | 593 | ||
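The comment added above explains when a recycled receive page may be reused. As an illustration (a hypothetical helper, not code from cassini.c), the test reduces to checking that the driver holds the only reference:

/*
 * Hypothetical helper, for illustration only -- not part of cassini.c.
 * A spare RX page is reusable only when the driver holds the sole
 * reference; a speculative reference from the lockless pagecache makes
 * page_count() > 1, and such a page is skipped until a later pass.
 */
static bool cas_page_reusable(cas_page_t *page)
{
        return page_count(page->buffer) == 1;
}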
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
index a96331c875e6..1b0861d73ab7 100644
--- a/drivers/net/cxgb3/sge.c
+++ b/drivers/net/cxgb3/sge.c
@@ -386,7 +386,7 @@ static inline int add_one_rx_buf(void *va, unsigned int len, | |||
386 | dma_addr_t mapping; | 386 | dma_addr_t mapping; |
387 | 387 | ||
388 | mapping = pci_map_single(pdev, va, len, PCI_DMA_FROMDEVICE); | 388 | mapping = pci_map_single(pdev, va, len, PCI_DMA_FROMDEVICE); |
389 | if (unlikely(pci_dma_mapping_error(mapping))) | 389 | if (unlikely(pci_dma_mapping_error(pdev, mapping))) |
390 | return -ENOMEM; | 390 | return -ENOMEM; |
391 | 391 | ||
392 | pci_unmap_addr_set(sd, dma_addr, mapping); | 392 | pci_unmap_addr_set(sd, dma_addr, mapping); |
diff --git a/drivers/net/e100.c b/drivers/net/e100.c
index 1037b1332312..19d32a227be1 100644
--- a/drivers/net/e100.c
+++ b/drivers/net/e100.c
@@ -1790,7 +1790,7 @@ static int e100_rx_alloc_skb(struct nic *nic, struct rx *rx) | |||
1790 | rx->dma_addr = pci_map_single(nic->pdev, rx->skb->data, | 1790 | rx->dma_addr = pci_map_single(nic->pdev, rx->skb->data, |
1791 | RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL); | 1791 | RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL); |
1792 | 1792 | ||
1793 | if (pci_dma_mapping_error(rx->dma_addr)) { | 1793 | if (pci_dma_mapping_error(nic->pdev, rx->dma_addr)) { |
1794 | dev_kfree_skb_any(rx->skb); | 1794 | dev_kfree_skb_any(rx->skb); |
1795 | rx->skb = NULL; | 1795 | rx->skb = NULL; |
1796 | rx->dma_addr = 0; | 1796 | rx->dma_addr = 0; |
diff --git a/drivers/net/e1000e/ethtool.c b/drivers/net/e1000e/ethtool.c
index a14561f40db0..9350564065e7 100644
--- a/drivers/net/e1000e/ethtool.c
+++ b/drivers/net/e1000e/ethtool.c
@@ -1090,7 +1090,7 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter) | |||
1090 | tx_ring->buffer_info[i].dma = | 1090 | tx_ring->buffer_info[i].dma = |
1091 | pci_map_single(pdev, skb->data, skb->len, | 1091 | pci_map_single(pdev, skb->data, skb->len, |
1092 | PCI_DMA_TODEVICE); | 1092 | PCI_DMA_TODEVICE); |
1093 | if (pci_dma_mapping_error(tx_ring->buffer_info[i].dma)) { | 1093 | if (pci_dma_mapping_error(pdev, tx_ring->buffer_info[i].dma)) { |
1094 | ret_val = 4; | 1094 | ret_val = 4; |
1095 | goto err_nomem; | 1095 | goto err_nomem; |
1096 | } | 1096 | } |
@@ -1153,7 +1153,7 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter) | |||
1153 | rx_ring->buffer_info[i].dma = | 1153 | rx_ring->buffer_info[i].dma = |
1154 | pci_map_single(pdev, skb->data, 2048, | 1154 | pci_map_single(pdev, skb->data, 2048, |
1155 | PCI_DMA_FROMDEVICE); | 1155 | PCI_DMA_FROMDEVICE); |
1156 | if (pci_dma_mapping_error(rx_ring->buffer_info[i].dma)) { | 1156 | if (pci_dma_mapping_error(pdev, rx_ring->buffer_info[i].dma)) { |
1157 | ret_val = 8; | 1157 | ret_val = 8; |
1158 | goto err_nomem; | 1158 | goto err_nomem; |
1159 | } | 1159 | } |
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index 9c0f56b3c518..d13677899767 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -195,7 +195,7 @@ map_skb: | |||
195 | buffer_info->dma = pci_map_single(pdev, skb->data, | 195 | buffer_info->dma = pci_map_single(pdev, skb->data, |
196 | adapter->rx_buffer_len, | 196 | adapter->rx_buffer_len, |
197 | PCI_DMA_FROMDEVICE); | 197 | PCI_DMA_FROMDEVICE); |
198 | if (pci_dma_mapping_error(buffer_info->dma)) { | 198 | if (pci_dma_mapping_error(pdev, buffer_info->dma)) { |
199 | dev_err(&pdev->dev, "RX DMA map failed\n"); | 199 | dev_err(&pdev->dev, "RX DMA map failed\n"); |
200 | adapter->rx_dma_failed++; | 200 | adapter->rx_dma_failed++; |
201 | break; | 201 | break; |
@@ -265,7 +265,7 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter, | |||
265 | ps_page->page, | 265 | ps_page->page, |
266 | 0, PAGE_SIZE, | 266 | 0, PAGE_SIZE, |
267 | PCI_DMA_FROMDEVICE); | 267 | PCI_DMA_FROMDEVICE); |
268 | if (pci_dma_mapping_error(ps_page->dma)) { | 268 | if (pci_dma_mapping_error(pdev, ps_page->dma)) { |
269 | dev_err(&adapter->pdev->dev, | 269 | dev_err(&adapter->pdev->dev, |
270 | "RX DMA page map failed\n"); | 270 | "RX DMA page map failed\n"); |
271 | adapter->rx_dma_failed++; | 271 | adapter->rx_dma_failed++; |
@@ -300,7 +300,7 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter, | |||
300 | buffer_info->dma = pci_map_single(pdev, skb->data, | 300 | buffer_info->dma = pci_map_single(pdev, skb->data, |
301 | adapter->rx_ps_bsize0, | 301 | adapter->rx_ps_bsize0, |
302 | PCI_DMA_FROMDEVICE); | 302 | PCI_DMA_FROMDEVICE); |
303 | if (pci_dma_mapping_error(buffer_info->dma)) { | 303 | if (pci_dma_mapping_error(pdev, buffer_info->dma)) { |
304 | dev_err(&pdev->dev, "RX DMA map failed\n"); | 304 | dev_err(&pdev->dev, "RX DMA map failed\n"); |
305 | adapter->rx_dma_failed++; | 305 | adapter->rx_dma_failed++; |
306 | /* cleanup skb */ | 306 | /* cleanup skb */ |
@@ -3344,7 +3344,7 @@ static int e1000_tx_map(struct e1000_adapter *adapter, | |||
3344 | skb->data + offset, | 3344 | skb->data + offset, |
3345 | size, | 3345 | size, |
3346 | PCI_DMA_TODEVICE); | 3346 | PCI_DMA_TODEVICE); |
3347 | if (pci_dma_mapping_error(buffer_info->dma)) { | 3347 | if (pci_dma_mapping_error(adapter->pdev, buffer_info->dma)) { |
3348 | dev_err(&adapter->pdev->dev, "TX DMA map failed\n"); | 3348 | dev_err(&adapter->pdev->dev, "TX DMA map failed\n"); |
3349 | adapter->tx_dma_failed++; | 3349 | adapter->tx_dma_failed++; |
3350 | return -1; | 3350 | return -1; |
@@ -3382,7 +3382,8 @@ static int e1000_tx_map(struct e1000_adapter *adapter, | |||
3382 | offset, | 3382 | offset, |
3383 | size, | 3383 | size, |
3384 | PCI_DMA_TODEVICE); | 3384 | PCI_DMA_TODEVICE); |
3385 | if (pci_dma_mapping_error(buffer_info->dma)) { | 3385 | if (pci_dma_mapping_error(adapter->pdev, |
3386 | buffer_info->dma)) { | ||
3386 | dev_err(&adapter->pdev->dev, | 3387 | dev_err(&adapter->pdev->dev, |
3387 | "TX DMA page map failed\n"); | 3388 | "TX DMA page map failed\n"); |
3388 | adapter->tx_dma_failed++; | 3389 | adapter->tx_dma_failed++; |
diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
index 00527805e4f1..91ec9fdc7184 100644
--- a/drivers/net/ibmveth.c
+++ b/drivers/net/ibmveth.c
@@ -33,6 +33,7 @@ | |||
33 | */ | 33 | */ |
34 | 34 | ||
35 | #include <linux/module.h> | 35 | #include <linux/module.h> |
36 | #include <linux/moduleparam.h> | ||
36 | #include <linux/types.h> | 37 | #include <linux/types.h> |
37 | #include <linux/errno.h> | 38 | #include <linux/errno.h> |
38 | #include <linux/ioport.h> | 39 | #include <linux/ioport.h> |
@@ -52,7 +53,9 @@ | |||
52 | #include <asm/hvcall.h> | 53 | #include <asm/hvcall.h> |
53 | #include <asm/atomic.h> | 54 | #include <asm/atomic.h> |
54 | #include <asm/vio.h> | 55 | #include <asm/vio.h> |
56 | #include <asm/iommu.h> | ||
55 | #include <asm/uaccess.h> | 57 | #include <asm/uaccess.h> |
58 | #include <asm/firmware.h> | ||
56 | #include <linux/seq_file.h> | 59 | #include <linux/seq_file.h> |
57 | 60 | ||
58 | #include "ibmveth.h" | 61 | #include "ibmveth.h" |
@@ -94,8 +97,10 @@ static void ibmveth_proc_register_adapter(struct ibmveth_adapter *adapter); | |||
94 | static void ibmveth_proc_unregister_adapter(struct ibmveth_adapter *adapter); | 97 | static void ibmveth_proc_unregister_adapter(struct ibmveth_adapter *adapter); |
95 | static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance); | 98 | static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance); |
96 | static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter); | 99 | static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter); |
100 | static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev); | ||
97 | static struct kobj_type ktype_veth_pool; | 101 | static struct kobj_type ktype_veth_pool; |
98 | 102 | ||
103 | |||
99 | #ifdef CONFIG_PROC_FS | 104 | #ifdef CONFIG_PROC_FS |
100 | #define IBMVETH_PROC_DIR "ibmveth" | 105 | #define IBMVETH_PROC_DIR "ibmveth" |
101 | static struct proc_dir_entry *ibmveth_proc_dir; | 106 | static struct proc_dir_entry *ibmveth_proc_dir; |
@@ -226,16 +231,16 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struc | |||
226 | u32 i; | 231 | u32 i; |
227 | u32 count = pool->size - atomic_read(&pool->available); | 232 | u32 count = pool->size - atomic_read(&pool->available); |
228 | u32 buffers_added = 0; | 233 | u32 buffers_added = 0; |
234 | struct sk_buff *skb; | ||
235 | unsigned int free_index, index; | ||
236 | u64 correlator; | ||
237 | unsigned long lpar_rc; | ||
238 | dma_addr_t dma_addr; | ||
229 | 239 | ||
230 | mb(); | 240 | mb(); |
231 | 241 | ||
232 | for(i = 0; i < count; ++i) { | 242 | for(i = 0; i < count; ++i) { |
233 | struct sk_buff *skb; | ||
234 | unsigned int free_index, index; | ||
235 | u64 correlator; | ||
236 | union ibmveth_buf_desc desc; | 243 | union ibmveth_buf_desc desc; |
237 | unsigned long lpar_rc; | ||
238 | dma_addr_t dma_addr; | ||
239 | 244 | ||
240 | skb = alloc_skb(pool->buff_size, GFP_ATOMIC); | 245 | skb = alloc_skb(pool->buff_size, GFP_ATOMIC); |
241 | 246 | ||
@@ -255,6 +260,9 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struc | |||
255 | dma_addr = dma_map_single(&adapter->vdev->dev, skb->data, | 260 | dma_addr = dma_map_single(&adapter->vdev->dev, skb->data, |
256 | pool->buff_size, DMA_FROM_DEVICE); | 261 | pool->buff_size, DMA_FROM_DEVICE); |
257 | 262 | ||
263 | if (dma_mapping_error(&adapter->vdev->dev, dma_addr)) | ||
264 | goto failure; | ||
265 | |||
258 | pool->free_map[free_index] = IBM_VETH_INVALID_MAP; | 266 | pool->free_map[free_index] = IBM_VETH_INVALID_MAP; |
259 | pool->dma_addr[index] = dma_addr; | 267 | pool->dma_addr[index] = dma_addr; |
260 | pool->skbuff[index] = skb; | 268 | pool->skbuff[index] = skb; |
@@ -267,20 +275,9 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struc | |||
267 | 275 | ||
268 | lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc); | 276 | lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc); |
269 | 277 | ||
270 | if(lpar_rc != H_SUCCESS) { | 278 | if (lpar_rc != H_SUCCESS) |
271 | pool->free_map[free_index] = index; | 279 | goto failure; |
272 | pool->skbuff[index] = NULL; | 280 | else { |
273 | if (pool->consumer_index == 0) | ||
274 | pool->consumer_index = pool->size - 1; | ||
275 | else | ||
276 | pool->consumer_index--; | ||
277 | dma_unmap_single(&adapter->vdev->dev, | ||
278 | pool->dma_addr[index], pool->buff_size, | ||
279 | DMA_FROM_DEVICE); | ||
280 | dev_kfree_skb_any(skb); | ||
281 | adapter->replenish_add_buff_failure++; | ||
282 | break; | ||
283 | } else { | ||
284 | buffers_added++; | 281 | buffers_added++; |
285 | adapter->replenish_add_buff_success++; | 282 | adapter->replenish_add_buff_success++; |
286 | } | 283 | } |
@@ -288,6 +285,24 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struc | |||
288 | 285 | ||
289 | mb(); | 286 | mb(); |
290 | atomic_add(buffers_added, &(pool->available)); | 287 | atomic_add(buffers_added, &(pool->available)); |
288 | return; | ||
289 | |||
290 | failure: | ||
291 | pool->free_map[free_index] = index; | ||
292 | pool->skbuff[index] = NULL; | ||
293 | if (pool->consumer_index == 0) | ||
294 | pool->consumer_index = pool->size - 1; | ||
295 | else | ||
296 | pool->consumer_index--; | ||
297 | if (!dma_mapping_error(&adapter->vdev->dev, dma_addr)) | ||
298 | dma_unmap_single(&adapter->vdev->dev, | ||
299 | pool->dma_addr[index], pool->buff_size, | ||
300 | DMA_FROM_DEVICE); | ||
301 | dev_kfree_skb_any(skb); | ||
302 | adapter->replenish_add_buff_failure++; | ||
303 | |||
304 | mb(); | ||
305 | atomic_add(buffers_added, &(pool->available)); | ||
291 | } | 306 | } |
292 | 307 | ||
293 | /* replenish routine */ | 308 | /* replenish routine */ |
@@ -297,7 +312,7 @@ static void ibmveth_replenish_task(struct ibmveth_adapter *adapter) | |||
297 | 312 | ||
298 | adapter->replenish_task_cycles++; | 313 | adapter->replenish_task_cycles++; |
299 | 314 | ||
300 | for(i = 0; i < IbmVethNumBufferPools; i++) | 315 | for (i = (IbmVethNumBufferPools - 1); i >= 0; i--) |
301 | if(adapter->rx_buff_pool[i].active) | 316 | if(adapter->rx_buff_pool[i].active) |
302 | ibmveth_replenish_buffer_pool(adapter, | 317 | ibmveth_replenish_buffer_pool(adapter, |
303 | &adapter->rx_buff_pool[i]); | 318 | &adapter->rx_buff_pool[i]); |
@@ -433,11 +448,11 @@ static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter) | |||
433 | static void ibmveth_cleanup(struct ibmveth_adapter *adapter) | 448 | static void ibmveth_cleanup(struct ibmveth_adapter *adapter) |
434 | { | 449 | { |
435 | int i; | 450 | int i; |
451 | struct device *dev = &adapter->vdev->dev; | ||
436 | 452 | ||
437 | if(adapter->buffer_list_addr != NULL) { | 453 | if(adapter->buffer_list_addr != NULL) { |
438 | if(!dma_mapping_error(adapter->buffer_list_dma)) { | 454 | if (!dma_mapping_error(dev, adapter->buffer_list_dma)) { |
439 | dma_unmap_single(&adapter->vdev->dev, | 455 | dma_unmap_single(dev, adapter->buffer_list_dma, 4096, |
440 | adapter->buffer_list_dma, 4096, | ||
441 | DMA_BIDIRECTIONAL); | 456 | DMA_BIDIRECTIONAL); |
442 | adapter->buffer_list_dma = DMA_ERROR_CODE; | 457 | adapter->buffer_list_dma = DMA_ERROR_CODE; |
443 | } | 458 | } |
@@ -446,9 +461,8 @@ static void ibmveth_cleanup(struct ibmveth_adapter *adapter) | |||
446 | } | 461 | } |
447 | 462 | ||
448 | if(adapter->filter_list_addr != NULL) { | 463 | if(adapter->filter_list_addr != NULL) { |
449 | if(!dma_mapping_error(adapter->filter_list_dma)) { | 464 | if (!dma_mapping_error(dev, adapter->filter_list_dma)) { |
450 | dma_unmap_single(&adapter->vdev->dev, | 465 | dma_unmap_single(dev, adapter->filter_list_dma, 4096, |
451 | adapter->filter_list_dma, 4096, | ||
452 | DMA_BIDIRECTIONAL); | 466 | DMA_BIDIRECTIONAL); |
453 | adapter->filter_list_dma = DMA_ERROR_CODE; | 467 | adapter->filter_list_dma = DMA_ERROR_CODE; |
454 | } | 468 | } |
@@ -457,8 +471,8 @@ static void ibmveth_cleanup(struct ibmveth_adapter *adapter) | |||
457 | } | 471 | } |
458 | 472 | ||
459 | if(adapter->rx_queue.queue_addr != NULL) { | 473 | if(adapter->rx_queue.queue_addr != NULL) { |
460 | if(!dma_mapping_error(adapter->rx_queue.queue_dma)) { | 474 | if (!dma_mapping_error(dev, adapter->rx_queue.queue_dma)) { |
461 | dma_unmap_single(&adapter->vdev->dev, | 475 | dma_unmap_single(dev, |
462 | adapter->rx_queue.queue_dma, | 476 | adapter->rx_queue.queue_dma, |
463 | adapter->rx_queue.queue_len, | 477 | adapter->rx_queue.queue_len, |
464 | DMA_BIDIRECTIONAL); | 478 | DMA_BIDIRECTIONAL); |
@@ -472,6 +486,18 @@ static void ibmveth_cleanup(struct ibmveth_adapter *adapter) | |||
472 | if (adapter->rx_buff_pool[i].active) | 486 | if (adapter->rx_buff_pool[i].active) |
473 | ibmveth_free_buffer_pool(adapter, | 487 | ibmveth_free_buffer_pool(adapter, |
474 | &adapter->rx_buff_pool[i]); | 488 | &adapter->rx_buff_pool[i]); |
489 | |||
490 | if (adapter->bounce_buffer != NULL) { | ||
491 | if (!dma_mapping_error(dev, adapter->bounce_buffer_dma)) { | ||
492 | dma_unmap_single(&adapter->vdev->dev, | ||
493 | adapter->bounce_buffer_dma, | ||
494 | adapter->netdev->mtu + IBMVETH_BUFF_OH, | ||
495 | DMA_BIDIRECTIONAL); | ||
496 | adapter->bounce_buffer_dma = DMA_ERROR_CODE; | ||
497 | } | ||
498 | kfree(adapter->bounce_buffer); | ||
499 | adapter->bounce_buffer = NULL; | ||
500 | } | ||
475 | } | 501 | } |
476 | 502 | ||
477 | static int ibmveth_register_logical_lan(struct ibmveth_adapter *adapter, | 503 | static int ibmveth_register_logical_lan(struct ibmveth_adapter *adapter, |
@@ -508,6 +534,7 @@ static int ibmveth_open(struct net_device *netdev) | |||
508 | int rc; | 534 | int rc; |
509 | union ibmveth_buf_desc rxq_desc; | 535 | union ibmveth_buf_desc rxq_desc; |
510 | int i; | 536 | int i; |
537 | struct device *dev; | ||
511 | 538 | ||
512 | ibmveth_debug_printk("open starting\n"); | 539 | ibmveth_debug_printk("open starting\n"); |
513 | 540 | ||
@@ -536,17 +563,19 @@ static int ibmveth_open(struct net_device *netdev) | |||
536 | return -ENOMEM; | 563 | return -ENOMEM; |
537 | } | 564 | } |
538 | 565 | ||
539 | adapter->buffer_list_dma = dma_map_single(&adapter->vdev->dev, | 566 | dev = &adapter->vdev->dev; |
567 | |||
568 | adapter->buffer_list_dma = dma_map_single(dev, | ||
540 | adapter->buffer_list_addr, 4096, DMA_BIDIRECTIONAL); | 569 | adapter->buffer_list_addr, 4096, DMA_BIDIRECTIONAL); |
541 | adapter->filter_list_dma = dma_map_single(&adapter->vdev->dev, | 570 | adapter->filter_list_dma = dma_map_single(dev, |
542 | adapter->filter_list_addr, 4096, DMA_BIDIRECTIONAL); | 571 | adapter->filter_list_addr, 4096, DMA_BIDIRECTIONAL); |
543 | adapter->rx_queue.queue_dma = dma_map_single(&adapter->vdev->dev, | 572 | adapter->rx_queue.queue_dma = dma_map_single(dev, |
544 | adapter->rx_queue.queue_addr, | 573 | adapter->rx_queue.queue_addr, |
545 | adapter->rx_queue.queue_len, DMA_BIDIRECTIONAL); | 574 | adapter->rx_queue.queue_len, DMA_BIDIRECTIONAL); |
546 | 575 | ||
547 | if((dma_mapping_error(adapter->buffer_list_dma) ) || | 576 | if ((dma_mapping_error(dev, adapter->buffer_list_dma)) || |
548 | (dma_mapping_error(adapter->filter_list_dma)) || | 577 | (dma_mapping_error(dev, adapter->filter_list_dma)) || |
549 | (dma_mapping_error(adapter->rx_queue.queue_dma))) { | 578 | (dma_mapping_error(dev, adapter->rx_queue.queue_dma))) { |
550 | ibmveth_error_printk("unable to map filter or buffer list pages\n"); | 579 | ibmveth_error_printk("unable to map filter or buffer list pages\n"); |
551 | ibmveth_cleanup(adapter); | 580 | ibmveth_cleanup(adapter); |
552 | napi_disable(&adapter->napi); | 581 | napi_disable(&adapter->napi); |
@@ -607,6 +636,24 @@ static int ibmveth_open(struct net_device *netdev) | |||
607 | return rc; | 636 | return rc; |
608 | } | 637 | } |
609 | 638 | ||
639 | adapter->bounce_buffer = | ||
640 | kmalloc(netdev->mtu + IBMVETH_BUFF_OH, GFP_KERNEL); | ||
641 | if (!adapter->bounce_buffer) { | ||
642 | ibmveth_error_printk("unable to allocate bounce buffer\n"); | ||
643 | ibmveth_cleanup(adapter); | ||
644 | napi_disable(&adapter->napi); | ||
645 | return -ENOMEM; | ||
646 | } | ||
647 | adapter->bounce_buffer_dma = | ||
648 | dma_map_single(&adapter->vdev->dev, adapter->bounce_buffer, | ||
649 | netdev->mtu + IBMVETH_BUFF_OH, DMA_BIDIRECTIONAL); | ||
650 | if (dma_mapping_error(dev, adapter->bounce_buffer_dma)) { | ||
651 | ibmveth_error_printk("unable to map bounce buffer\n"); | ||
652 | ibmveth_cleanup(adapter); | ||
653 | napi_disable(&adapter->napi); | ||
654 | return -ENOMEM; | ||
655 | } | ||
656 | |||
610 | ibmveth_debug_printk("initial replenish cycle\n"); | 657 | ibmveth_debug_printk("initial replenish cycle\n"); |
611 | ibmveth_interrupt(netdev->irq, netdev); | 658 | ibmveth_interrupt(netdev->irq, netdev); |
612 | 659 | ||
@@ -853,10 +900,12 @@ static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev) | |||
853 | unsigned int tx_packets = 0; | 900 | unsigned int tx_packets = 0; |
854 | unsigned int tx_send_failed = 0; | 901 | unsigned int tx_send_failed = 0; |
855 | unsigned int tx_map_failed = 0; | 902 | unsigned int tx_map_failed = 0; |
903 | int used_bounce = 0; | ||
904 | unsigned long data_dma_addr; | ||
856 | 905 | ||
857 | desc.fields.flags_len = IBMVETH_BUF_VALID | skb->len; | 906 | desc.fields.flags_len = IBMVETH_BUF_VALID | skb->len; |
858 | desc.fields.address = dma_map_single(&adapter->vdev->dev, skb->data, | 907 | data_dma_addr = dma_map_single(&adapter->vdev->dev, skb->data, |
859 | skb->len, DMA_TO_DEVICE); | 908 | skb->len, DMA_TO_DEVICE); |
860 | 909 | ||
861 | if (skb->ip_summed == CHECKSUM_PARTIAL && | 910 | if (skb->ip_summed == CHECKSUM_PARTIAL && |
862 | ip_hdr(skb)->protocol != IPPROTO_TCP && skb_checksum_help(skb)) { | 911 | ip_hdr(skb)->protocol != IPPROTO_TCP && skb_checksum_help(skb)) { |
@@ -875,12 +924,16 @@ static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev) | |||
875 | buf[1] = 0; | 924 | buf[1] = 0; |
876 | } | 925 | } |
877 | 926 | ||
878 | if (dma_mapping_error(desc.fields.address)) { | 927 | if (dma_mapping_error(&adapter->vdev->dev, data_dma_addr)) { |
879 | ibmveth_error_printk("tx: unable to map xmit buffer\n"); | 928 | if (!firmware_has_feature(FW_FEATURE_CMO)) |
929 | ibmveth_error_printk("tx: unable to map xmit buffer\n"); | ||
930 | skb_copy_from_linear_data(skb, adapter->bounce_buffer, | ||
931 | skb->len); | ||
932 | desc.fields.address = adapter->bounce_buffer_dma; | ||
880 | tx_map_failed++; | 933 | tx_map_failed++; |
881 | tx_dropped++; | 934 | used_bounce = 1; |
882 | goto out; | 935 | } else |
883 | } | 936 | desc.fields.address = data_dma_addr; |
884 | 937 | ||
885 | /* send the frame. Arbitrarily set retrycount to 1024 */ | 938 | /* send the frame. Arbitrarily set retrycount to 1024 */ |
886 | correlator = 0; | 939 | correlator = 0; |
@@ -904,8 +957,9 @@ static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev) | |||
904 | netdev->trans_start = jiffies; | 957 | netdev->trans_start = jiffies; |
905 | } | 958 | } |
906 | 959 | ||
907 | dma_unmap_single(&adapter->vdev->dev, desc.fields.address, | 960 | if (!used_bounce) |
908 | skb->len, DMA_TO_DEVICE); | 961 | dma_unmap_single(&adapter->vdev->dev, data_dma_addr, |
962 | skb->len, DMA_TO_DEVICE); | ||
909 | 963 | ||
910 | out: spin_lock_irqsave(&adapter->stats_lock, flags); | 964 | out: spin_lock_irqsave(&adapter->stats_lock, flags); |
911 | netdev->stats.tx_dropped += tx_dropped; | 965 | netdev->stats.tx_dropped += tx_dropped; |
@@ -1053,9 +1107,9 @@ static void ibmveth_set_multicast_list(struct net_device *netdev) | |||
1053 | static int ibmveth_change_mtu(struct net_device *dev, int new_mtu) | 1107 | static int ibmveth_change_mtu(struct net_device *dev, int new_mtu) |
1054 | { | 1108 | { |
1055 | struct ibmveth_adapter *adapter = dev->priv; | 1109 | struct ibmveth_adapter *adapter = dev->priv; |
1110 | struct vio_dev *viodev = adapter->vdev; | ||
1056 | int new_mtu_oh = new_mtu + IBMVETH_BUFF_OH; | 1111 | int new_mtu_oh = new_mtu + IBMVETH_BUFF_OH; |
1057 | int reinit = 0; | 1112 | int i; |
1058 | int i, rc; | ||
1059 | 1113 | ||
1060 | if (new_mtu < IBMVETH_MAX_MTU) | 1114 | if (new_mtu < IBMVETH_MAX_MTU) |
1061 | return -EINVAL; | 1115 | return -EINVAL; |
@@ -1067,23 +1121,34 @@ static int ibmveth_change_mtu(struct net_device *dev, int new_mtu) | |||
1067 | if (i == IbmVethNumBufferPools) | 1121 | if (i == IbmVethNumBufferPools) |
1068 | return -EINVAL; | 1122 | return -EINVAL; |
1069 | 1123 | ||
1124 | /* Deactivate all the buffer pools so that the next loop can activate | ||
1125 | only the buffer pools necessary to hold the new MTU */ | ||
1126 | for (i = 0; i < IbmVethNumBufferPools; i++) | ||
1127 | if (adapter->rx_buff_pool[i].active) { | ||
1128 | ibmveth_free_buffer_pool(adapter, | ||
1129 | &adapter->rx_buff_pool[i]); | ||
1130 | adapter->rx_buff_pool[i].active = 0; | ||
1131 | } | ||
1132 | |||
1070 | /* Look for an active buffer pool that can hold the new MTU */ | 1133 | /* Look for an active buffer pool that can hold the new MTU */ |
1071 | for(i = 0; i<IbmVethNumBufferPools; i++) { | 1134 | for(i = 0; i<IbmVethNumBufferPools; i++) { |
1072 | if (!adapter->rx_buff_pool[i].active) { | 1135 | adapter->rx_buff_pool[i].active = 1; |
1073 | adapter->rx_buff_pool[i].active = 1; | ||
1074 | reinit = 1; | ||
1075 | } | ||
1076 | 1136 | ||
1077 | if (new_mtu_oh < adapter->rx_buff_pool[i].buff_size) { | 1137 | if (new_mtu_oh < adapter->rx_buff_pool[i].buff_size) { |
1078 | if (reinit && netif_running(adapter->netdev)) { | 1138 | if (netif_running(adapter->netdev)) { |
1079 | adapter->pool_config = 1; | 1139 | adapter->pool_config = 1; |
1080 | ibmveth_close(adapter->netdev); | 1140 | ibmveth_close(adapter->netdev); |
1081 | adapter->pool_config = 0; | 1141 | adapter->pool_config = 0; |
1082 | dev->mtu = new_mtu; | 1142 | dev->mtu = new_mtu; |
1083 | if ((rc = ibmveth_open(adapter->netdev))) | 1143 | vio_cmo_set_dev_desired(viodev, |
1084 | return rc; | 1144 | ibmveth_get_desired_dma |
1085 | } else | 1145 | (viodev)); |
1086 | dev->mtu = new_mtu; | 1146 | return ibmveth_open(adapter->netdev); |
1147 | } | ||
1148 | dev->mtu = new_mtu; | ||
1149 | vio_cmo_set_dev_desired(viodev, | ||
1150 | ibmveth_get_desired_dma | ||
1151 | (viodev)); | ||
1087 | return 0; | 1152 | return 0; |
1088 | } | 1153 | } |
1089 | } | 1154 | } |
@@ -1098,6 +1163,46 @@ static void ibmveth_poll_controller(struct net_device *dev) | |||
1098 | } | 1163 | } |
1099 | #endif | 1164 | #endif |
1100 | 1165 | ||
1166 | /** | ||
1167 | * ibmveth_get_desired_dma - Calculate IO memory desired by the driver | ||
1168 | * | ||
1169 | * @vdev: struct vio_dev for the device whose desired IO mem is to be returned | ||
1170 | * | ||
1171 | * Return value: | ||
1172 | * Number of bytes of IO data the driver will need to perform well. | ||
1173 | */ | ||
1174 | static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev) | ||
1175 | { | ||
1176 | struct net_device *netdev = dev_get_drvdata(&vdev->dev); | ||
1177 | struct ibmveth_adapter *adapter; | ||
1178 | unsigned long ret; | ||
1179 | int i; | ||
1180 | int rxqentries = 1; | ||
1181 | |||
1182 | /* netdev inits at probe time along with the structures we need below*/ | ||
1183 | if (netdev == NULL) | ||
1184 | return IOMMU_PAGE_ALIGN(IBMVETH_IO_ENTITLEMENT_DEFAULT); | ||
1185 | |||
1186 | adapter = netdev_priv(netdev); | ||
1187 | |||
1188 | ret = IBMVETH_BUFF_LIST_SIZE + IBMVETH_FILT_LIST_SIZE; | ||
1189 | ret += IOMMU_PAGE_ALIGN(netdev->mtu); | ||
1190 | |||
1191 | for (i = 0; i < IbmVethNumBufferPools; i++) { | ||
1192 | /* add the size of the active receive buffers */ | ||
1193 | if (adapter->rx_buff_pool[i].active) | ||
1194 | ret += | ||
1195 | adapter->rx_buff_pool[i].size * | ||
1196 | IOMMU_PAGE_ALIGN(adapter->rx_buff_pool[i]. | ||
1197 | buff_size); | ||
1198 | rxqentries += adapter->rx_buff_pool[i].size; | ||
1199 | } | ||
1200 | /* add the size of the receive queue entries */ | ||
1201 | ret += IOMMU_PAGE_ALIGN(rxqentries * sizeof(struct ibmveth_rx_q_entry)); | ||
1202 | |||
1203 | return ret; | ||
1204 | } | ||
1205 | |||
1101 | static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id) | 1206 | static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id) |
1102 | { | 1207 | { |
1103 | int rc, i; | 1208 | int rc, i; |
@@ -1242,6 +1347,8 @@ static int __devexit ibmveth_remove(struct vio_dev *dev) | |||
1242 | ibmveth_proc_unregister_adapter(adapter); | 1347 | ibmveth_proc_unregister_adapter(adapter); |
1243 | 1348 | ||
1244 | free_netdev(netdev); | 1349 | free_netdev(netdev); |
1350 | dev_set_drvdata(&dev->dev, NULL); | ||
1351 | |||
1245 | return 0; | 1352 | return 0; |
1246 | } | 1353 | } |
1247 | 1354 | ||
@@ -1402,14 +1509,15 @@ const char * buf, size_t count) | |||
1402 | return -EPERM; | 1509 | return -EPERM; |
1403 | } | 1510 | } |
1404 | 1511 | ||
1405 | pool->active = 0; | ||
1406 | if (netif_running(netdev)) { | 1512 | if (netif_running(netdev)) { |
1407 | adapter->pool_config = 1; | 1513 | adapter->pool_config = 1; |
1408 | ibmveth_close(netdev); | 1514 | ibmveth_close(netdev); |
1515 | pool->active = 0; | ||
1409 | adapter->pool_config = 0; | 1516 | adapter->pool_config = 0; |
1410 | if ((rc = ibmveth_open(netdev))) | 1517 | if ((rc = ibmveth_open(netdev))) |
1411 | return rc; | 1518 | return rc; |
1412 | } | 1519 | } |
1520 | pool->active = 0; | ||
1413 | } | 1521 | } |
1414 | } else if (attr == &veth_num_attr) { | 1522 | } else if (attr == &veth_num_attr) { |
1415 | if (value <= 0 || value > IBMVETH_MAX_POOL_COUNT) | 1523 | if (value <= 0 || value > IBMVETH_MAX_POOL_COUNT) |
@@ -1485,6 +1593,7 @@ static struct vio_driver ibmveth_driver = { | |||
1485 | .id_table = ibmveth_device_table, | 1593 | .id_table = ibmveth_device_table, |
1486 | .probe = ibmveth_probe, | 1594 | .probe = ibmveth_probe, |
1487 | .remove = ibmveth_remove, | 1595 | .remove = ibmveth_remove, |
1596 | .get_desired_dma = ibmveth_get_desired_dma, | ||
1488 | .driver = { | 1597 | .driver = { |
1489 | .name = ibmveth_driver_name, | 1598 | .name = ibmveth_driver_name, |
1490 | .owner = THIS_MODULE, | 1599 | .owner = THIS_MODULE, |
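The ibmveth changes above add Cooperative Memory Overcommit (CMO) support: a preallocated bounce buffer that transmit falls back to when the skb cannot be DMA-mapped, plus a get_desired_dma() callback that reports the IO entitlement the driver wants. A sketch of the transmit fallback, using a hypothetical helper rather than the driver's actual ibmveth_start_xmit() code:

/*
 * Hypothetical helper, for illustration only -- not part of ibmveth.c.
 * If the skb cannot be mapped (e.g. the partition is short on IO
 * entitlement under CMO), copy it into the preallocated bounce buffer
 * and transmit from there instead of dropping the frame.
 */
static dma_addr_t ibmveth_map_or_bounce(struct ibmveth_adapter *adapter,
                                        struct sk_buff *skb, int *used_bounce)
{
        struct device *dev = &adapter->vdev->dev;
        dma_addr_t addr = dma_map_single(dev, skb->data, skb->len,
                                         DMA_TO_DEVICE);

        if (dma_mapping_error(dev, addr)) {
                skb_copy_from_linear_data(skb, adapter->bounce_buffer, skb->len);
                *used_bounce = 1;
                return adapter->bounce_buffer_dma;
        }

        *used_bounce = 0;
        return addr;
}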
diff --git a/drivers/net/ibmveth.h b/drivers/net/ibmveth.h
index 41f61cd18852..d28186948752 100644
--- a/drivers/net/ibmveth.h
+++ b/drivers/net/ibmveth.h
@@ -93,9 +93,12 @@ static inline long h_illan_attributes(unsigned long unit_address, | |||
93 | plpar_hcall_norets(H_CHANGE_LOGICAL_LAN_MAC, ua, mac) | 93 | plpar_hcall_norets(H_CHANGE_LOGICAL_LAN_MAC, ua, mac) |
94 | 94 | ||
95 | #define IbmVethNumBufferPools 5 | 95 | #define IbmVethNumBufferPools 5 |
96 | #define IBMVETH_IO_ENTITLEMENT_DEFAULT 4243456 /* MTU of 1500 needs 4.2Mb */ | ||
96 | #define IBMVETH_BUFF_OH 22 /* Overhead: 14 ethernet header + 8 opaque handle */ | 97 | #define IBMVETH_BUFF_OH 22 /* Overhead: 14 ethernet header + 8 opaque handle */ |
97 | #define IBMVETH_MAX_MTU 68 | 98 | #define IBMVETH_MAX_MTU 68 |
98 | #define IBMVETH_MAX_POOL_COUNT 4096 | 99 | #define IBMVETH_MAX_POOL_COUNT 4096 |
100 | #define IBMVETH_BUFF_LIST_SIZE 4096 | ||
101 | #define IBMVETH_FILT_LIST_SIZE 4096 | ||
99 | #define IBMVETH_MAX_BUF_SIZE (1024 * 128) | 102 | #define IBMVETH_MAX_BUF_SIZE (1024 * 128) |
100 | 103 | ||
101 | static int pool_size[] = { 512, 1024 * 2, 1024 * 16, 1024 * 32, 1024 * 64 }; | 104 | static int pool_size[] = { 512, 1024 * 2, 1024 * 16, 1024 * 32, 1024 * 64 }; |
@@ -143,6 +146,8 @@ struct ibmveth_adapter { | |||
143 | struct ibmveth_rx_q rx_queue; | 146 | struct ibmveth_rx_q rx_queue; |
144 | int pool_config; | 147 | int pool_config; |
145 | int rx_csum; | 148 | int rx_csum; |
149 | void *bounce_buffer; | ||
150 | dma_addr_t bounce_buffer_dma; | ||
146 | 151 | ||
147 | /* adapter specific stats */ | 152 | /* adapter specific stats */ |
148 | u64 replenish_task_cycles; | 153 | u64 replenish_task_cycles; |
diff --git a/drivers/net/iseries_veth.c b/drivers/net/iseries_veth.c
index b8d0639c1cdf..c46864d626b2 100644
--- a/drivers/net/iseries_veth.c
+++ b/drivers/net/iseries_veth.c
@@ -1128,7 +1128,7 @@ static int veth_transmit_to_one(struct sk_buff *skb, HvLpIndex rlp, | |||
1128 | msg->data.addr[0] = dma_map_single(port->dev, skb->data, | 1128 | msg->data.addr[0] = dma_map_single(port->dev, skb->data, |
1129 | skb->len, DMA_TO_DEVICE); | 1129 | skb->len, DMA_TO_DEVICE); |
1130 | 1130 | ||
1131 | if (dma_mapping_error(msg->data.addr[0])) | 1131 | if (dma_mapping_error(port->dev, msg->data.addr[0])) |
1132 | goto recycle_and_drop; | 1132 | goto recycle_and_drop; |
1133 | 1133 | ||
1134 | msg->dev = port->dev; | 1134 | msg->dev = port->dev; |
@@ -1226,7 +1226,7 @@ static void veth_recycle_msg(struct veth_lpar_connection *cnx, | |||
1226 | dma_address = msg->data.addr[0]; | 1226 | dma_address = msg->data.addr[0]; |
1227 | dma_length = msg->data.len[0]; | 1227 | dma_length = msg->data.len[0]; |
1228 | 1228 | ||
1229 | if (!dma_mapping_error(dma_address)) | 1229 | if (!dma_mapping_error(msg->dev, dma_address)) |
1230 | dma_unmap_single(msg->dev, dma_address, dma_length, | 1230 | dma_unmap_single(msg->dev, dma_address, dma_length, |
1231 | DMA_TO_DEVICE); | 1231 | DMA_TO_DEVICE); |
1232 | 1232 | ||
diff --git a/drivers/net/mlx4/eq.c b/drivers/net/mlx4/eq.c
index bd34bf08830d..8a8b56135a58 100644
--- a/drivers/net/mlx4/eq.c
+++ b/drivers/net/mlx4/eq.c
@@ -526,7 +526,7 @@ int mlx4_map_eq_icm(struct mlx4_dev *dev, u64 icm_virt) | |||
526 | return -ENOMEM; | 526 | return -ENOMEM; |
527 | priv->eq_table.icm_dma = pci_map_page(dev->pdev, priv->eq_table.icm_page, 0, | 527 | priv->eq_table.icm_dma = pci_map_page(dev->pdev, priv->eq_table.icm_page, 0, |
528 | PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); | 528 | PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); |
529 | if (pci_dma_mapping_error(priv->eq_table.icm_dma)) { | 529 | if (pci_dma_mapping_error(dev->pdev, priv->eq_table.icm_dma)) { |
530 | __free_page(priv->eq_table.icm_page); | 530 | __free_page(priv->eq_table.icm_page); |
531 | return -ENOMEM; | 531 | return -ENOMEM; |
532 | } | 532 | } |
diff --git a/drivers/net/pasemi_mac.c b/drivers/net/pasemi_mac.c
index 993d87c9296f..edc0fd588985 100644
--- a/drivers/net/pasemi_mac.c
+++ b/drivers/net/pasemi_mac.c
@@ -650,7 +650,7 @@ static void pasemi_mac_replenish_rx_ring(const struct net_device *dev, | |||
650 | mac->bufsz - LOCAL_SKB_ALIGN, | 650 | mac->bufsz - LOCAL_SKB_ALIGN, |
651 | PCI_DMA_FROMDEVICE); | 651 | PCI_DMA_FROMDEVICE); |
652 | 652 | ||
653 | if (unlikely(dma_mapping_error(dma))) { | 653 | if (unlikely(pci_dma_mapping_error(mac->dma_pdev, dma))) { |
654 | dev_kfree_skb_irq(info->skb); | 654 | dev_kfree_skb_irq(info->skb); |
655 | break; | 655 | break; |
656 | } | 656 | } |
@@ -1519,7 +1519,7 @@ static int pasemi_mac_start_tx(struct sk_buff *skb, struct net_device *dev) | |||
1519 | map[0] = pci_map_single(mac->dma_pdev, skb->data, skb_headlen(skb), | 1519 | map[0] = pci_map_single(mac->dma_pdev, skb->data, skb_headlen(skb), |
1520 | PCI_DMA_TODEVICE); | 1520 | PCI_DMA_TODEVICE); |
1521 | map_size[0] = skb_headlen(skb); | 1521 | map_size[0] = skb_headlen(skb); |
1522 | if (dma_mapping_error(map[0])) | 1522 | if (pci_dma_mapping_error(mac->dma_pdev, map[0])) |
1523 | goto out_err_nolock; | 1523 | goto out_err_nolock; |
1524 | 1524 | ||
1525 | for (i = 0; i < nfrags; i++) { | 1525 | for (i = 0; i < nfrags; i++) { |
@@ -1529,7 +1529,7 @@ static int pasemi_mac_start_tx(struct sk_buff *skb, struct net_device *dev) | |||
1529 | frag->page_offset, frag->size, | 1529 | frag->page_offset, frag->size, |
1530 | PCI_DMA_TODEVICE); | 1530 | PCI_DMA_TODEVICE); |
1531 | map_size[i+1] = frag->size; | 1531 | map_size[i+1] = frag->size; |
1532 | if (dma_mapping_error(map[i+1])) { | 1532 | if (pci_dma_mapping_error(mac->dma_pdev, map[i+1])) { |
1533 | nfrags = i; | 1533 | nfrags = i; |
1534 | goto out_err_nolock; | 1534 | goto out_err_nolock; |
1535 | } | 1535 | } |
diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c
index 739b3ab7bccc..ddccc074a76a 100644
--- a/drivers/net/ppp_generic.c
+++ b/drivers/net/ppp_generic.c
@@ -581,12 +581,12 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg) | |||
581 | if (file == ppp->owner) | 581 | if (file == ppp->owner) |
582 | ppp_shutdown_interface(ppp); | 582 | ppp_shutdown_interface(ppp); |
583 | } | 583 | } |
584 | if (atomic_read(&file->f_count) <= 2) { | 584 | if (atomic_long_read(&file->f_count) <= 2) { |
585 | ppp_release(NULL, file); | 585 | ppp_release(NULL, file); |
586 | err = 0; | 586 | err = 0; |
587 | } else | 587 | } else |
588 | printk(KERN_DEBUG "PPPIOCDETACH file->f_count=%d\n", | 588 | printk(KERN_DEBUG "PPPIOCDETACH file->f_count=%ld\n", |
589 | atomic_read(&file->f_count)); | 589 | atomic_long_read(&file->f_count)); |
590 | unlock_kernel(); | 590 | unlock_kernel(); |
591 | return err; | 591 | return err; |
592 | } | 592 | } |
diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c
index e7d48a352beb..e82b37bbd6c3 100644
--- a/drivers/net/qla3xxx.c
+++ b/drivers/net/qla3xxx.c
@@ -328,7 +328,7 @@ static void ql_release_to_lrg_buf_free_list(struct ql3_adapter *qdev, | |||
328 | qdev->lrg_buffer_len - | 328 | qdev->lrg_buffer_len - |
329 | QL_HEADER_SPACE, | 329 | QL_HEADER_SPACE, |
330 | PCI_DMA_FROMDEVICE); | 330 | PCI_DMA_FROMDEVICE); |
331 | err = pci_dma_mapping_error(map); | 331 | err = pci_dma_mapping_error(qdev->pdev, map); |
332 | if(err) { | 332 | if(err) { |
333 | printk(KERN_ERR "%s: PCI mapping failed with error: %d\n", | 333 | printk(KERN_ERR "%s: PCI mapping failed with error: %d\n", |
334 | qdev->ndev->name, err); | 334 | qdev->ndev->name, err); |
@@ -1919,7 +1919,7 @@ static int ql_populate_free_queue(struct ql3_adapter *qdev) | |||
1919 | QL_HEADER_SPACE, | 1919 | QL_HEADER_SPACE, |
1920 | PCI_DMA_FROMDEVICE); | 1920 | PCI_DMA_FROMDEVICE); |
1921 | 1921 | ||
1922 | err = pci_dma_mapping_error(map); | 1922 | err = pci_dma_mapping_error(qdev->pdev, map); |
1923 | if(err) { | 1923 | if(err) { |
1924 | printk(KERN_ERR "%s: PCI mapping failed with error: %d\n", | 1924 | printk(KERN_ERR "%s: PCI mapping failed with error: %d\n", |
1925 | qdev->ndev->name, err); | 1925 | qdev->ndev->name, err); |
@@ -2454,7 +2454,7 @@ static int ql_send_map(struct ql3_adapter *qdev, | |||
2454 | */ | 2454 | */ |
2455 | map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE); | 2455 | map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE); |
2456 | 2456 | ||
2457 | err = pci_dma_mapping_error(map); | 2457 | err = pci_dma_mapping_error(qdev->pdev, map); |
2458 | if(err) { | 2458 | if(err) { |
2459 | printk(KERN_ERR "%s: PCI mapping failed with error: %d\n", | 2459 | printk(KERN_ERR "%s: PCI mapping failed with error: %d\n", |
2460 | qdev->ndev->name, err); | 2460 | qdev->ndev->name, err); |
@@ -2487,7 +2487,7 @@ static int ql_send_map(struct ql3_adapter *qdev, | |||
2487 | sizeof(struct oal), | 2487 | sizeof(struct oal), |
2488 | PCI_DMA_TODEVICE); | 2488 | PCI_DMA_TODEVICE); |
2489 | 2489 | ||
2490 | err = pci_dma_mapping_error(map); | 2490 | err = pci_dma_mapping_error(qdev->pdev, map); |
2491 | if(err) { | 2491 | if(err) { |
2492 | 2492 | ||
2493 | printk(KERN_ERR "%s: PCI mapping outbound address list with error: %d\n", | 2493 | printk(KERN_ERR "%s: PCI mapping outbound address list with error: %d\n", |
@@ -2514,7 +2514,7 @@ static int ql_send_map(struct ql3_adapter *qdev, | |||
2514 | frag->page_offset, frag->size, | 2514 | frag->page_offset, frag->size, |
2515 | PCI_DMA_TODEVICE); | 2515 | PCI_DMA_TODEVICE); |
2516 | 2516 | ||
2517 | err = pci_dma_mapping_error(map); | 2517 | err = pci_dma_mapping_error(qdev->pdev, map); |
2518 | if(err) { | 2518 | if(err) { |
2519 | printk(KERN_ERR "%s: PCI mapping frags failed with error: %d\n", | 2519 | printk(KERN_ERR "%s: PCI mapping frags failed with error: %d\n", |
2520 | qdev->ndev->name, err); | 2520 | qdev->ndev->name, err); |
@@ -2916,7 +2916,7 @@ static int ql_alloc_large_buffers(struct ql3_adapter *qdev) | |||
2916 | QL_HEADER_SPACE, | 2916 | QL_HEADER_SPACE, |
2917 | PCI_DMA_FROMDEVICE); | 2917 | PCI_DMA_FROMDEVICE); |
2918 | 2918 | ||
2919 | err = pci_dma_mapping_error(map); | 2919 | err = pci_dma_mapping_error(qdev->pdev, map); |
2920 | if(err) { | 2920 | if(err) { |
2921 | printk(KERN_ERR "%s: PCI mapping failed with error: %d\n", | 2921 | printk(KERN_ERR "%s: PCI mapping failed with error: %d\n", |
2922 | qdev->ndev->name, err); | 2922 | qdev->ndev->name, err); |
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index 9dae40ccf048..86d77d05190a 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -2512,8 +2512,8 @@ static void stop_nic(struct s2io_nic *nic) | |||
2512 | * Return Value: | 2512 | * Return Value: |
2513 | * SUCCESS on success or an appropriate -ve value on failure. | 2513 | * SUCCESS on success or an appropriate -ve value on failure. |
2514 | */ | 2514 | */ |
2515 | 2515 | static int fill_rx_buffers(struct s2io_nic *nic, struct ring_info *ring, | |
2516 | static int fill_rx_buffers(struct ring_info *ring, int from_card_up) | 2516 | int from_card_up) |
2517 | { | 2517 | { |
2518 | struct sk_buff *skb; | 2518 | struct sk_buff *skb; |
2519 | struct RxD_t *rxdp; | 2519 | struct RxD_t *rxdp; |
@@ -2602,7 +2602,8 @@ static int fill_rx_buffers(struct ring_info *ring, int from_card_up) | |||
2602 | rxdp1->Buffer0_ptr = pci_map_single | 2602 | rxdp1->Buffer0_ptr = pci_map_single |
2603 | (ring->pdev, skb->data, size - NET_IP_ALIGN, | 2603 | (ring->pdev, skb->data, size - NET_IP_ALIGN, |
2604 | PCI_DMA_FROMDEVICE); | 2604 | PCI_DMA_FROMDEVICE); |
2605 | if(pci_dma_mapping_error(rxdp1->Buffer0_ptr)) | 2605 | if (pci_dma_mapping_error(nic->pdev, |
2606 | rxdp1->Buffer0_ptr)) | ||
2606 | goto pci_map_failed; | 2607 | goto pci_map_failed; |
2607 | 2608 | ||
2608 | rxdp->Control_2 = | 2609 | rxdp->Control_2 = |
@@ -2636,7 +2637,8 @@ static int fill_rx_buffers(struct ring_info *ring, int from_card_up) | |||
2636 | rxdp3->Buffer0_ptr = | 2637 | rxdp3->Buffer0_ptr = |
2637 | pci_map_single(ring->pdev, ba->ba_0, | 2638 | pci_map_single(ring->pdev, ba->ba_0, |
2638 | BUF0_LEN, PCI_DMA_FROMDEVICE); | 2639 | BUF0_LEN, PCI_DMA_FROMDEVICE); |
2639 | if (pci_dma_mapping_error(rxdp3->Buffer0_ptr)) | 2640 | if (pci_dma_mapping_error(nic->pdev, |
2641 | rxdp3->Buffer0_ptr)) | ||
2640 | goto pci_map_failed; | 2642 | goto pci_map_failed; |
2641 | } else | 2643 | } else |
2642 | pci_dma_sync_single_for_device(ring->pdev, | 2644 | pci_dma_sync_single_for_device(ring->pdev, |
@@ -2655,7 +2657,8 @@ static int fill_rx_buffers(struct ring_info *ring, int from_card_up) | |||
2655 | (ring->pdev, skb->data, ring->mtu + 4, | 2657 | (ring->pdev, skb->data, ring->mtu + 4, |
2656 | PCI_DMA_FROMDEVICE); | 2658 | PCI_DMA_FROMDEVICE); |
2657 | 2659 | ||
2658 | if (pci_dma_mapping_error(rxdp3->Buffer2_ptr)) | 2660 | if (pci_dma_mapping_error(nic->pdev, |
2661 | rxdp3->Buffer2_ptr)) | ||
2659 | goto pci_map_failed; | 2662 | goto pci_map_failed; |
2660 | 2663 | ||
2661 | if (from_card_up) { | 2664 | if (from_card_up) { |
@@ -2664,8 +2667,8 @@ static int fill_rx_buffers(struct ring_info *ring, int from_card_up) | |||
2664 | ba->ba_1, BUF1_LEN, | 2667 | ba->ba_1, BUF1_LEN, |
2665 | PCI_DMA_FROMDEVICE); | 2668 | PCI_DMA_FROMDEVICE); |
2666 | 2669 | ||
2667 | if (pci_dma_mapping_error | 2670 | if (pci_dma_mapping_error(nic->pdev, |
2668 | (rxdp3->Buffer1_ptr)) { | 2671 | rxdp3->Buffer1_ptr)) { |
2669 | pci_unmap_single | 2672 | pci_unmap_single |
2670 | (ring->pdev, | 2673 | (ring->pdev, |
2671 | (dma_addr_t)(unsigned long) | 2674 | (dma_addr_t)(unsigned long) |
@@ -2806,9 +2809,9 @@ static void free_rx_buffers(struct s2io_nic *sp) | |||
2806 | } | 2809 | } |
2807 | } | 2810 | } |
2808 | 2811 | ||
2809 | static int s2io_chk_rx_buffers(struct ring_info *ring) | 2812 | static int s2io_chk_rx_buffers(struct s2io_nic *nic, struct ring_info *ring) |
2810 | { | 2813 | { |
2811 | if (fill_rx_buffers(ring, 0) == -ENOMEM) { | 2814 | if (fill_rx_buffers(nic, ring, 0) == -ENOMEM) { |
2812 | DBG_PRINT(INFO_DBG, "%s:Out of memory", ring->dev->name); | 2815 | DBG_PRINT(INFO_DBG, "%s:Out of memory", ring->dev->name); |
2813 | DBG_PRINT(INFO_DBG, " in Rx Intr!!\n"); | 2816 | DBG_PRINT(INFO_DBG, " in Rx Intr!!\n"); |
2814 | } | 2817 | } |
@@ -2848,7 +2851,7 @@ static int s2io_poll_msix(struct napi_struct *napi, int budget) | |||
2848 | return 0; | 2851 | return 0; |
2849 | 2852 | ||
2850 | pkts_processed = rx_intr_handler(ring, budget); | 2853 | pkts_processed = rx_intr_handler(ring, budget); |
2851 | s2io_chk_rx_buffers(ring); | 2854 | s2io_chk_rx_buffers(nic, ring); |
2852 | 2855 | ||
2853 | if (pkts_processed < budget_org) { | 2856 | if (pkts_processed < budget_org) { |
2854 | netif_rx_complete(dev, napi); | 2857 | netif_rx_complete(dev, napi); |
@@ -2882,7 +2885,7 @@ static int s2io_poll_inta(struct napi_struct *napi, int budget) | |||
2882 | for (i = 0; i < config->rx_ring_num; i++) { | 2885 | for (i = 0; i < config->rx_ring_num; i++) { |
2883 | ring = &mac_control->rings[i]; | 2886 | ring = &mac_control->rings[i]; |
2884 | ring_pkts_processed = rx_intr_handler(ring, budget); | 2887 | ring_pkts_processed = rx_intr_handler(ring, budget); |
2885 | s2io_chk_rx_buffers(ring); | 2888 | s2io_chk_rx_buffers(nic, ring); |
2886 | pkts_processed += ring_pkts_processed; | 2889 | pkts_processed += ring_pkts_processed; |
2887 | budget -= ring_pkts_processed; | 2890 | budget -= ring_pkts_processed; |
2888 | if (budget <= 0) | 2891 | if (budget <= 0) |
@@ -2939,7 +2942,8 @@ static void s2io_netpoll(struct net_device *dev) | |||
2939 | rx_intr_handler(&mac_control->rings[i], 0); | 2942 | rx_intr_handler(&mac_control->rings[i], 0); |
2940 | 2943 | ||
2941 | for (i = 0; i < config->rx_ring_num; i++) { | 2944 | for (i = 0; i < config->rx_ring_num; i++) { |
2942 | if (fill_rx_buffers(&mac_control->rings[i], 0) == -ENOMEM) { | 2945 | if (fill_rx_buffers(nic, &mac_control->rings[i], 0) == |
2946 | -ENOMEM) { | ||
2943 | DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name); | 2947 | DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name); |
2944 | DBG_PRINT(INFO_DBG, " in Rx Netpoll!!\n"); | 2948 | DBG_PRINT(INFO_DBG, " in Rx Netpoll!!\n"); |
2945 | break; | 2949 | break; |
@@ -4235,14 +4239,14 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev) | |||
4235 | txdp->Buffer_Pointer = pci_map_single(sp->pdev, | 4239 | txdp->Buffer_Pointer = pci_map_single(sp->pdev, |
4236 | fifo->ufo_in_band_v, | 4240 | fifo->ufo_in_band_v, |
4237 | sizeof(u64), PCI_DMA_TODEVICE); | 4241 | sizeof(u64), PCI_DMA_TODEVICE); |
4238 | if (pci_dma_mapping_error(txdp->Buffer_Pointer)) | 4242 | if (pci_dma_mapping_error(sp->pdev, txdp->Buffer_Pointer)) |
4239 | goto pci_map_failed; | 4243 | goto pci_map_failed; |
4240 | txdp++; | 4244 | txdp++; |
4241 | } | 4245 | } |
4242 | 4246 | ||
4243 | txdp->Buffer_Pointer = pci_map_single | 4247 | txdp->Buffer_Pointer = pci_map_single |
4244 | (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE); | 4248 | (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE); |
4245 | if (pci_dma_mapping_error(txdp->Buffer_Pointer)) | 4249 | if (pci_dma_mapping_error(sp->pdev, txdp->Buffer_Pointer)) |
4246 | goto pci_map_failed; | 4250 | goto pci_map_failed; |
4247 | 4251 | ||
4248 | txdp->Host_Control = (unsigned long) skb; | 4252 | txdp->Host_Control = (unsigned long) skb; |
@@ -4345,7 +4349,7 @@ static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id) | |||
4345 | netif_rx_schedule(dev, &ring->napi); | 4349 | netif_rx_schedule(dev, &ring->napi); |
4346 | } else { | 4350 | } else { |
4347 | rx_intr_handler(ring, 0); | 4351 | rx_intr_handler(ring, 0); |
4348 | s2io_chk_rx_buffers(ring); | 4352 | s2io_chk_rx_buffers(sp, ring); |
4349 | } | 4353 | } |
4350 | 4354 | ||
4351 | return IRQ_HANDLED; | 4355 | return IRQ_HANDLED; |
@@ -4826,7 +4830,7 @@ static irqreturn_t s2io_isr(int irq, void *dev_id) | |||
4826 | */ | 4830 | */ |
4827 | if (!config->napi) { | 4831 | if (!config->napi) { |
4828 | for (i = 0; i < config->rx_ring_num; i++) | 4832 | for (i = 0; i < config->rx_ring_num; i++) |
4829 | s2io_chk_rx_buffers(&mac_control->rings[i]); | 4833 | s2io_chk_rx_buffers(sp, &mac_control->rings[i]); |
4830 | } | 4834 | } |
4831 | writeq(sp->general_int_mask, &bar0->general_int_mask); | 4835 | writeq(sp->general_int_mask, &bar0->general_int_mask); |
4832 | readl(&bar0->general_int_status); | 4836 | readl(&bar0->general_int_status); |
@@ -6859,7 +6863,7 @@ static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp, | |||
6859 | pci_map_single( sp->pdev, (*skb)->data, | 6863 | pci_map_single( sp->pdev, (*skb)->data, |
6860 | size - NET_IP_ALIGN, | 6864 | size - NET_IP_ALIGN, |
6861 | PCI_DMA_FROMDEVICE); | 6865 | PCI_DMA_FROMDEVICE); |
6862 | if (pci_dma_mapping_error(rxdp1->Buffer0_ptr)) | 6866 | if (pci_dma_mapping_error(sp->pdev, rxdp1->Buffer0_ptr)) |
6863 | goto memalloc_failed; | 6867 | goto memalloc_failed; |
6864 | rxdp->Host_Control = (unsigned long) (*skb); | 6868 | rxdp->Host_Control = (unsigned long) (*skb); |
6865 | } | 6869 | } |
@@ -6886,12 +6890,13 @@ static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp, | |||
6886 | pci_map_single(sp->pdev, (*skb)->data, | 6890 | pci_map_single(sp->pdev, (*skb)->data, |
6887 | dev->mtu + 4, | 6891 | dev->mtu + 4, |
6888 | PCI_DMA_FROMDEVICE); | 6892 | PCI_DMA_FROMDEVICE); |
6889 | if (pci_dma_mapping_error(rxdp3->Buffer2_ptr)) | 6893 | if (pci_dma_mapping_error(sp->pdev, rxdp3->Buffer2_ptr)) |
6890 | goto memalloc_failed; | 6894 | goto memalloc_failed; |
6891 | rxdp3->Buffer0_ptr = *temp0 = | 6895 | rxdp3->Buffer0_ptr = *temp0 = |
6892 | pci_map_single( sp->pdev, ba->ba_0, BUF0_LEN, | 6896 | pci_map_single( sp->pdev, ba->ba_0, BUF0_LEN, |
6893 | PCI_DMA_FROMDEVICE); | 6897 | PCI_DMA_FROMDEVICE); |
6894 | if (pci_dma_mapping_error(rxdp3->Buffer0_ptr)) { | 6898 | if (pci_dma_mapping_error(sp->pdev, |
6899 | rxdp3->Buffer0_ptr)) { | ||
6895 | pci_unmap_single (sp->pdev, | 6900 | pci_unmap_single (sp->pdev, |
6896 | (dma_addr_t)rxdp3->Buffer2_ptr, | 6901 | (dma_addr_t)rxdp3->Buffer2_ptr, |
6897 | dev->mtu + 4, PCI_DMA_FROMDEVICE); | 6902 | dev->mtu + 4, PCI_DMA_FROMDEVICE); |
@@ -6903,7 +6908,8 @@ static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp, | |||
6903 | rxdp3->Buffer1_ptr = *temp1 = | 6908 | rxdp3->Buffer1_ptr = *temp1 = |
6904 | pci_map_single(sp->pdev, ba->ba_1, BUF1_LEN, | 6909 | pci_map_single(sp->pdev, ba->ba_1, BUF1_LEN, |
6905 | PCI_DMA_FROMDEVICE); | 6910 | PCI_DMA_FROMDEVICE); |
6906 | if (pci_dma_mapping_error(rxdp3->Buffer1_ptr)) { | 6911 | if (pci_dma_mapping_error(sp->pdev, |
6912 | rxdp3->Buffer1_ptr)) { | ||
6907 | pci_unmap_single (sp->pdev, | 6913 | pci_unmap_single (sp->pdev, |
6908 | (dma_addr_t)rxdp3->Buffer0_ptr, | 6914 | (dma_addr_t)rxdp3->Buffer0_ptr, |
6909 | BUF0_LEN, PCI_DMA_FROMDEVICE); | 6915 | BUF0_LEN, PCI_DMA_FROMDEVICE); |
@@ -7187,7 +7193,7 @@ static int s2io_card_up(struct s2io_nic * sp) | |||
7187 | 7193 | ||
7188 | for (i = 0; i < config->rx_ring_num; i++) { | 7194 | for (i = 0; i < config->rx_ring_num; i++) { |
7189 | mac_control->rings[i].mtu = dev->mtu; | 7195 | mac_control->rings[i].mtu = dev->mtu; |
7190 | ret = fill_rx_buffers(&mac_control->rings[i], 1); | 7196 | ret = fill_rx_buffers(sp, &mac_control->rings[i], 1); |
7191 | if (ret) { | 7197 | if (ret) { |
7192 | DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n", | 7198 | DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n", |
7193 | dev->name); | 7199 | dev->name); |
diff --git a/drivers/net/sfc/rx.c b/drivers/net/sfc/rx.c
index 601b001437c0..0d27dd39bc09 100644
--- a/drivers/net/sfc/rx.c
+++ b/drivers/net/sfc/rx.c
@@ -233,7 +233,7 @@ static inline int efx_init_rx_buffer_skb(struct efx_rx_queue *rx_queue, | |||
233 | rx_buf->data, rx_buf->len, | 233 | rx_buf->data, rx_buf->len, |
234 | PCI_DMA_FROMDEVICE); | 234 | PCI_DMA_FROMDEVICE); |
235 | 235 | ||
236 | if (unlikely(pci_dma_mapping_error(rx_buf->dma_addr))) { | 236 | if (unlikely(pci_dma_mapping_error(efx->pci_dev, rx_buf->dma_addr))) { |
237 | dev_kfree_skb_any(rx_buf->skb); | 237 | dev_kfree_skb_any(rx_buf->skb); |
238 | rx_buf->skb = NULL; | 238 | rx_buf->skb = NULL; |
239 | return -EIO; | 239 | return -EIO; |
@@ -275,7 +275,7 @@ static inline int efx_init_rx_buffer_page(struct efx_rx_queue *rx_queue, | |||
275 | 0, efx_rx_buf_size(efx), | 275 | 0, efx_rx_buf_size(efx), |
276 | PCI_DMA_FROMDEVICE); | 276 | PCI_DMA_FROMDEVICE); |
277 | 277 | ||
278 | if (unlikely(pci_dma_mapping_error(dma_addr))) { | 278 | if (unlikely(pci_dma_mapping_error(efx->pci_dev, dma_addr))) { |
279 | __free_pages(rx_buf->page, efx->rx_buffer_order); | 279 | __free_pages(rx_buf->page, efx->rx_buffer_order); |
280 | rx_buf->page = NULL; | 280 | rx_buf->page = NULL; |
281 | return -EIO; | 281 | return -EIO; |
diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c index 5cdd082ab8f6..5e8374ab28ee 100644 --- a/drivers/net/sfc/tx.c +++ b/drivers/net/sfc/tx.c | |||
@@ -172,7 +172,7 @@ static inline int efx_enqueue_skb(struct efx_tx_queue *tx_queue, | |||
172 | 172 | ||
173 | /* Process all fragments */ | 173 | /* Process all fragments */ |
174 | while (1) { | 174 | while (1) { |
175 | if (unlikely(pci_dma_mapping_error(dma_addr))) | 175 | if (unlikely(pci_dma_mapping_error(pci_dev, dma_addr))) |
176 | goto pci_err; | 176 | goto pci_err; |
177 | 177 | ||
178 | /* Store fields for marking in the per-fragment final | 178 | /* Store fields for marking in the per-fragment final |
@@ -661,7 +661,8 @@ efx_tsoh_heap_alloc(struct efx_tx_queue *tx_queue, size_t header_len) | |||
661 | tsoh->dma_addr = pci_map_single(tx_queue->efx->pci_dev, | 661 | tsoh->dma_addr = pci_map_single(tx_queue->efx->pci_dev, |
662 | TSOH_BUFFER(tsoh), header_len, | 662 | TSOH_BUFFER(tsoh), header_len, |
663 | PCI_DMA_TODEVICE); | 663 | PCI_DMA_TODEVICE); |
664 | if (unlikely(pci_dma_mapping_error(tsoh->dma_addr))) { | 664 | if (unlikely(pci_dma_mapping_error(tx_queue->efx->pci_dev, |
665 | tsoh->dma_addr))) { | ||
665 | kfree(tsoh); | 666 | kfree(tsoh); |
666 | return NULL; | 667 | return NULL; |
667 | } | 668 | } |
@@ -863,7 +864,7 @@ static inline int tso_get_fragment(struct tso_state *st, struct efx_nic *efx, | |||
863 | 864 | ||
864 | st->ifc.unmap_addr = pci_map_page(efx->pci_dev, page, page_off, | 865 | st->ifc.unmap_addr = pci_map_page(efx->pci_dev, page, page_off, |
865 | len, PCI_DMA_TODEVICE); | 866 | len, PCI_DMA_TODEVICE); |
866 | if (likely(!pci_dma_mapping_error(st->ifc.unmap_addr))) { | 867 | if (likely(!pci_dma_mapping_error(efx->pci_dev, st->ifc.unmap_addr))) { |
867 | st->ifc.unmap_len = len; | 868 | st->ifc.unmap_len = len; |
868 | st->ifc.len = len; | 869 | st->ifc.len = len; |
869 | st->ifc.dma_addr = st->ifc.unmap_addr; | 870 | st->ifc.dma_addr = st->ifc.unmap_addr; |
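In sfc/tx.c the mapping check sits inside loops that map the linear area, each page fragment, and the TSO headers, so a failed mapping has to unwind the ones already made. A hedged sketch of that per-fragment map, check and unwind pattern, with a made-up function name and a caller-supplied address array (not code from this commit):

#include <linux/pci.h>
#include <linux/skbuff.h>

/* Map each page fragment of an skb for transmit DMA, testing every mapping
 * with the two-argument pci_dma_mapping_error() and unmapping the fragments
 * already mapped if a later one fails.  'addrs' must hold at least
 * MAX_SKB_FRAGS entries. */
static int example_map_tx_frags(struct pci_dev *pdev, struct sk_buff *skb,
                                dma_addr_t *addrs)
{
        int i;

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                skb_frag_t *f = &skb_shinfo(skb)->frags[i];

                addrs[i] = pci_map_page(pdev, f->page, f->page_offset,
                                        f->size, PCI_DMA_TODEVICE);
                if (pci_dma_mapping_error(pdev, addrs[i]))
                        goto unwind;
        }
        return 0;

unwind:
        while (--i >= 0) {
                skb_frag_t *f = &skb_shinfo(skb)->frags[i];

                pci_unmap_page(pdev, addrs[i], f->size, PCI_DMA_TODEVICE);
        }
        return -EIO;
}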
diff --git a/drivers/net/spider_net.c b/drivers/net/spider_net.c index 00aa0b108cb9..b6435d0d71f9 100644 --- a/drivers/net/spider_net.c +++ b/drivers/net/spider_net.c | |||
@@ -452,7 +452,7 @@ spider_net_prepare_rx_descr(struct spider_net_card *card, | |||
452 | /* iommu-map the skb */ | 452 | /* iommu-map the skb */ |
453 | buf = pci_map_single(card->pdev, descr->skb->data, | 453 | buf = pci_map_single(card->pdev, descr->skb->data, |
454 | SPIDER_NET_MAX_FRAME, PCI_DMA_FROMDEVICE); | 454 | SPIDER_NET_MAX_FRAME, PCI_DMA_FROMDEVICE); |
455 | if (pci_dma_mapping_error(buf)) { | 455 | if (pci_dma_mapping_error(card->pdev, buf)) { |
456 | dev_kfree_skb_any(descr->skb); | 456 | dev_kfree_skb_any(descr->skb); |
457 | descr->skb = NULL; | 457 | descr->skb = NULL; |
458 | if (netif_msg_rx_err(card) && net_ratelimit()) | 458 | if (netif_msg_rx_err(card) && net_ratelimit()) |
@@ -691,7 +691,7 @@ spider_net_prepare_tx_descr(struct spider_net_card *card, | |||
691 | unsigned long flags; | 691 | unsigned long flags; |
692 | 692 | ||
693 | buf = pci_map_single(card->pdev, skb->data, skb->len, PCI_DMA_TODEVICE); | 693 | buf = pci_map_single(card->pdev, skb->data, skb->len, PCI_DMA_TODEVICE); |
694 | if (pci_dma_mapping_error(buf)) { | 694 | if (pci_dma_mapping_error(card->pdev, buf)) { |
695 | if (netif_msg_tx_err(card) && net_ratelimit()) | 695 | if (netif_msg_tx_err(card) && net_ratelimit()) |
696 | dev_err(&card->netdev->dev, "could not iommu-map packet (%p, %i). " | 696 | dev_err(&card->netdev->dev, "could not iommu-map packet (%p, %i). " |
697 | "Dropping packet\n", skb->data, skb->len); | 697 | "Dropping packet\n", skb->data, skb->len); |
diff --git a/drivers/net/tc35815.c b/drivers/net/tc35815.c index a645e5028c14..8487ace9d2e3 100644 --- a/drivers/net/tc35815.c +++ b/drivers/net/tc35815.c | |||
@@ -506,7 +506,7 @@ static void *alloc_rxbuf_page(struct pci_dev *hwdev, dma_addr_t *dma_handle) | |||
506 | return NULL; | 506 | return NULL; |
507 | *dma_handle = pci_map_single(hwdev, buf, PAGE_SIZE, | 507 | *dma_handle = pci_map_single(hwdev, buf, PAGE_SIZE, |
508 | PCI_DMA_FROMDEVICE); | 508 | PCI_DMA_FROMDEVICE); |
509 | if (pci_dma_mapping_error(*dma_handle)) { | 509 | if (pci_dma_mapping_error(hwdev, *dma_handle)) { |
510 | free_page((unsigned long)buf); | 510 | free_page((unsigned long)buf); |
511 | return NULL; | 511 | return NULL; |
512 | } | 512 | } |
@@ -536,7 +536,7 @@ static struct sk_buff *alloc_rxbuf_skb(struct net_device *dev, | |||
536 | return NULL; | 536 | return NULL; |
537 | *dma_handle = pci_map_single(hwdev, skb->data, RX_BUF_SIZE, | 537 | *dma_handle = pci_map_single(hwdev, skb->data, RX_BUF_SIZE, |
538 | PCI_DMA_FROMDEVICE); | 538 | PCI_DMA_FROMDEVICE); |
539 | if (pci_dma_mapping_error(*dma_handle)) { | 539 | if (pci_dma_mapping_error(hwdev, *dma_handle)) { |
540 | dev_kfree_skb_any(skb); | 540 | dev_kfree_skb_any(skb); |
541 | return NULL; | 541 | return NULL; |
542 | } | 542 | } |
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index c28d7cb2035b..0196a0df9021 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c | |||
@@ -19,6 +19,7 @@ | |||
19 | //#define DEBUG | 19 | //#define DEBUG |
20 | #include <linux/netdevice.h> | 20 | #include <linux/netdevice.h> |
21 | #include <linux/etherdevice.h> | 21 | #include <linux/etherdevice.h> |
22 | #include <linux/ethtool.h> | ||
22 | #include <linux/module.h> | 23 | #include <linux/module.h> |
23 | #include <linux/virtio.h> | 24 | #include <linux/virtio.h> |
24 | #include <linux/virtio_net.h> | 25 | #include <linux/virtio_net.h> |
@@ -54,9 +55,15 @@ struct virtnet_info | |||
54 | struct tasklet_struct tasklet; | 55 | struct tasklet_struct tasklet; |
55 | bool free_in_tasklet; | 56 | bool free_in_tasklet; |
56 | 57 | ||
58 | /* I like... big packets and I cannot lie! */ | ||
59 | bool big_packets; | ||
60 | |||
57 | /* Receive & send queues. */ | 61 | /* Receive & send queues. */ |
58 | struct sk_buff_head recv; | 62 | struct sk_buff_head recv; |
59 | struct sk_buff_head send; | 63 | struct sk_buff_head send; |
64 | |||
65 | /* Chain pages by the private ptr. */ | ||
66 | struct page *pages; | ||
60 | }; | 67 | }; |
61 | 68 | ||
62 | static inline struct virtio_net_hdr *skb_vnet_hdr(struct sk_buff *skb) | 69 | static inline struct virtio_net_hdr *skb_vnet_hdr(struct sk_buff *skb) |
@@ -69,6 +76,23 @@ static inline void vnet_hdr_to_sg(struct scatterlist *sg, struct sk_buff *skb) | |||
69 | sg_init_one(sg, skb_vnet_hdr(skb), sizeof(struct virtio_net_hdr)); | 76 | sg_init_one(sg, skb_vnet_hdr(skb), sizeof(struct virtio_net_hdr)); |
70 | } | 77 | } |
71 | 78 | ||
79 | static void give_a_page(struct virtnet_info *vi, struct page *page) | ||
80 | { | ||
81 | page->private = (unsigned long)vi->pages; | ||
82 | vi->pages = page; | ||
83 | } | ||
84 | |||
85 | static struct page *get_a_page(struct virtnet_info *vi, gfp_t gfp_mask) | ||
86 | { | ||
87 | struct page *p = vi->pages; | ||
88 | |||
89 | if (p) | ||
90 | vi->pages = (struct page *)p->private; | ||
91 | else | ||
92 | p = alloc_page(gfp_mask); | ||
93 | return p; | ||
94 | } | ||
95 | |||
72 | static void skb_xmit_done(struct virtqueue *svq) | 96 | static void skb_xmit_done(struct virtqueue *svq) |
73 | { | 97 | { |
74 | struct virtnet_info *vi = svq->vdev->priv; | 98 | struct virtnet_info *vi = svq->vdev->priv; |
@@ -88,6 +112,7 @@ static void receive_skb(struct net_device *dev, struct sk_buff *skb, | |||
88 | unsigned len) | 112 | unsigned len) |
89 | { | 113 | { |
90 | struct virtio_net_hdr *hdr = skb_vnet_hdr(skb); | 114 | struct virtio_net_hdr *hdr = skb_vnet_hdr(skb); |
115 | int err; | ||
91 | 116 | ||
92 | if (unlikely(len < sizeof(struct virtio_net_hdr) + ETH_HLEN)) { | 117 | if (unlikely(len < sizeof(struct virtio_net_hdr) + ETH_HLEN)) { |
93 | pr_debug("%s: short packet %i\n", dev->name, len); | 118 | pr_debug("%s: short packet %i\n", dev->name, len); |
@@ -95,10 +120,23 @@ static void receive_skb(struct net_device *dev, struct sk_buff *skb, | |||
95 | goto drop; | 120 | goto drop; |
96 | } | 121 | } |
97 | len -= sizeof(struct virtio_net_hdr); | 122 | len -= sizeof(struct virtio_net_hdr); |
98 | BUG_ON(len > MAX_PACKET_LEN); | ||
99 | 123 | ||
100 | skb_trim(skb, len); | 124 | if (len <= MAX_PACKET_LEN) { |
125 | unsigned int i; | ||
101 | 126 | ||
127 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) | ||
128 | give_a_page(dev->priv, skb_shinfo(skb)->frags[i].page); | ||
129 | skb->data_len = 0; | ||
130 | skb_shinfo(skb)->nr_frags = 0; | ||
131 | } | ||
132 | |||
133 | err = pskb_trim(skb, len); | ||
134 | if (err) { | ||
135 | pr_debug("%s: pskb_trim failed %i %d\n", dev->name, len, err); | ||
136 | dev->stats.rx_dropped++; | ||
137 | goto drop; | ||
138 | } | ||
139 | skb->truesize += skb->data_len; | ||
102 | dev->stats.rx_bytes += skb->len; | 140 | dev->stats.rx_bytes += skb->len; |
103 | dev->stats.rx_packets++; | 141 | dev->stats.rx_packets++; |
104 | 142 | ||
@@ -160,7 +198,7 @@ static void try_fill_recv(struct virtnet_info *vi) | |||
160 | { | 198 | { |
161 | struct sk_buff *skb; | 199 | struct sk_buff *skb; |
162 | struct scatterlist sg[2+MAX_SKB_FRAGS]; | 200 | struct scatterlist sg[2+MAX_SKB_FRAGS]; |
163 | int num, err; | 201 | int num, err, i; |
164 | 202 | ||
165 | sg_init_table(sg, 2+MAX_SKB_FRAGS); | 203 | sg_init_table(sg, 2+MAX_SKB_FRAGS); |
166 | for (;;) { | 204 | for (;;) { |
@@ -170,6 +208,24 @@ static void try_fill_recv(struct virtnet_info *vi) | |||
170 | 208 | ||
171 | skb_put(skb, MAX_PACKET_LEN); | 209 | skb_put(skb, MAX_PACKET_LEN); |
172 | vnet_hdr_to_sg(sg, skb); | 210 | vnet_hdr_to_sg(sg, skb); |
211 | |||
212 | if (vi->big_packets) { | ||
213 | for (i = 0; i < MAX_SKB_FRAGS; i++) { | ||
214 | skb_frag_t *f = &skb_shinfo(skb)->frags[i]; | ||
215 | f->page = get_a_page(vi, GFP_ATOMIC); | ||
216 | if (!f->page) | ||
217 | break; | ||
218 | |||
219 | f->page_offset = 0; | ||
220 | f->size = PAGE_SIZE; | ||
221 | |||
222 | skb->data_len += PAGE_SIZE; | ||
223 | skb->len += PAGE_SIZE; | ||
224 | |||
225 | skb_shinfo(skb)->nr_frags++; | ||
226 | } | ||
227 | } | ||
228 | |||
173 | num = skb_to_sgvec(skb, sg+1, 0, skb->len) + 1; | 229 | num = skb_to_sgvec(skb, sg+1, 0, skb->len) + 1; |
174 | skb_queue_head(&vi->recv, skb); | 230 | skb_queue_head(&vi->recv, skb); |
175 | 231 | ||
@@ -335,16 +391,11 @@ again: | |||
335 | free_old_xmit_skbs(vi); | 391 | free_old_xmit_skbs(vi); |
336 | 392 | ||
337 | /* If we has a buffer left over from last time, send it now. */ | 393 | /* If we has a buffer left over from last time, send it now. */ |
337 | /* If we have a buffer left over from last time, send it now. */ | 393 | /* If we has a buffer left over from last time, send it now. */ |
338 | if (unlikely(vi->last_xmit_skb)) { | 394 | if (unlikely(vi->last_xmit_skb) && |
339 | if (xmit_skb(vi, vi->last_xmit_skb) != 0) { | 395 | xmit_skb(vi, vi->last_xmit_skb) != 0) |
340 | /* Drop this skb: we only queue one. */ | 396 | goto stop_queue; |
341 | vi->dev->stats.tx_dropped++; | 397 | |
342 | kfree_skb(skb); | 398 | vi->last_xmit_skb = NULL; |
343 | skb = NULL; | ||
344 | goto stop_queue; | ||
345 | } | ||
346 | vi->last_xmit_skb = NULL; | ||
347 | } | ||
348 | 399 | ||
349 | /* Put new one in send queue and do transmit */ | 400 | /* Put new one in send queue and do transmit */ |
350 | if (likely(skb)) { | 401 | if (likely(skb)) { |
@@ -370,6 +421,11 @@ stop_queue: | |||
370 | netif_start_queue(dev); | 421 | netif_start_queue(dev); |
371 | goto again; | 422 | goto again; |
372 | } | 423 | } |
424 | if (skb) { | ||
425 | /* Drop this skb: we only queue one. */ | ||
426 | vi->dev->stats.tx_dropped++; | ||
427 | kfree_skb(skb); | ||
428 | } | ||
373 | goto done; | 429 | goto done; |
374 | } | 430 | } |
375 | 431 | ||
@@ -408,6 +464,22 @@ static int virtnet_close(struct net_device *dev) | |||
408 | return 0; | 464 | return 0; |
409 | } | 465 | } |
410 | 466 | ||
467 | static int virtnet_set_tx_csum(struct net_device *dev, u32 data) | ||
468 | { | ||
469 | struct virtnet_info *vi = netdev_priv(dev); | ||
470 | struct virtio_device *vdev = vi->vdev; | ||
471 | |||
472 | if (data && !virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) | ||
473 | return -ENOSYS; | ||
474 | |||
475 | return ethtool_op_set_tx_hw_csum(dev, data); | ||
476 | } | ||
477 | |||
478 | static struct ethtool_ops virtnet_ethtool_ops = { | ||
479 | .set_tx_csum = virtnet_set_tx_csum, | ||
480 | .set_sg = ethtool_op_set_sg, | ||
481 | }; | ||
482 | |||
411 | static int virtnet_probe(struct virtio_device *vdev) | 483 | static int virtnet_probe(struct virtio_device *vdev) |
412 | { | 484 | { |
413 | int err; | 485 | int err; |
@@ -427,6 +499,7 @@ static int virtnet_probe(struct virtio_device *vdev) | |||
427 | #ifdef CONFIG_NET_POLL_CONTROLLER | 499 | #ifdef CONFIG_NET_POLL_CONTROLLER |
428 | dev->poll_controller = virtnet_netpoll; | 500 | dev->poll_controller = virtnet_netpoll; |
429 | #endif | 501 | #endif |
502 | SET_ETHTOOL_OPS(dev, &virtnet_ethtool_ops); | ||
430 | SET_NETDEV_DEV(dev, &vdev->dev); | 503 | SET_NETDEV_DEV(dev, &vdev->dev); |
431 | 504 | ||
432 | /* Do we support "hardware" checksums? */ | 505 | /* Do we support "hardware" checksums? */ |
@@ -462,11 +535,18 @@ static int virtnet_probe(struct virtio_device *vdev) | |||
462 | vi->dev = dev; | 535 | vi->dev = dev; |
463 | vi->vdev = vdev; | 536 | vi->vdev = vdev; |
464 | vdev->priv = vi; | 537 | vdev->priv = vi; |
538 | vi->pages = NULL; | ||
465 | 539 | ||
466 | /* If they give us a callback when all buffers are done, we don't need | 540 | /* If they give us a callback when all buffers are done, we don't need |
467 | * the timer. */ | 541 | * the timer. */ |
468 | vi->free_in_tasklet = virtio_has_feature(vdev,VIRTIO_F_NOTIFY_ON_EMPTY); | 542 | vi->free_in_tasklet = virtio_has_feature(vdev,VIRTIO_F_NOTIFY_ON_EMPTY); |
469 | 543 | ||
544 | /* If we can receive ANY GSO packets, we must allocate large ones. */ | ||
545 | if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) | ||
546 | || virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6) | ||
547 | || virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN)) | ||
548 | vi->big_packets = true; | ||
549 | |||
470 | /* We expect two virtqueues, receive then send. */ | 550 | /* We expect two virtqueues, receive then send. */ |
471 | vi->rvq = vdev->config->find_vq(vdev, 0, skb_recv_done); | 551 | vi->rvq = vdev->config->find_vq(vdev, 0, skb_recv_done); |
472 | if (IS_ERR(vi->rvq)) { | 552 | if (IS_ERR(vi->rvq)) { |
@@ -541,6 +621,10 @@ static void virtnet_remove(struct virtio_device *vdev) | |||
541 | vdev->config->del_vq(vi->svq); | 621 | vdev->config->del_vq(vi->svq); |
542 | vdev->config->del_vq(vi->rvq); | 622 | vdev->config->del_vq(vi->rvq); |
543 | unregister_netdev(vi->dev); | 623 | unregister_netdev(vi->dev); |
624 | |||
625 | while (vi->pages) | ||
626 | __free_pages(get_a_page(vi, GFP_KERNEL), 0); | ||
627 | |||
544 | free_netdev(vi->dev); | 628 | free_netdev(vi->dev); |
545 | } | 629 | } |
546 | 630 | ||
@@ -553,7 +637,9 @@ static unsigned int features[] = { | |||
553 | VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM, | 637 | VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM, |
554 | VIRTIO_NET_F_GSO, VIRTIO_NET_F_MAC, | 638 | VIRTIO_NET_F_GSO, VIRTIO_NET_F_MAC, |
555 | VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6, | 639 | VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6, |
556 | VIRTIO_NET_F_HOST_ECN, VIRTIO_F_NOTIFY_ON_EMPTY, | 640 | VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6, |
641 | VIRTIO_NET_F_GUEST_ECN, /* We don't yet handle UFO input. */ | ||
642 | VIRTIO_F_NOTIFY_ON_EMPTY, | ||
557 | }; | 643 | }; |
558 | 644 | ||
559 | static struct virtio_driver virtio_net = { | 645 | static struct virtio_driver virtio_net = { |
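The virtio_net pieces above fit together as follows: negotiating any of the guest TSO/ECN features at probe time sets big_packets; try_fill_recv() then attaches up to MAX_SKB_FRAGS whole pages to every receive skb so the host can deliver GSO-sized frames; receive_skb() hands the fragment pages back through give_a_page() when a packet fits in the linear area, and uses pskb_trim() instead of skb_trim() because the length may now exceed MAX_PACKET_LEN; virtnet_remove() drains the page chain. A condensed, comment-heavy sketch of the fill-side loop, reusing the patch's struct virtnet_info and get_a_page() (illustrative, not a replacement for the hunk above):

#include <linux/skbuff.h>
#include <linux/mm.h>

static void example_attach_big_packet_pages(struct virtnet_info *vi,
                                            struct sk_buff *skb)
{
        unsigned int i;

        for (i = 0; i < MAX_SKB_FRAGS; i++) {
                skb_frag_t *f = &skb_shinfo(skb)->frags[i];

                /* Prefer a page recycled through the page->private chain;
                 * fall back to alloc_page().  GFP_ATOMIC as in
                 * try_fill_recv(), which can run from softirq context. */
                f->page = get_a_page(vi, GFP_ATOMIC);
                if (!f->page)
                        break;

                f->page_offset = 0;
                f->size = PAGE_SIZE;

                /* Account the page in both len and data_len so that
                 * skb_to_sgvec() includes it in the scatterlist handed to
                 * the host. */
                skb->data_len += PAGE_SIZE;
                skb->len += PAGE_SIZE;
                skb_shinfo(skb)->nr_frags++;
        }
}

On teardown the same chain is simply popped with get_a_page(vi, GFP_KERNEL) until empty and each page released with __free_pages(page, 0), since every cached page is order 0.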
diff --git a/drivers/net/wireless/ath5k/base.c b/drivers/net/wireless/ath5k/base.c index 217d506527a9..d9769c527346 100644 --- a/drivers/net/wireless/ath5k/base.c +++ b/drivers/net/wireless/ath5k/base.c | |||
@@ -1166,7 +1166,7 @@ ath5k_rxbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf) | |||
1166 | bf->skb = skb; | 1166 | bf->skb = skb; |
1167 | bf->skbaddr = pci_map_single(sc->pdev, | 1167 | bf->skbaddr = pci_map_single(sc->pdev, |
1168 | skb->data, sc->rxbufsize, PCI_DMA_FROMDEVICE); | 1168 | skb->data, sc->rxbufsize, PCI_DMA_FROMDEVICE); |
1169 | if (unlikely(pci_dma_mapping_error(bf->skbaddr))) { | 1169 | if (unlikely(pci_dma_mapping_error(sc->pdev, bf->skbaddr))) { |
1170 | ATH5K_ERR(sc, "%s: DMA mapping failed\n", __func__); | 1170 | ATH5K_ERR(sc, "%s: DMA mapping failed\n", __func__); |
1171 | dev_kfree_skb(skb); | 1171 | dev_kfree_skb(skb); |
1172 | bf->skb = NULL; | 1172 | bf->skb = NULL; |
@@ -1918,7 +1918,7 @@ ath5k_beacon_setup(struct ath5k_softc *sc, struct ath5k_buf *bf) | |||
1918 | ATH5K_DBG(sc, ATH5K_DEBUG_BEACON, "skb %p [data %p len %u] " | 1918 | ATH5K_DBG(sc, ATH5K_DEBUG_BEACON, "skb %p [data %p len %u] " |
1919 | "skbaddr %llx\n", skb, skb->data, skb->len, | 1919 | "skbaddr %llx\n", skb, skb->data, skb->len, |
1920 | (unsigned long long)bf->skbaddr); | 1920 | (unsigned long long)bf->skbaddr); |
1921 | if (pci_dma_mapping_error(bf->skbaddr)) { | 1921 | if (pci_dma_mapping_error(sc->pdev, bf->skbaddr)) { |
1922 | ATH5K_ERR(sc, "beacon DMA mapping failed\n"); | 1922 | ATH5K_ERR(sc, "beacon DMA mapping failed\n"); |
1923 | return -EIO; | 1923 | return -EIO; |
1924 | } | 1924 | } |