author    Christoph Hellwig <hch@lst.de>        2018-05-12 06:16:50 -0400
committer David S. Miller <davem@davemloft.net> 2018-05-13 20:22:58 -0400
commit    55c82617c3e82210b7471e9334e8fc5df6a9961f
tree      c2afeec6a9be04fea2b09c3fb60a4ed9e3ab66b3
parent    b84bbaf7a6c8cca24f8acf25a2c8e46913a947ba
3c59x: convert to generic DMA API
This driver supports EISA devices in addition to PCI devices, and relied
on the legacy behavior of the pci_dma* shims, which forwarded a NULL
device pointer to the DMA API, and on the DMA API being able to handle
that NULL. When the NULL forwarding was removed, EISA support broke.
Fix this by converting the driver to the generic DMA API instead of the
legacy PCI shims.

Fixes: 4167b2ad ("PCI: Remove NULL device handling from PCI DMA API")
Reported-by: tedheadster <tedheadster@gmail.com>
Tested-by: tedheadster <tedheadster@gmail.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--  drivers/net/ethernet/3com/3c59x.c  104
1 file changed, 51 insertions(+), 53 deletions(-)
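For readers unfamiliar with the conversion, the sketch below shows the general pattern applied throughout the diff: pci_map_single()/PCI_DMA_TODEVICE calls, which require a struct pci_dev (NULL for EISA cards), are replaced by dma_map_single()/DMA_TO_DEVICE calls, which only need a struct device and therefore work for any bus type. This is an illustrative, hand-written example rather than code taken from 3c59x.c; the helper name example_map_tx() and its parameters are hypothetical.

    #include <linux/dma-mapping.h>

    /*
     * Illustrative only: map a transmit buffer with the generic DMA API.
     * 'dev' could be &pdev->dev for a PCI card or the EISA device's
     * struct device; the old pci_map_single() shim needed a struct
     * pci_dev and so received NULL for EISA devices, which is what broke.
     */
    static int example_map_tx(struct device *dev, void *buf, size_t len,
                              dma_addr_t *dma_out)
    {
            dma_addr_t dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

            if (dma_mapping_error(dev, dma))
                    return -ENOMEM;

            *dma_out = dma;
            return 0;
    }

The same substitution covers the other shims used in this driver: pci_alloc_consistent() becomes dma_alloc_coherent() (which additionally takes a GFP flag), pci_unmap_single()/pci_unmap_page() become dma_unmap_single()/dma_unmap_page(), and pci_dma_sync_single_for_cpu()/_for_device() become dma_sync_single_for_cpu()/_for_device().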
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
index 36c8950dbd2d..176861bd2252 100644
--- a/drivers/net/ethernet/3com/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -1212,9 +1212,9 @@ static int vortex_probe1(struct device *gendev, void __iomem *ioaddr, int irq,
 	vp->mii.reg_num_mask = 0x1f;
 
 	/* Makes sure rings are at least 16 byte aligned. */
-	vp->rx_ring = pci_alloc_consistent(pdev, sizeof(struct boom_rx_desc) * RX_RING_SIZE
+	vp->rx_ring = dma_alloc_coherent(gendev, sizeof(struct boom_rx_desc) * RX_RING_SIZE
 					   + sizeof(struct boom_tx_desc) * TX_RING_SIZE,
-					   &vp->rx_ring_dma);
+					   &vp->rx_ring_dma, GFP_KERNEL);
 	retval = -ENOMEM;
 	if (!vp->rx_ring)
 		goto free_device;
@@ -1476,11 +1476,10 @@ static int vortex_probe1(struct device *gendev, void __iomem *ioaddr, int irq,
 	return 0;
 
 free_ring:
-	pci_free_consistent(pdev,
-		sizeof(struct boom_rx_desc) * RX_RING_SIZE
-			+ sizeof(struct boom_tx_desc) * TX_RING_SIZE,
-		vp->rx_ring,
-		vp->rx_ring_dma);
+	dma_free_coherent(&pdev->dev,
+		sizeof(struct boom_rx_desc) * RX_RING_SIZE +
+			sizeof(struct boom_tx_desc) * TX_RING_SIZE,
+		vp->rx_ring, vp->rx_ring_dma);
free_device:
 	free_netdev(dev);
 	pr_err(PFX "vortex_probe1 fails. Returns %d\n", retval);
@@ -1751,9 +1750,9 @@ vortex_open(struct net_device *dev)
 			break;	/* Bad news! */
 
 		skb_reserve(skb, NET_IP_ALIGN);	/* Align IP on 16 byte boundaries */
-		dma = pci_map_single(VORTEX_PCI(vp), skb->data,
-				     PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
-		if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma))
+		dma = dma_map_single(vp->gendev, skb->data,
+				     PKT_BUF_SZ, DMA_FROM_DEVICE);
+		if (dma_mapping_error(vp->gendev, dma))
 			break;
 		vp->rx_ring[i].addr = cpu_to_le32(dma);
 	}
@@ -2067,9 +2066,9 @@ vortex_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	if (vp->bus_master) {
 		/* Set the bus-master controller to transfer the packet. */
 		int len = (skb->len + 3) & ~3;
-		vp->tx_skb_dma = pci_map_single(VORTEX_PCI(vp), skb->data, len,
-						PCI_DMA_TODEVICE);
-		if (dma_mapping_error(&VORTEX_PCI(vp)->dev, vp->tx_skb_dma)) {
+		vp->tx_skb_dma = dma_map_single(vp->gendev, skb->data, len,
+						DMA_TO_DEVICE);
+		if (dma_mapping_error(vp->gendev, vp->tx_skb_dma)) {
 			dev_kfree_skb_any(skb);
 			dev->stats.tx_dropped++;
 			return NETDEV_TX_OK;
@@ -2168,9 +2167,9 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded | AddTCPChksum | AddUDPChksum);
 
 	if (!skb_shinfo(skb)->nr_frags) {
-		dma_addr = pci_map_single(VORTEX_PCI(vp), skb->data, skb->len,
-					  PCI_DMA_TODEVICE);
-		if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr))
+		dma_addr = dma_map_single(vp->gendev, skb->data, skb->len,
+					  DMA_TO_DEVICE);
+		if (dma_mapping_error(vp->gendev, dma_addr))
 			goto out_dma_err;
 
 		vp->tx_ring[entry].frag[0].addr = cpu_to_le32(dma_addr);
@@ -2178,9 +2177,9 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	} else {
 		int i;
 
-		dma_addr = pci_map_single(VORTEX_PCI(vp), skb->data,
-					  skb_headlen(skb), PCI_DMA_TODEVICE);
-		if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr))
+		dma_addr = dma_map_single(vp->gendev, skb->data,
+					  skb_headlen(skb), DMA_TO_DEVICE);
+		if (dma_mapping_error(vp->gendev, dma_addr))
 			goto out_dma_err;
 
 		vp->tx_ring[entry].frag[0].addr = cpu_to_le32(dma_addr);
@@ -2189,21 +2188,21 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 
-			dma_addr = skb_frag_dma_map(&VORTEX_PCI(vp)->dev, frag,
+			dma_addr = skb_frag_dma_map(vp->gendev, frag,
 						    0,
 						    frag->size,
 						    DMA_TO_DEVICE);
-			if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr)) {
+			if (dma_mapping_error(vp->gendev, dma_addr)) {
 				for(i = i-1; i >= 0; i--)
-					dma_unmap_page(&VORTEX_PCI(vp)->dev,
+					dma_unmap_page(vp->gendev,
 						       le32_to_cpu(vp->tx_ring[entry].frag[i+1].addr),
 						       le32_to_cpu(vp->tx_ring[entry].frag[i+1].length),
 						       DMA_TO_DEVICE);
 
-				pci_unmap_single(VORTEX_PCI(vp),
+				dma_unmap_single(vp->gendev,
 						 le32_to_cpu(vp->tx_ring[entry].frag[0].addr),
 						 le32_to_cpu(vp->tx_ring[entry].frag[0].length),
-						 PCI_DMA_TODEVICE);
+						 DMA_TO_DEVICE);
 
 				goto out_dma_err;
 			}
@@ -2218,8 +2217,8 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		}
 	}
 #else
-	dma_addr = pci_map_single(VORTEX_PCI(vp), skb->data, skb->len, PCI_DMA_TODEVICE);
-	if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr))
+	dma_addr = dma_map_single(vp->gendev, skb->data, skb->len, DMA_TO_DEVICE);
+	if (dma_mapping_error(vp->gendev, dma_addr))
 		goto out_dma_err;
 	vp->tx_ring[entry].addr = cpu_to_le32(dma_addr);
 	vp->tx_ring[entry].length = cpu_to_le32(skb->len | LAST_FRAG);
@@ -2254,7 +2253,7 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
 out:
 	return NETDEV_TX_OK;
 out_dma_err:
-	dev_err(&VORTEX_PCI(vp)->dev, "Error mapping dma buffer\n");
+	dev_err(vp->gendev, "Error mapping dma buffer\n");
 	goto out;
 }
 
@@ -2322,7 +2321,7 @@ vortex_interrupt(int irq, void *dev_id)
 		if (status & DMADone) {
 			if (ioread16(ioaddr + Wn7_MasterStatus) & 0x1000) {
 				iowrite16(0x1000, ioaddr + Wn7_MasterStatus); /* Ack the event. */
-				pci_unmap_single(VORTEX_PCI(vp), vp->tx_skb_dma, (vp->tx_skb->len + 3) & ~3, PCI_DMA_TODEVICE);
+				dma_unmap_single(vp->gendev, vp->tx_skb_dma, (vp->tx_skb->len + 3) & ~3, DMA_TO_DEVICE);
 				pkts_compl++;
 				bytes_compl += vp->tx_skb->len;
 				dev_kfree_skb_irq(vp->tx_skb); /* Release the transferred buffer */
@@ -2459,19 +2458,19 @@ boomerang_interrupt(int irq, void *dev_id)
 			struct sk_buff *skb = vp->tx_skbuff[entry];
 #if DO_ZEROCOPY
 			int i;
-			pci_unmap_single(VORTEX_PCI(vp),
+			dma_unmap_single(vp->gendev,
 					le32_to_cpu(vp->tx_ring[entry].frag[0].addr),
 					le32_to_cpu(vp->tx_ring[entry].frag[0].length)&0xFFF,
-					PCI_DMA_TODEVICE);
+					DMA_TO_DEVICE);
 
 			for (i=1; i<=skb_shinfo(skb)->nr_frags; i++)
-				pci_unmap_page(VORTEX_PCI(vp),
+				dma_unmap_page(vp->gendev,
 						le32_to_cpu(vp->tx_ring[entry].frag[i].addr),
 						le32_to_cpu(vp->tx_ring[entry].frag[i].length)&0xFFF,
-						PCI_DMA_TODEVICE);
+						DMA_TO_DEVICE);
 #else
-			pci_unmap_single(VORTEX_PCI(vp),
-				le32_to_cpu(vp->tx_ring[entry].addr), skb->len, PCI_DMA_TODEVICE);
+			dma_unmap_single(vp->gendev,
+				le32_to_cpu(vp->tx_ring[entry].addr), skb->len, DMA_TO_DEVICE);
 #endif
 			pkts_compl++;
 			bytes_compl += skb->len;
@@ -2561,14 +2560,14 @@ static int vortex_rx(struct net_device *dev)
 			/* 'skb_put()' points to the start of sk_buff data area. */
 			if (vp->bus_master &&
 				! (ioread16(ioaddr + Wn7_MasterStatus) & 0x8000)) {
-				dma_addr_t dma = pci_map_single(VORTEX_PCI(vp), skb_put(skb, pkt_len),
-								   pkt_len, PCI_DMA_FROMDEVICE);
+				dma_addr_t dma = dma_map_single(vp->gendev, skb_put(skb, pkt_len),
+								   pkt_len, DMA_FROM_DEVICE);
 				iowrite32(dma, ioaddr + Wn7_MasterAddr);
 				iowrite16((skb->len + 3) & ~3, ioaddr + Wn7_MasterLen);
 				iowrite16(StartDMAUp, ioaddr + EL3_CMD);
 				while (ioread16(ioaddr + Wn7_MasterStatus) & 0x8000)
 					;
-				pci_unmap_single(VORTEX_PCI(vp), dma, pkt_len, PCI_DMA_FROMDEVICE);
+				dma_unmap_single(vp->gendev, dma, pkt_len, DMA_FROM_DEVICE);
 			} else {
 				ioread32_rep(ioaddr + RX_FIFO,
 					     skb_put(skb, pkt_len),
@@ -2635,11 +2634,11 @@ boomerang_rx(struct net_device *dev)
 			if (pkt_len < rx_copybreak &&
 			    (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
 				skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
-				pci_dma_sync_single_for_cpu(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
+				dma_sync_single_for_cpu(vp->gendev, dma, PKT_BUF_SZ, DMA_FROM_DEVICE);
 				/* 'skb_put()' points to the start of sk_buff data area. */
 				skb_put_data(skb, vp->rx_skbuff[entry]->data,
 					     pkt_len);
-				pci_dma_sync_single_for_device(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
+				dma_sync_single_for_device(vp->gendev, dma, PKT_BUF_SZ, DMA_FROM_DEVICE);
 				vp->rx_copy++;
 			} else {
 				/* Pre-allocate the replacement skb. If it or its
@@ -2651,9 +2650,9 @@ boomerang_rx(struct net_device *dev)
 					dev->stats.rx_dropped++;
 					goto clear_complete;
 				}
-				newdma = pci_map_single(VORTEX_PCI(vp), newskb->data,
-							PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
-				if (dma_mapping_error(&VORTEX_PCI(vp)->dev, newdma)) {
+				newdma = dma_map_single(vp->gendev, newskb->data,
+							PKT_BUF_SZ, DMA_FROM_DEVICE);
+				if (dma_mapping_error(vp->gendev, newdma)) {
 					dev->stats.rx_dropped++;
 					consume_skb(newskb);
 					goto clear_complete;
@@ -2664,7 +2663,7 @@ boomerang_rx(struct net_device *dev)
 				vp->rx_skbuff[entry] = newskb;
 				vp->rx_ring[entry].addr = cpu_to_le32(newdma);
 				skb_put(skb, pkt_len);
-				pci_unmap_single(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
+				dma_unmap_single(vp->gendev, dma, PKT_BUF_SZ, DMA_FROM_DEVICE);
 				vp->rx_nocopy++;
 			}
 			skb->protocol = eth_type_trans(skb, dev);
@@ -2761,8 +2760,8 @@ vortex_close(struct net_device *dev)
 	if (vp->full_bus_master_rx) { /* Free Boomerang bus master Rx buffers. */
 		for (i = 0; i < RX_RING_SIZE; i++)
 			if (vp->rx_skbuff[i]) {
-				pci_unmap_single( VORTEX_PCI(vp), le32_to_cpu(vp->rx_ring[i].addr),
-									PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
+				dma_unmap_single(vp->gendev, le32_to_cpu(vp->rx_ring[i].addr),
+									PKT_BUF_SZ, DMA_FROM_DEVICE);
 				dev_kfree_skb(vp->rx_skbuff[i]);
 				vp->rx_skbuff[i] = NULL;
 			}
@@ -2775,12 +2774,12 @@ vortex_close(struct net_device *dev)
 			int k;
 
 			for (k=0; k<=skb_shinfo(skb)->nr_frags; k++)
-				pci_unmap_single(VORTEX_PCI(vp),
+				dma_unmap_single(vp->gendev,
 						 le32_to_cpu(vp->tx_ring[i].frag[k].addr),
 						 le32_to_cpu(vp->tx_ring[i].frag[k].length)&0xFFF,
-						 PCI_DMA_TODEVICE);
+						 DMA_TO_DEVICE);
 #else
-			pci_unmap_single(VORTEX_PCI(vp), le32_to_cpu(vp->tx_ring[i].addr), skb->len, PCI_DMA_TODEVICE);
+			dma_unmap_single(vp->gendev, le32_to_cpu(vp->tx_ring[i].addr), skb->len, DMA_TO_DEVICE);
 #endif
 			dev_kfree_skb(skb);
 			vp->tx_skbuff[i] = NULL;
@@ -3288,11 +3287,10 @@ static void vortex_remove_one(struct pci_dev *pdev)
 
 	pci_iounmap(pdev, vp->ioaddr);
 
-	pci_free_consistent(pdev,
-			sizeof(struct boom_rx_desc) * RX_RING_SIZE
-			+ sizeof(struct boom_tx_desc) * TX_RING_SIZE,
-			vp->rx_ring,
-			vp->rx_ring_dma);
+	dma_free_coherent(&pdev->dev,
+			sizeof(struct boom_rx_desc) * RX_RING_SIZE +
+			sizeof(struct boom_tx_desc) * TX_RING_SIZE,
+			vp->rx_ring, vp->rx_ring_dma);
 
 	pci_release_regions(pdev);
 