Diffstat (limited to 'drivers/net/b44.c')

 -rw-r--r--  drivers/net/b44.c | 56 ++++++++++++++++++++++++--------------------------------
 1 file changed, 24 insertions(+), 32 deletions(-)
diff --git a/drivers/net/b44.c b/drivers/net/b44.c
index 879a2fff474e..96fb0ec905a7 100644
--- a/drivers/net/b44.c
+++ b/drivers/net/b44.c
@@ -15,6 +15,7 @@
 #include <linux/ethtool.h>
 #include <linux/mii.h>
 #include <linux/if_ether.h>
+#include <linux/if_vlan.h>
 #include <linux/etherdevice.h>
 #include <linux/pci.h>
 #include <linux/delay.h>
@@ -68,8 +69,8 @@
 	 (BP)->tx_cons - (BP)->tx_prod - TX_RING_GAP(BP))
 #define NEXT_TX(N)		(((N) + 1) & (B44_TX_RING_SIZE - 1))

-#define RX_PKT_BUF_SZ		(1536 + bp->rx_offset + 64)
-#define TX_PKT_BUF_SZ		(B44_MAX_MTU + ETH_HLEN + 8)
+#define RX_PKT_OFFSET		30
+#define RX_PKT_BUF_SZ		(1536 + RX_PKT_OFFSET + 64)

 /* minimum number of free TX descriptors required to wake up TX process */
 #define B44_TX_WAKEUP_THRESH		(B44_TX_RING_SIZE / 4)
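The 30 here is not new magic: it is the value bp->rx_offset was always set to, moved out of b44_get_invariants() (see the hunk at line 2093 below) and made a compile-time constant. Per the comment deleted there, the chip prepends a receive header to each frame, and starting the frame 30 bytes into the buffer lands the Ethernet header on a 2-byte boundary. A tiny userspace check of the arithmetic, assuming the 28-byte struct rx_header that b44.h defines:

	#include <assert.h>

	#define RX_HEADER_LEN	28	/* sizeof(struct rx_header), per b44.h */
	#define RX_PKT_OFFSET	30	/* receive header plus 2 bytes of pad */
	#define ETH_HLEN	14

	int main(void)
	{
		/* the frame starts 2 bytes past the hardware's rx_header... */
		assert(RX_PKT_OFFSET - RX_HEADER_LEN == 2);
		/* ...so the IP header after the Ethernet header is 4-byte aligned */
		assert((RX_PKT_OFFSET + ETH_HLEN) % 4 == 0);
		return 0;
	}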
@@ -599,8 +600,7 @@ static void b44_timer(unsigned long __opaque)

 	spin_unlock_irq(&bp->lock);

-	bp->timer.expires = jiffies + HZ;
-	add_timer(&bp->timer);
+	mod_timer(&bp->timer, round_jiffies(jiffies + HZ));
 }

 static void b44_tx(struct b44 *bp)
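mod_timer() replaces the expires-then-add_timer() pair with one call that is also safe if the timer is already pending, and round_jiffies() rounds the one-second rearm onto a whole-second boundary so periodic timers across the system can expire in a batch instead of waking the CPU at scattered points. The same self-rearming pattern in isolation, with my_priv and my_timer_fn as illustrative names (timer callbacks take an unsigned long in this kernel era, as b44_timer() itself shows):

	#include <linux/timer.h>
	#include <linux/jiffies.h>

	struct my_priv {
		struct timer_list timer;
		/* ... */
	};

	static void my_timer_fn(unsigned long data)
	{
		struct my_priv *priv = (struct my_priv *) data;

		/* ... periodic housekeeping ... */

		/* rearm ~1 s out, rounded to the next whole second */
		mod_timer(&priv->timer, round_jiffies(jiffies + HZ));
	}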
@@ -653,7 +653,7 @@ static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
 		src_map = &bp->rx_buffers[src_idx];
 	dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
 	map = &bp->rx_buffers[dest_idx];
-	skb = dev_alloc_skb(RX_PKT_BUF_SZ);
+	skb = netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ);
 	if (skb == NULL)
 		return -ENOMEM;

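netdev_alloc_skb() allocates the skb and binds it to the device in one step; that is what lets a later hunk in this function drop the hand-written skb->dev assignment. Roughly, as a sketch rather than the driver's code:

	/* old: allocate, then bind the device by hand further down */
	skb = dev_alloc_skb(RX_PKT_BUF_SZ);
	if (skb)
		skb->dev = bp->dev;

	/* new: one call does both */
	skb = netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ);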
@@ -669,7 +669,7 @@ static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
 		if (!dma_mapping_error(mapping))
 			pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ,PCI_DMA_FROMDEVICE);
 		dev_kfree_skb_any(skb);
-		skb = __dev_alloc_skb(RX_PKT_BUF_SZ,GFP_DMA);
+		skb = __netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ, GFP_ATOMIC|GFP_DMA);
 		if (skb == NULL)
 			return -ENOMEM;
 		mapping = pci_map_single(bp->pdev, skb->data,
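The gfp change is a subtle fix: the mask argument replaces the default flags rather than adding to them, so the old __dev_alloc_skb(..., GFP_DMA) passed the zone modifier alone and silently lost GFP_ATOMIC's access to emergency reserves, even though this retry runs in atomic context (it is reached from the RX path). The new call spells out both pieces:

	/* ZONE_DMA pages sit below 16 MB on x86, well inside the chip's
	 * 1 GB DMA window; GFP_ATOMIC keeps the retry non-sleeping: */
	skb = __netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ, GFP_ATOMIC|GFP_DMA);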
@@ -684,11 +684,9 @@ static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
 		}
 	}

-	skb->dev = bp->dev;
-	skb_reserve(skb, bp->rx_offset);
+	rh = (struct rx_header *) skb->data;
+	skb_reserve(skb, RX_PKT_OFFSET);

-	rh = (struct rx_header *)
-		(skb->data - bp->rx_offset);
 	rh->len = 0;
 	rh->flags = 0;

@@ -698,13 +696,13 @@ static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
 	if (src_map != NULL)
 		src_map->skb = NULL;

-	ctrl = (DESC_CTRL_LEN & (RX_PKT_BUF_SZ - bp->rx_offset));
+	ctrl = (DESC_CTRL_LEN & (RX_PKT_BUF_SZ - RX_PKT_OFFSET));
 	if (dest_idx == (B44_RX_RING_SIZE - 1))
 		ctrl |= DESC_CTRL_EOT;

 	dp = &bp->rx_ring[dest_idx];
 	dp->ctrl = cpu_to_le32(ctrl);
-	dp->addr = cpu_to_le32((u32) mapping + bp->rx_offset + bp->dma_offset);
+	dp->addr = cpu_to_le32((u32) mapping + RX_PKT_OFFSET + bp->dma_offset);

 	if (bp->flags & B44_FLAG_RX_RING_HACK)
 		b44_sync_dma_desc_for_device(bp->pdev, bp->rx_ring_dma,
@@ -783,7 +781,7 @@ static int b44_rx(struct b44 *bp, int budget)
 					    PCI_DMA_FROMDEVICE);
 		rh = (struct rx_header *) skb->data;
 		len = le16_to_cpu(rh->len);
-		if ((len > (RX_PKT_BUF_SZ - bp->rx_offset)) ||
+		if ((len > (RX_PKT_BUF_SZ - RX_PKT_OFFSET)) ||
 		    (rh->flags & cpu_to_le16(RX_FLAG_ERRORS))) {
 		drop_it:
 			b44_recycle_rx(bp, cons, bp->rx_prod);
@@ -815,8 +813,8 @@ static int b44_rx(struct b44 *bp, int budget)
 			pci_unmap_single(bp->pdev, map,
 					 skb_size, PCI_DMA_FROMDEVICE);
 			/* Leave out rx_header */
-			skb_put(skb, len+bp->rx_offset);
-			skb_pull(skb,bp->rx_offset);
+			skb_put(skb, len + RX_PKT_OFFSET);
+			skb_pull(skb, RX_PKT_OFFSET);
 		} else {
 			struct sk_buff *copy_skb;

@@ -828,7 +826,7 @@ static int b44_rx(struct b44 *bp, int budget)
 			skb_reserve(copy_skb, 2);
 			skb_put(copy_skb, len);
 			/* DMA sync done above, copy just the actual packet */
-			skb_copy_from_linear_data_offset(skb, bp->rx_offset,
+			skb_copy_from_linear_data_offset(skb, RX_PKT_OFFSET,
 							 copy_skb->data, len);
 			skb = copy_skb;
 		}
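Both RX delivery paths skip the hardware-written prefix, now via the one constant. The zero-copy path uses skb_put()/skb_pull() rather than raw pointer math so skb->len and skb->tail stay consistent: put() exposes prefix plus frame, pull() then hides the prefix. The copybreak path gives the fresh skb the same alignment with skb_reserve(copy_skb, 2). Condensed, using the driver's own variable names:

	/* zero-copy: keep the DMA buffer, trim the prefix in place
	 * (len counts only the frame, as read from rh->len above) */
	skb_put(skb, len + RX_PKT_OFFSET);	/* tail after prefix + frame */
	skb_pull(skb, RX_PKT_OFFSET);		/* data now at the frame */

	/* copybreak: small frame, copy out and recycle the DMA buffer */
	skb_reserve(copy_skb, 2);	/* 2 + 14-byte MAC header => aligned IP header */
	skb_put(copy_skb, len);
	skb_copy_from_linear_data_offset(skb, RX_PKT_OFFSET,
					 copy_skb->data, len);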
@@ -969,7 +967,6 @@ static void b44_tx_timeout(struct net_device *dev)
 static int b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct b44 *bp = netdev_priv(dev);
-	struct sk_buff *bounce_skb;
 	int rc = NETDEV_TX_OK;
 	dma_addr_t mapping;
 	u32 len, entry, ctrl;
@@ -987,12 +984,13 @@ static int b44_start_xmit(struct sk_buff *skb, struct net_device *dev)

 	mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
 	if (dma_mapping_error(mapping) || mapping + len > DMA_30BIT_MASK) {
+		struct sk_buff *bounce_skb;
+
 		/* Chip can't handle DMA to/from >1GB, use bounce buffer */
 		if (!dma_mapping_error(mapping))
 			pci_unmap_single(bp->pdev, mapping, len, PCI_DMA_TODEVICE);

-		bounce_skb = __dev_alloc_skb(TX_PKT_BUF_SZ,
-					     GFP_ATOMIC|GFP_DMA);
+		bounce_skb = __dev_alloc_skb(len, GFP_ATOMIC | GFP_DMA);
 		if (!bounce_skb)
 			goto err_out;

@@ -1001,13 +999,12 @@ static int b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		if (dma_mapping_error(mapping) || mapping + len > DMA_30BIT_MASK) {
 			if (!dma_mapping_error(mapping))
 				pci_unmap_single(bp->pdev, mapping,
 						 len, PCI_DMA_TODEVICE);
 			dev_kfree_skb_any(bounce_skb);
 			goto err_out;
 		}

-		skb_copy_from_linear_data(skb, skb_put(bounce_skb, len),
-					  skb->len);
+		skb_copy_from_linear_data(skb, skb_put(bounce_skb, len), len);
 		dev_kfree_skb_any(skb);
 		skb = bounce_skb;
 	}
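Three tidy-ups meet in this bounce-buffer path: bounce_skb is now declared in the only block that uses it, the buffer is sized to len (the number of bytes actually mapped and copied) instead of the worst-case TX_PKT_BUF_SZ whose #define this patch deletes, and the copy length is written as len to match the skb_put() beside it. Asking ZONE_DMA for only what is needed matters, since that pool is small. The resulting shape of the fallback, condensed with the second mapping check and unmap-on-failure elided:

	mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
	if (dma_mapping_error(mapping) || mapping + len > DMA_30BIT_MASK) {
		struct sk_buff *bounce_skb;

		/* first buffer landed above 1 GB: retry from ZONE_DMA */
		bounce_skb = __dev_alloc_skb(len, GFP_ATOMIC | GFP_DMA);
		if (!bounce_skb)
			goto err_out;
		mapping = pci_map_single(bp->pdev, bounce_skb->data, len,
					 PCI_DMA_TODEVICE);
		/* copy exactly the len bytes that were mapped */
		skb_copy_from_linear_data(skb, skb_put(bounce_skb, len), len);
		dev_kfree_skb_any(skb);
		skb = bounce_skb;
	}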
@@ -1396,12 +1393,12 @@ static void b44_init_hw(struct b44 *bp, int reset_kind)
 	bw32(bp, B44_TX_WMARK, 56); /* XXX magic */
 	if (reset_kind == B44_PARTIAL_RESET) {
 		bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
-				      (bp->rx_offset << DMARX_CTRL_ROSHIFT)));
+				      (RX_PKT_OFFSET << DMARX_CTRL_ROSHIFT)));
 	} else {
 		bw32(bp, B44_DMATX_CTRL, DMATX_CTRL_ENABLE);
 		bw32(bp, B44_DMATX_ADDR, bp->tx_ring_dma + bp->dma_offset);
 		bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
-				      (bp->rx_offset << DMARX_CTRL_ROSHIFT)));
+				      (RX_PKT_OFFSET << DMARX_CTRL_ROSHIFT)));
 		bw32(bp, B44_DMARX_ADDR, bp->rx_ring_dma + bp->dma_offset);

 		bw32(bp, B44_DMARX_PTR, bp->rx_pending);
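Judging from how the driver pairs it with the descriptor address, the receive-offset field in B44_DMARX_CTRL is what makes the DMA engine deposit each frame RX_PKT_OFFSET bytes into its buffer, with the rx_header filling the gap, so the register value, the buffer size, and the descriptor address must move in lockstep. After this patch all three derive from the single constant:

	/* one constant, three consumers:
	 *   buffer size:  RX_PKT_BUF_SZ = 1536 + RX_PKT_OFFSET + 64
	 *   descriptor:   dp->addr = mapping + RX_PKT_OFFSET + bp->dma_offset
	 *   DMA engine:   DMARX_CTRL |= RX_PKT_OFFSET << DMARX_CTRL_ROSHIFT
	 */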
@@ -2093,11 +2090,6 @@ static int __devinit b44_get_invariants(struct b44 *bp)

 	bp->phy_addr = eeprom[90] & 0x1f;

-	/* With this, plus the rx_header prepended to the data by the
-	 * hardware, we'll land the ethernet header on a 2-byte boundary.
-	 */
-	bp->rx_offset = 30;
-
 	bp->imask = IMASK_DEF;

 	bp->core_unit = ssb_core_unit(bp);
@@ -2348,11 +2340,11 @@ static int b44_resume(struct pci_dev *pdev)
 	netif_device_attach(bp->dev);
 	spin_unlock_irq(&bp->lock);

-	bp->timer.expires = jiffies + HZ;
-	add_timer(&bp->timer);
-
 	b44_enable_ints(bp);
 	netif_wake_queue(dev);
+
+	mod_timer(&bp->timer, jiffies + 1);
+
 	return 0;
 }

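The resume path gets the same add_timer()-to-mod_timer() conversion as b44_timer(), but with jiffies + 1 rather than a one-second delay, and the rearm now happens only after interrupts are re-enabled and the queue is woken. In effect:

	/* before: armed early, first link/PHY check a full second after resume */
	bp->timer.expires = jiffies + HZ;
	add_timer(&bp->timer);

	/* after: armed last, first link/PHY check on the very next tick */
	mod_timer(&bp->timer, jiffies + 1);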