Diffstat (limited to 'drivers/net/tg3.c')
-rw-r--r--  drivers/net/tg3.c | 157
1 file changed, 68 insertions(+), 89 deletions(-)
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 71d2c5cfdad9..eb9f8f3638e1 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -876,7 +876,7 @@ static void tg3_mdio_config(struct tg3 *tp)
 {
 	u32 val;
 
-	if (tp->mdio_bus.phy_map[PHY_ADDR]->interface !=
+	if (tp->mdio_bus->phy_map[PHY_ADDR]->interface !=
 	    PHY_INTERFACE_MODE_RGMII)
 		return;
 
@@ -920,9 +920,9 @@ static void tg3_mdio_config(struct tg3 *tp)
 static void tg3_mdio_start(struct tg3 *tp)
 {
 	if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
-		mutex_lock(&tp->mdio_bus.mdio_lock);
+		mutex_lock(&tp->mdio_bus->mdio_lock);
 		tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_PAUSED;
-		mutex_unlock(&tp->mdio_bus.mdio_lock);
+		mutex_unlock(&tp->mdio_bus->mdio_lock);
 	}
 
 	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
@@ -936,9 +936,9 @@ static void tg3_mdio_start(struct tg3 *tp)
 static void tg3_mdio_stop(struct tg3 *tp)
 {
 	if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
-		mutex_lock(&tp->mdio_bus.mdio_lock);
+		mutex_lock(&tp->mdio_bus->mdio_lock);
 		tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_PAUSED;
-		mutex_unlock(&tp->mdio_bus.mdio_lock);
+		mutex_unlock(&tp->mdio_bus->mdio_lock);
 	}
 }
 
@@ -947,7 +947,6 @@ static int tg3_mdio_init(struct tg3 *tp)
 	int i;
 	u32 reg;
 	struct phy_device *phydev;
-	struct mii_bus *mdio_bus = &tp->mdio_bus;
 
 	tg3_mdio_start(tp);
 
@@ -955,21 +954,23 @@ static int tg3_mdio_init(struct tg3 *tp)
 	    (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED))
 		return 0;
 
-	memset(mdio_bus, 0, sizeof(*mdio_bus));
+	tp->mdio_bus = mdiobus_alloc();
+	if (tp->mdio_bus == NULL)
+		return -ENOMEM;
 
-	mdio_bus->name = "tg3 mdio bus";
-	snprintf(mdio_bus->id, MII_BUS_ID_SIZE, "%x",
+	tp->mdio_bus->name = "tg3 mdio bus";
+	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
 		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
-	mdio_bus->priv = tp;
-	mdio_bus->dev = &tp->pdev->dev;
-	mdio_bus->read = &tg3_mdio_read;
-	mdio_bus->write = &tg3_mdio_write;
-	mdio_bus->reset = &tg3_mdio_reset;
-	mdio_bus->phy_mask = ~(1 << PHY_ADDR);
-	mdio_bus->irq = &tp->mdio_irq[0];
+	tp->mdio_bus->priv = tp;
+	tp->mdio_bus->parent = &tp->pdev->dev;
+	tp->mdio_bus->read = &tg3_mdio_read;
+	tp->mdio_bus->write = &tg3_mdio_write;
+	tp->mdio_bus->reset = &tg3_mdio_reset;
+	tp->mdio_bus->phy_mask = ~(1 << PHY_ADDR);
+	tp->mdio_bus->irq = &tp->mdio_irq[0];
 
 	for (i = 0; i < PHY_MAX_ADDR; i++)
-		mdio_bus->irq[i] = PHY_POLL;
+		tp->mdio_bus->irq[i] = PHY_POLL;
 
 	/* The bus registration will look for all the PHYs on the mdio bus.
 	 * Unfortunately, it does not ensure the PHY is powered up before
@@ -979,7 +980,7 @@ static int tg3_mdio_init(struct tg3 *tp)
 	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
 		tg3_bmcr_reset(tp);
 
-	i = mdiobus_register(mdio_bus);
+	i = mdiobus_register(tp->mdio_bus);
 	if (i) {
 		printk(KERN_WARNING "%s: mdiobus_reg failed (0x%x)\n",
 			tp->dev->name, i);
@@ -988,7 +989,7 @@ static int tg3_mdio_init(struct tg3 *tp)
 
 	tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_INITED;
 
-	phydev = tp->mdio_bus.phy_map[PHY_ADDR];
+	phydev = tp->mdio_bus->phy_map[PHY_ADDR];
 
 	switch (phydev->phy_id) {
 	case TG3_PHY_ID_BCM50610:
@@ -1014,7 +1015,8 @@ static void tg3_mdio_fini(struct tg3 *tp)
 {
 	if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
 		tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_INITED;
-		mdiobus_unregister(&tp->mdio_bus);
+		mdiobus_unregister(tp->mdio_bus);
+		mdiobus_free(tp->mdio_bus);
 		tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_PAUSED;
 	}
 }
@@ -1220,7 +1222,7 @@ static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
 	u32 old_tx_mode = tp->tx_mode;
 
 	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
-		autoneg = tp->mdio_bus.phy_map[PHY_ADDR]->autoneg;
+		autoneg = tp->mdio_bus->phy_map[PHY_ADDR]->autoneg;
 	else
 		autoneg = tp->link_config.autoneg;
 
@@ -1257,7 +1259,7 @@ static void tg3_adjust_link(struct net_device *dev)
 	u8 oldflowctrl, linkmesg = 0;
 	u32 mac_mode, lcl_adv, rmt_adv;
 	struct tg3 *tp = netdev_priv(dev);
-	struct phy_device *phydev = tp->mdio_bus.phy_map[PHY_ADDR];
+	struct phy_device *phydev = tp->mdio_bus->phy_map[PHY_ADDR];
 
 	spin_lock(&tp->lock);
 
@@ -1334,7 +1336,7 @@ static int tg3_phy_init(struct tg3 *tp)
 	/* Bring the PHY back to a known state. */
 	tg3_bmcr_reset(tp);
 
-	phydev = tp->mdio_bus.phy_map[PHY_ADDR];
+	phydev = tp->mdio_bus->phy_map[PHY_ADDR];
 
 	/* Attach the MAC to the PHY. */
 	phydev = phy_connect(tp->dev, phydev->dev.bus_id, tg3_adjust_link,
@@ -1367,7 +1369,7 @@ static void tg3_phy_start(struct tg3 *tp)
 	if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
 		return;
 
-	phydev = tp->mdio_bus.phy_map[PHY_ADDR];
+	phydev = tp->mdio_bus->phy_map[PHY_ADDR];
 
 	if (tp->link_config.phy_is_low_power) {
 		tp->link_config.phy_is_low_power = 0;
@@ -1387,13 +1389,13 @@ static void tg3_phy_stop(struct tg3 *tp)
 	if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
 		return;
 
-	phy_stop(tp->mdio_bus.phy_map[PHY_ADDR]);
+	phy_stop(tp->mdio_bus->phy_map[PHY_ADDR]);
 }
 
 static void tg3_phy_fini(struct tg3 *tp)
 {
 	if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
-		phy_disconnect(tp->mdio_bus.phy_map[PHY_ADDR]);
+		phy_disconnect(tp->mdio_bus->phy_map[PHY_ADDR]);
 		tp->tg3_flags3 &= ~TG3_FLG3_PHY_CONNECTED;
 	}
 }
@@ -2049,7 +2051,7 @@ static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
 		struct phy_device *phydev;
 		u32 advertising;
 
-		phydev = tp->mdio_bus.phy_map[PHY_ADDR];
+		phydev = tp->mdio_bus->phy_map[PHY_ADDR];
 
 		tp->link_config.phy_is_low_power = 1;
 
@@ -3861,10 +3863,7 @@ static void tg3_tx(struct tg3 *tp)
 			return;
 		}
 
-		pci_unmap_single(tp->pdev,
-				 pci_unmap_addr(ri, mapping),
-				 skb_headlen(skb),
-				 PCI_DMA_TODEVICE);
+		skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
 
 		ri->skb = NULL;
 
@@ -3874,12 +3873,6 @@ static void tg3_tx(struct tg3 *tp)
 			ri = &tp->tx_buffers[sw_idx];
 			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
 				tx_bug = 1;
-
-			pci_unmap_page(tp->pdev,
-				       pci_unmap_addr(ri, mapping),
-				       skb_shinfo(skb)->frags[i].size,
-				       PCI_DMA_TODEVICE);
-
 			sw_idx = NEXT_TX(sw_idx);
 		}
 
@@ -4633,12 +4626,16 @@ static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
 	} else {
 		/* New SKB is guaranteed to be linear. */
 		entry = *start;
-		new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
-					  PCI_DMA_TODEVICE);
+		ret = skb_dma_map(&tp->pdev->dev, new_skb, DMA_TO_DEVICE);
+		new_addr = skb_shinfo(new_skb)->dma_maps[0];
+
 		/* Make sure new skb does not cross any 4G boundaries.
 		 * Drop the packet if it does.
 		 */
-		if (tg3_4g_overflow_test(new_addr, new_skb->len)) {
+		if (ret || tg3_4g_overflow_test(new_addr, new_skb->len)) {
+			if (!ret)
+				skb_dma_unmap(&tp->pdev->dev, new_skb,
+					      DMA_TO_DEVICE);
 			ret = -1;
 			dev_kfree_skb(new_skb);
 			new_skb = NULL;
@@ -4652,18 +4649,8 @@ static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
 	/* Now clean up the sw ring entries. */
 	i = 0;
 	while (entry != last_plus_one) {
-		int len;
-
-		if (i == 0)
-			len = skb_headlen(skb);
-		else
-			len = skb_shinfo(skb)->frags[i-1].size;
-		pci_unmap_single(tp->pdev,
-				 pci_unmap_addr(&tp->tx_buffers[entry], mapping),
-				 len, PCI_DMA_TODEVICE);
 		if (i == 0) {
 			tp->tx_buffers[entry].skb = new_skb;
-			pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
 		} else {
 			tp->tx_buffers[entry].skb = NULL;
 		}
@@ -4671,6 +4658,7 @@ static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
 		i++;
 	}
 
+	skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
 	dev_kfree_skb(skb);
 
 	return ret;
@@ -4705,8 +4693,9 @@ static void tg3_set_txd(struct tg3 *tp, int entry,
 static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct tg3 *tp = netdev_priv(dev);
-	dma_addr_t mapping;
 	u32 len, entry, base_flags, mss;
+	struct skb_shared_info *sp;
+	dma_addr_t mapping;
 
 	len = skb_headlen(skb);
 
@@ -4765,11 +4754,16 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 			    (vlan_tx_tag_get(skb) << 16));
 #endif
 
-	/* Queue skb data, a.k.a. the main skb fragment. */
-	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
+	if (skb_dma_map(&tp->pdev->dev, skb, DMA_TO_DEVICE)) {
+		dev_kfree_skb(skb);
+		goto out_unlock;
+	}
+
+	sp = skb_shinfo(skb);
+
+	mapping = sp->dma_maps[0];
 
 	tp->tx_buffers[entry].skb = skb;
-	pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
 
 	tg3_set_txd(tp, entry, mapping, len, base_flags,
 		    (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
@@ -4785,13 +4779,8 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 
 			len = frag->size;
-			mapping = pci_map_page(tp->pdev,
-					       frag->page,
-					       frag->page_offset,
-					       len, PCI_DMA_TODEVICE);
-
+			mapping = sp->dma_maps[i + 1];
 			tp->tx_buffers[entry].skb = NULL;
-			pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
 
 			tg3_set_txd(tp, entry, mapping, len,
 				    base_flags, (i == last) | (mss << 1));
@@ -4859,9 +4848,10 @@ tg3_tso_bug_end:
 static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
 {
 	struct tg3 *tp = netdev_priv(dev);
-	dma_addr_t mapping;
 	u32 len, entry, base_flags, mss;
+	struct skb_shared_info *sp;
 	int would_hit_hwbug;
+	dma_addr_t mapping;
 
 	len = skb_headlen(skb);
 
@@ -4942,11 +4932,16 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
 			    (vlan_tx_tag_get(skb) << 16));
 #endif
 
-	/* Queue skb data, a.k.a. the main skb fragment. */
-	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
+	if (skb_dma_map(&tp->pdev->dev, skb, DMA_TO_DEVICE)) {
+		dev_kfree_skb(skb);
+		goto out_unlock;
+	}
+
+	sp = skb_shinfo(skb);
+
+	mapping = sp->dma_maps[0];
 
 	tp->tx_buffers[entry].skb = skb;
-	pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
 
 	would_hit_hwbug = 0;
 
@@ -4969,13 +4964,9 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 
 			len = frag->size;
-			mapping = pci_map_page(tp->pdev,
-					       frag->page,
-					       frag->page_offset,
-					       len, PCI_DMA_TODEVICE);
+			mapping = sp->dma_maps[i + 1];
 
 			tp->tx_buffers[entry].skb = NULL;
-			pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
 
 			if (tg3_4g_overflow_test(mapping, len))
 				would_hit_hwbug = 1;
@@ -5128,7 +5119,6 @@ static void tg3_free_rings(struct tg3 *tp)
 	for (i = 0; i < TG3_TX_RING_SIZE; ) {
 		struct tx_ring_info *txp;
 		struct sk_buff *skb;
-		int j;
 
 		txp = &tp->tx_buffers[i];
 		skb = txp->skb;
@@ -5138,22 +5128,11 @@ static void tg3_free_rings(struct tg3 *tp)
 			continue;
 		}
 
-		pci_unmap_single(tp->pdev,
-				 pci_unmap_addr(txp, mapping),
-				 skb_headlen(skb),
-				 PCI_DMA_TODEVICE);
-		txp->skb = NULL;
+		skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
 
-		i++;
+		txp->skb = NULL;
 
-		for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
-			txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
-			pci_unmap_page(tp->pdev,
-				       pci_unmap_addr(txp, mapping),
-				       skb_shinfo(skb)->frags[j].size,
-				       PCI_DMA_TODEVICE);
-			i++;
-		}
+		i += skb_shinfo(skb)->nr_frags + 1;
 
 		dev_kfree_skb_any(skb);
 	}
@@ -8977,7 +8956,7 @@ static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
 		if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
 			return -EAGAIN;
-		return phy_ethtool_gset(tp->mdio_bus.phy_map[PHY_ADDR], cmd);
+		return phy_ethtool_gset(tp->mdio_bus->phy_map[PHY_ADDR], cmd);
 	}
 
 	cmd->supported = (SUPPORTED_Autoneg);
@@ -9018,7 +8997,7 @@ static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
 		if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
 			return -EAGAIN;
-		return phy_ethtool_sset(tp->mdio_bus.phy_map[PHY_ADDR], cmd);
+		return phy_ethtool_sset(tp->mdio_bus->phy_map[PHY_ADDR], cmd);
 	}
 
 	if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) {
@@ -9166,7 +9145,7 @@ static int tg3_nway_reset(struct net_device *dev)
 	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
 		if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
 			return -EAGAIN;
-		r = phy_start_aneg(tp->mdio_bus.phy_map[PHY_ADDR]);
+		r = phy_start_aneg(tp->mdio_bus->phy_map[PHY_ADDR]);
 	} else {
 		u32 bmcr;
 
@@ -9283,7 +9262,7 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam
 		u32 newadv;
 		struct phy_device *phydev;
 
-		phydev = tp->mdio_bus.phy_map[PHY_ADDR];
+		phydev = tp->mdio_bus->phy_map[PHY_ADDR];
 
 		if (epause->rx_pause) {
 			if (epause->tx_pause)
@@ -10265,7 +10244,7 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
 		if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
 			return -EAGAIN;
-		return phy_mii_ioctl(tp->mdio_bus.phy_map[PHY_ADDR], data, cmd);
+		return phy_mii_ioctl(tp->mdio_bus->phy_map[PHY_ADDR], data, cmd);
 	}
 
 	switch(cmd) {