author     Linus Torvalds <torvalds@woody.linux-foundation.org>   2007-03-06 20:30:59 -0500
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>   2007-03-06 20:30:59 -0500
commit     205c911da322908abe127b96d2ef2a4a2aa5109a (patch)
tree       43f6878c6a255965a4061d05da68dbc1df9e0ca4 /drivers
parent     c7276fde27bca89798f33c0be9543dc108468788 (diff)
parent     f3be97427172856d6865ddfedea84fa3a9f33227 (diff)
Merge branch 'upstream-linus' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/netdev-2.6
* 'upstream-linus' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/netdev-2.6:
sis900 warning fixes
mv643xx_eth: Place explicit port number in mv643xx_eth_platform_data
  pcnet32: Fix PCnet32 performance bug on non-coherent architectures
__devinit & __devexit cleanups for de2104x driver
3c59x: Handle pci_enable_device() failure while resuming
dmfe: Fix link detection
dmfe: fix two bugs
dmfe: trivial/spelling fixes
revert "drivers/net/tulip/dmfe: support basic carrier detection"
ucc_geth: returns NETDEV_TX_BUSY when BD ring is full
ucc_geth: Fix BD processing
natsemi: netpoll fixes
bonding: Improve IGMP join processing
bonding: only receive ARPs for us
bonding: fix double dev_add_pack
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/net/3c59x.c              |   8
-rw-r--r--  drivers/net/bonding/bond_main.c  |  86
-rw-r--r--  drivers/net/mv643xx_eth.c        |  53
-rw-r--r--  drivers/net/natsemi.c            |  24
-rw-r--r--  drivers/net/pcnet32.c            |   4
-rw-r--r--  drivers/net/sis900.c             |  10
-rw-r--r--  drivers/net/tulip/de2104x.c      |   6
-rw-r--r--  drivers/net/tulip/dmfe.c         | 204
-rw-r--r--  drivers/net/ucc_geth.c           |  17
9 files changed, 247 insertions, 165 deletions
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c
index 716a47210aa3..72995777f809 100644
--- a/drivers/net/3c59x.c
+++ b/drivers/net/3c59x.c
@@ -822,11 +822,17 @@ static int vortex_resume(struct pci_dev *pdev)
 {
         struct net_device *dev = pci_get_drvdata(pdev);
         struct vortex_private *vp = netdev_priv(dev);
+        int err;
 
         if (dev && vp) {
                 pci_set_power_state(pdev, PCI_D0);
                 pci_restore_state(pdev);
-                pci_enable_device(pdev);
+                err = pci_enable_device(pdev);
+                if (err) {
+                        printk(KERN_WARNING "%s: Could not enable device \n",
+                                dev->name);
+                        return err;
+                }
                 pci_set_master(pdev);
                 if (request_irq(dev->irq, vp->full_bus_master_rx ?
                                 &boomerang_interrupt : &vortex_interrupt, IRQF_SHARED, dev->name, dev)) {
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index ea73ebff4387..e4724d874e7c 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -60,6 +60,7 @@
 #include <linux/errno.h>
 #include <linux/netdevice.h>
 #include <linux/inetdevice.h>
+#include <linux/igmp.h>
 #include <linux/etherdevice.h>
 #include <linux/skbuff.h>
 #include <net/sock.h>
@@ -861,6 +862,28 @@ static void bond_mc_delete(struct bonding *bond, void *addr, int alen)
         }
 }
 
+
+/*
+ * Retrieve the list of registered multicast addresses for the bonding
+ * device and retransmit an IGMP JOIN request to the current active
+ * slave.
+ */
+static void bond_resend_igmp_join_requests(struct bonding *bond)
+{
+        struct in_device *in_dev;
+        struct ip_mc_list *im;
+
+        rcu_read_lock();
+        in_dev = __in_dev_get_rcu(bond->dev);
+        if (in_dev) {
+                for (im = in_dev->mc_list; im; im = im->next) {
+                        ip_mc_rejoin_group(im);
+                }
+        }
+
+        rcu_read_unlock();
+}
+
 /*
  * Totally destroys the mc_list in bond
  */
@@ -874,6 +897,7 @@ static void bond_mc_list_destroy(struct bonding *bond)
                 kfree(dmi);
                 dmi = bond->mc_list;
         }
+        bond->mc_list = NULL;
 }
 
 /*
@@ -967,6 +991,7 @@ static void bond_mc_swap(struct bonding *bond, struct slave *new_active, struct
                 for (dmi = bond->dev->mc_list; dmi; dmi = dmi->next) {
                         dev_mc_add(new_active->dev, dmi->dmi_addr, dmi->dmi_addrlen, 0);
                 }
+                bond_resend_igmp_join_requests(bond);
         }
 }
 
@@ -3423,15 +3448,21 @@ void bond_register_arp(struct bonding *bond)
 {
         struct packet_type *pt = &bond->arp_mon_pt;
 
+        if (pt->type)
+                return;
+
         pt->type = htons(ETH_P_ARP);
-        pt->dev = NULL; /*bond->dev;XXX*/
+        pt->dev = bond->dev;
         pt->func = bond_arp_rcv;
         dev_add_pack(pt);
 }
 
 void bond_unregister_arp(struct bonding *bond)
 {
-        dev_remove_pack(&bond->arp_mon_pt);
+        struct packet_type *pt = &bond->arp_mon_pt;
+
+        dev_remove_pack(pt);
+        pt->type = 0;
 }
 
 /*---------------------------- Hashing Policies -----------------------------*/
@@ -4011,42 +4042,6 @@ out:
         return 0;
 }
 
-static void bond_activebackup_xmit_copy(struct sk_buff *skb,
-                                        struct bonding *bond,
-                                        struct slave *slave)
-{
-        struct sk_buff *skb2 = skb_copy(skb, GFP_ATOMIC);
-        struct ethhdr *eth_data;
-        u8 *hwaddr;
-        int res;
-
-        if (!skb2) {
-                printk(KERN_ERR DRV_NAME ": Error: "
-                       "bond_activebackup_xmit_copy(): skb_copy() failed\n");
-                return;
-        }
-
-        skb2->mac.raw = (unsigned char *)skb2->data;
-        eth_data = eth_hdr(skb2);
-
-        /* Pick an appropriate source MAC address
-         * -- use slave's perm MAC addr, unless used by bond
-         * -- otherwise, borrow active slave's perm MAC addr
-         *    since that will not be used
-         */
-        hwaddr = slave->perm_hwaddr;
-        if (!memcmp(eth_data->h_source, hwaddr, ETH_ALEN))
-                hwaddr = bond->curr_active_slave->perm_hwaddr;
-
-        /* Set source MAC address appropriately */
-        memcpy(eth_data->h_source, hwaddr, ETH_ALEN);
-
-        res = bond_dev_queue_xmit(bond, skb2, slave->dev);
-        if (res)
-                dev_kfree_skb(skb2);
-
-        return;
-}
 
 /*
  * in active-backup mode, we know that bond->curr_active_slave is always valid if
@@ -4067,21 +4062,6 @@ static int bond_xmit_activebackup(struct sk_buff *skb, struct net_device *bond_d
         if (!bond->curr_active_slave)
                 goto out;
 
-        /* Xmit IGMP frames on all slaves to ensure rapid fail-over
-           for multicast traffic on snooping switches */
-        if (skb->protocol == __constant_htons(ETH_P_IP) &&
-            skb->nh.iph->protocol == IPPROTO_IGMP) {
-                struct slave *slave, *active_slave;
-                int i;
-
-                active_slave = bond->curr_active_slave;
-                bond_for_each_slave_from_to(bond, slave, i, active_slave->next,
-                                            active_slave->prev)
-                        if (IS_UP(slave->dev) &&
-                            (slave->link == BOND_LINK_UP))
-                                bond_activebackup_xmit_copy(skb, bond, slave);
-        }
-
         res = bond_dev_queue_xmit(bond, skb, bond->curr_active_slave->dev);
 
 out:
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index be2ddbb6ef56..9ba21e0f27c5 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -1309,7 +1309,7 @@ static void mv643xx_init_ethtool_cmd(struct net_device *dev, int phy_address,
 static int mv643xx_eth_probe(struct platform_device *pdev)
 {
         struct mv643xx_eth_platform_data *pd;
-        int port_num = pdev->id;
+        int port_num;
         struct mv643xx_private *mp;
         struct net_device *dev;
         u8 *p;
@@ -1319,6 +1319,12 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
         int duplex = DUPLEX_HALF;
         int speed = 0;                  /* default to auto-negotiation */
 
+        pd = pdev->dev.platform_data;
+        if (pd == NULL) {
+                printk(KERN_ERR "No mv643xx_eth_platform_data\n");
+                return -ENODEV;
+        }
+
         dev = alloc_etherdev(sizeof(struct mv643xx_private));
         if (!dev)
                 return -ENOMEM;
@@ -1331,8 +1337,6 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
         BUG_ON(!res);
         dev->irq = res->start;
 
-        mp->port_num = port_num;
-
         dev->open = mv643xx_eth_open;
         dev->stop = mv643xx_eth_stop;
         dev->hard_start_xmit = mv643xx_eth_start_xmit;
@@ -1373,39 +1377,40 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
 
         spin_lock_init(&mp->lock);
 
+        port_num = pd->port_number;
+
         /* set default config values */
         eth_port_uc_addr_get(dev, dev->dev_addr);
         mp->rx_ring_size = MV643XX_ETH_PORT_DEFAULT_RECEIVE_QUEUE_SIZE;
         mp->tx_ring_size = MV643XX_ETH_PORT_DEFAULT_TRANSMIT_QUEUE_SIZE;
 
-        pd = pdev->dev.platform_data;
-        if (pd) {
-                if (is_valid_ether_addr(pd->mac_addr))
-                        memcpy(dev->dev_addr, pd->mac_addr, 6);
+        if (is_valid_ether_addr(pd->mac_addr))
+                memcpy(dev->dev_addr, pd->mac_addr, 6);
 
         if (pd->phy_addr || pd->force_phy_addr)
                 ethernet_phy_set(port_num, pd->phy_addr);
 
         if (pd->rx_queue_size)
                 mp->rx_ring_size = pd->rx_queue_size;
 
         if (pd->tx_queue_size)
                 mp->tx_ring_size = pd->tx_queue_size;
 
         if (pd->tx_sram_size) {
                 mp->tx_sram_size = pd->tx_sram_size;
                 mp->tx_sram_addr = pd->tx_sram_addr;
         }
-
-                if (pd->rx_sram_size) {
-                        mp->rx_sram_size = pd->rx_sram_size;
-                        mp->rx_sram_addr = pd->rx_sram_addr;
-                }
 
-                duplex = pd->duplex;
-                speed = pd->speed;
+        if (pd->rx_sram_size) {
+                mp->rx_sram_size = pd->rx_sram_size;
+                mp->rx_sram_addr = pd->rx_sram_addr;
         }
 
+        duplex = pd->duplex;
+        speed = pd->speed;
+
+        mp->port_num = port_num;
+
         /* Hook up MII support for ethtool */
         mp->mii.dev = dev;
         mp->mii.mdio_read = mv643xx_mdio_read;
diff --git a/drivers/net/natsemi.c b/drivers/net/natsemi.c
index 5c57433cb306..c6172a77a6d7 100644
--- a/drivers/net/natsemi.c
+++ b/drivers/net/natsemi.c
@@ -2024,6 +2024,7 @@ static int start_tx(struct sk_buff *skb, struct net_device *dev)
         struct netdev_private *np = netdev_priv(dev);
         void __iomem * ioaddr = ns_ioaddr(dev);
         unsigned entry;
+        unsigned long flags;
 
         /* Note: Ordering is important here, set the field with the
            "ownership" bit last, and only then increment cur_tx. */
@@ -2037,7 +2038,7 @@ static int start_tx(struct sk_buff *skb, struct net_device *dev)
 
         np->tx_ring[entry].addr = cpu_to_le32(np->tx_dma[entry]);
 
-        spin_lock_irq(&np->lock);
+        spin_lock_irqsave(&np->lock, flags);
 
         if (!np->hands_off) {
                 np->tx_ring[entry].cmd_status = cpu_to_le32(DescOwn | skb->len);
@@ -2056,7 +2057,7 @@ static int start_tx(struct sk_buff *skb, struct net_device *dev)
                 dev_kfree_skb_irq(skb);
                 np->stats.tx_dropped++;
         }
-        spin_unlock_irq(&np->lock);
+        spin_unlock_irqrestore(&np->lock, flags);
 
         dev->trans_start = jiffies;
 
@@ -2222,6 +2223,8 @@ static void netdev_rx(struct net_device *dev, int *work_done, int work_to_do)
                 pkt_len = (desc_status & DescSizeMask) - 4;
                 if ((desc_status&(DescMore|DescPktOK|DescRxLong)) != DescPktOK){
                         if (desc_status & DescMore) {
+                                unsigned long flags;
+
                                 if (netif_msg_rx_err(np))
                                         printk(KERN_WARNING
                                                 "%s: Oversized(?) Ethernet "
@@ -2236,12 +2239,12 @@ static void netdev_rx(struct net_device *dev, int *work_done, int work_to_do)
                                  * reset procedure documented in
                                  * AN-1287. */
 
-                                spin_lock_irq(&np->lock);
+                                spin_lock_irqsave(&np->lock, flags);
                                 reset_rx(dev);
                                 reinit_rx(dev);
                                 writel(np->ring_dma, ioaddr + RxRingPtr);
                                 check_link(dev);
-                                spin_unlock_irq(&np->lock);
+                                spin_unlock_irqrestore(&np->lock, flags);
 
                                 /* We'll enable RX on exit from this
                                  * function. */
@@ -2396,8 +2399,19 @@ static struct net_device_stats *get_stats(struct net_device *dev)
 #ifdef CONFIG_NET_POLL_CONTROLLER
 static void natsemi_poll_controller(struct net_device *dev)
 {
+        struct netdev_private *np = netdev_priv(dev);
+
         disable_irq(dev->irq);
-        intr_handler(dev->irq, dev);
+
+        /*
+         * A real interrupt might have already reached us at this point
+         * but NAPI might still haven't called us back. As the interrupt
+         * status register is cleared by reading, we should prevent an
+         * interrupt loss in this case...
+         */
+        if (!np->intr_status)
+                intr_handler(dev->irq, dev);
+
         enable_irq(dev->irq);
 }
 #endif
diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c
index 36f9d988278f..4d94ba7899bf 100644
--- a/drivers/net/pcnet32.c
+++ b/drivers/net/pcnet32.c
@@ -1234,14 +1234,14 @@ static void pcnet32_rx_entry(struct net_device *dev,
                 skb_put(skb, pkt_len);  /* Make room */
                 pci_dma_sync_single_for_cpu(lp->pci_dev,
                                             lp->rx_dma_addr[entry],
-                                            PKT_BUF_SZ - 2,
+                                            pkt_len,
                                             PCI_DMA_FROMDEVICE);
                 eth_copy_and_sum(skb,
                                  (unsigned char *)(lp->rx_skbuff[entry]->data),
                                  pkt_len, 0);
                 pci_dma_sync_single_for_device(lp->pci_dev,
                                                lp->rx_dma_addr[entry],
-                                               PKT_BUF_SZ - 2,
+                                               pkt_len,
                                                PCI_DMA_FROMDEVICE);
         }
         lp->stats.rx_bytes += skb->len;
diff --git a/drivers/net/sis900.c b/drivers/net/sis900.c
index fb2b53051635..b3750f284279 100644
--- a/drivers/net/sis900.c
+++ b/drivers/net/sis900.c
@@ -968,10 +968,10 @@ static void mdio_write(struct net_device *net_dev, int phy_id, int location,
 
 static u16 sis900_reset_phy(struct net_device *net_dev, int phy_addr)
 {
-        int i = 0;
+        int i;
         u16 status;
 
-        while (i++ < 2)
+        for (i = 0; i < 2; i++)
                 status = mdio_read(net_dev, phy_addr, MII_STATUS);
 
         mdio_write( net_dev, phy_addr, MII_CONTROL, MII_CNTL_RESET );
@@ -1430,7 +1430,7 @@ static void sis900_auto_negotiate(struct net_device *net_dev, int phy_addr)
         int i = 0;
         u32 status;
 
-        while (i++ < 2)
+        for (i = 0; i < 2; i++)
                 status = mdio_read(net_dev, phy_addr, MII_STATUS);
 
         if (!(status & MII_STAT_LINK)){
@@ -1466,9 +1466,9 @@ static void sis900_read_mode(struct net_device *net_dev, int *speed, int *duplex
         int phy_addr = sis_priv->cur_phy;
         u32 status;
         u16 autoadv, autorec;
-        int i = 0;
+        int i;
 
-        while (i++ < 2)
+        for (i = 0; i < 2; i++)
                 status = mdio_read(net_dev, phy_addr, MII_STATUS);
 
         if (!(status & MII_STAT_LINK))
diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c
index dacea4fd3337..c82befa209a2 100644
--- a/drivers/net/tulip/de2104x.c
+++ b/drivers/net/tulip/de2104x.c
@@ -1685,7 +1685,7 @@ static const struct ethtool_ops de_ethtool_ops = {
         .get_regs               = de_get_regs,
 };
 
-static void __init de21040_get_mac_address (struct de_private *de)
+static void __devinit de21040_get_mac_address (struct de_private *de)
 {
         unsigned i;
 
@@ -1703,7 +1703,7 @@ static void __init de21040_get_mac_address (struct de_private *de)
         }
 }
 
-static void __init de21040_get_media_info(struct de_private *de)
+static void __devinit de21040_get_media_info(struct de_private *de)
 {
         unsigned int i;
 
@@ -1765,7 +1765,7 @@ static unsigned __devinit tulip_read_eeprom(void __iomem *regs, int location, in
         return retval;
 }
 
-static void __init de21041_get_srom_info (struct de_private *de)
+static void __devinit de21041_get_srom_info (struct de_private *de)
 {
         unsigned i, sa_offset = 0, ofs;
         u8 ee_data[DE_EEPROM_SIZE + 6] = {};
diff --git a/drivers/net/tulip/dmfe.c b/drivers/net/tulip/dmfe.c
index 7f59a3d4fda2..24a29c99ba94 100644
--- a/drivers/net/tulip/dmfe.c
+++ b/drivers/net/tulip/dmfe.c
@@ -143,9 +143,16 @@
 #define DMFE_TX_TIMEOUT ((3*HZ)/2)      /* tx packet time-out time 1.5 s" */
 #define DMFE_TX_KICK    (HZ/2)          /* tx packet Kick-out time 0.5 s" */
 
-#define DMFE_DBUG(dbug_now, msg, value) if (dmfe_debug || (dbug_now)) printk(KERN_ERR DRV_NAME ": %s %lx\n", (msg), (long) (value))
+#define DMFE_DBUG(dbug_now, msg, value) \
+        do { \
+                if (dmfe_debug || (dbug_now)) \
+                        printk(KERN_ERR DRV_NAME ": %s %lx\n",\
+                                (msg), (long) (value)); \
+        } while (0)
 
-#define SHOW_MEDIA_TYPE(mode) printk(KERN_ERR DRV_NAME ": Change Speed to %sMhz %s duplex\n",mode & 1 ?"100":"10", mode & 4 ? "full":"half");
+#define SHOW_MEDIA_TYPE(mode) \
+        printk (KERN_INFO DRV_NAME ": Change Speed to %sMhz %s duplex\n" , \
+                (mode & 1) ? "100":"10", (mode & 4) ? "full":"half");
 
 
 /* CR9 definition: SROM/MII */
@@ -163,10 +170,20 @@
 
 #define SROM_V41_CODE           0x14
 
-#define SROM_CLK_WRITE(data, ioaddr) outl(data|CR9_SROM_READ|CR9_SRCS,ioaddr);udelay(5);outl(data|CR9_SROM_READ|CR9_SRCS|CR9_SRCLK,ioaddr);udelay(5);outl(data|CR9_SROM_READ|CR9_SRCS,ioaddr);udelay(5);
+#define SROM_CLK_WRITE(data, ioaddr) \
+                outl(data|CR9_SROM_READ|CR9_SRCS,ioaddr); \
+                udelay(5); \
+                outl(data|CR9_SROM_READ|CR9_SRCS|CR9_SRCLK,ioaddr); \
+                udelay(5); \
+                outl(data|CR9_SROM_READ|CR9_SRCS,ioaddr); \
+                udelay(5);
+
+#define __CHK_IO_SIZE(pci_id, dev_rev) \
+        (( ((pci_id)==PCI_DM9132_ID) || ((dev_rev) >= 0x02000030) ) ? \
+                DM9102A_IO_SIZE: DM9102_IO_SIZE)
 
-#define __CHK_IO_SIZE(pci_id, dev_rev) ( ((pci_id)==PCI_DM9132_ID) || ((dev_rev) >= 0x02000030) ) ? DM9102A_IO_SIZE: DM9102_IO_SIZE
-#define CHK_IO_SIZE(pci_dev, dev_rev) __CHK_IO_SIZE(((pci_dev)->device << 16) | (pci_dev)->vendor, dev_rev)
+#define CHK_IO_SIZE(pci_dev, dev_rev) \
+        (__CHK_IO_SIZE(((pci_dev)->device << 16) | (pci_dev)->vendor, dev_rev))
 
 /* Sten Check */
 #define DEVICE net_device
@@ -187,7 +204,7 @@ struct rx_desc {
 struct dmfe_board_info {
         u32 chip_id;                    /* Chip vendor/Device ID */
         u32 chip_revision;              /* Chip revision */
-        struct DEVICE *dev;             /* net device */
+        struct DEVICE *next_dev;        /* next device */
         struct pci_dev *pdev;           /* PCI device */
         spinlock_t lock;
 
@@ -231,7 +248,6 @@ struct dmfe_board_info {
         u8 media_mode;                  /* user specify media mode */
         u8 op_mode;                     /* real work media mode */
         u8 phy_addr;
-        u8 link_failed;                 /* Ever link failed */
         u8 wait_reset;                  /* Hardware failed, need to reset */
         u8 dm910x_chk_mode;             /* Operating mode check */
         u8 first_in_callback;           /* Flag to record state */
@@ -329,7 +345,7 @@ static void dmfe_program_DM9802(struct dmfe_board_info *);
 static void dmfe_HPNA_remote_cmd_chk(struct dmfe_board_info * );
 static void dmfe_set_phyxcer(struct dmfe_board_info *);
 
-/* DM910X network baord routine ---------------------------- */
+/* DM910X network board routine ---------------------------- */
 
 /*
  *      Search DM910X board ,allocate space and register it
@@ -356,7 +372,8 @@ static int __devinit dmfe_init_one (struct pci_dev *pdev,
         SET_NETDEV_DEV(dev, &pdev->dev);
 
         if (pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
-                printk(KERN_WARNING DRV_NAME ": 32-bit PCI DMA not available.\n");
+                printk(KERN_WARNING DRV_NAME
+                        ": 32-bit PCI DMA not available.\n");
                 err = -ENODEV;
                 goto err_out_free;
         }
@@ -399,11 +416,12 @@ static int __devinit dmfe_init_one (struct pci_dev *pdev,
         /* Init system & device */
         db = netdev_priv(dev);
 
-        db->dev = dev;
-
         /* Allocate Tx/Rx descriptor memory */
-        db->desc_pool_ptr = pci_alloc_consistent(pdev, sizeof(struct tx_desc) * DESC_ALL_CNT + 0x20, &db->desc_pool_dma_ptr);
-        db->buf_pool_ptr = pci_alloc_consistent(pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4, &db->buf_pool_dma_ptr);
+        db->desc_pool_ptr = pci_alloc_consistent(pdev, sizeof(struct tx_desc) *
+                        DESC_ALL_CNT + 0x20, &db->desc_pool_dma_ptr);
+
+        db->buf_pool_ptr = pci_alloc_consistent(pdev, TX_BUF_ALLOC *
+                        TX_DESC_CNT + 4, &db->buf_pool_dma_ptr);
 
         db->first_tx_desc = (struct tx_desc *) db->desc_pool_ptr;
         db->first_tx_desc_dma = db->desc_pool_dma_ptr;
@@ -428,7 +446,7 @@ static int __devinit dmfe_init_one (struct pci_dev *pdev,
         dev->poll_controller = &poll_dmfe;
 #endif
         dev->ethtool_ops = &netdev_ethtool_ops;
-        netif_carrier_off(db->dev);
+        netif_carrier_off(dev);
         spin_lock_init(&db->lock);
 
         pci_read_config_dword(pdev, 0x50, &pci_pmr);
@@ -440,7 +458,8 @@ static int __devinit dmfe_init_one (struct pci_dev *pdev,
 
         /* read 64 word srom data */
         for (i = 0; i < 64; i++)
-                ((u16 *) db->srom)[i] = cpu_to_le16(read_srom_word(db->ioaddr, i));
+                ((u16 *) db->srom)[i] =
+                        cpu_to_le16(read_srom_word(db->ioaddr, i));
 
         /* Set Node address */
         for (i = 0; i < 6; i++)
@@ -482,14 +501,17 @@ static void __devexit dmfe_remove_one (struct pci_dev *pdev)
         DMFE_DBUG(0, "dmfe_remove_one()", 0);
 
         if (dev) {
+
+                unregister_netdev(dev);
+
                 pci_free_consistent(db->pdev, sizeof(struct tx_desc) *
                                         DESC_ALL_CNT + 0x20, db->desc_pool_ptr,
                                         db->desc_pool_dma_ptr);
                 pci_free_consistent(db->pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4,
                                         db->buf_pool_ptr, db->buf_pool_dma_ptr);
-                unregister_netdev(dev);
                 pci_release_regions(pdev);
                 free_netdev(dev);       /* free board information */
+
                 pci_set_drvdata(pdev, NULL);
         }
 
@@ -509,7 +531,8 @@ static int dmfe_open(struct DEVICE *dev)
 
         DMFE_DBUG(0, "dmfe_open", 0);
 
-        ret = request_irq(dev->irq, &dmfe_interrupt, IRQF_SHARED, dev->name, dev);
+        ret = request_irq(dev->irq, &dmfe_interrupt,
+                          IRQF_SHARED, dev->name, dev);
         if (ret)
                 return ret;
 
@@ -518,7 +541,6 @@ static int dmfe_open(struct DEVICE *dev)
         db->tx_packet_cnt = 0;
         db->tx_queue_cnt = 0;
         db->rx_avail_cnt = 0;
-        db->link_failed = 1;
         db->wait_reset = 0;
 
         db->first_in_callback = 0;
@@ -650,7 +672,8 @@ static int dmfe_start_xmit(struct sk_buff *skb, struct DEVICE *dev)
         /* No Tx resource check, it never happen nromally */
         if (db->tx_queue_cnt >= TX_FREE_DESC_CNT) {
                 spin_unlock_irqrestore(&db->lock, flags);
-                printk(KERN_ERR DRV_NAME ": No Tx resource %ld\n", db->tx_queue_cnt);
+                printk(KERN_ERR DRV_NAME ": No Tx resource %ld\n",
+                       db->tx_queue_cnt);
                 return 1;
         }
 
@@ -722,7 +745,8 @@ static int dmfe_stop(struct DEVICE *dev)
 
 #if 0
         /* show statistic counter */
-        printk(DRV_NAME ": FU:%lx EC:%lx LC:%lx NC:%lx LOC:%lx TXJT:%lx RESET:%lx RCR8:%lx FAL:%lx TT:%lx\n",
+        printk(DRV_NAME ": FU:%lx EC:%lx LC:%lx NC:%lx"
+               " LOC:%lx TXJT:%lx RESET:%lx RCR8:%lx FAL:%lx TT:%lx\n",
                 db->tx_fifo_underrun, db->tx_excessive_collision,
                 db->tx_late_collision, db->tx_no_carrier, db->tx_loss_carrier,
                 db->tx_jabber_timeout, db->reset_count, db->reset_cr8,
@@ -905,7 +929,7 @@ static inline u32 cal_CRC(unsigned char * Data, unsigned int Len, u8 flag)
 static void dmfe_rx_packet(struct DEVICE *dev, struct dmfe_board_info * db)
 {
         struct rx_desc *rxptr;
-        struct sk_buff *skb;
+        struct sk_buff *skb, *newskb;
         int rxlen;
         u32 rdes0;
 
@@ -919,7 +943,9 @@ static void dmfe_rx_packet(struct DEVICE *dev, struct dmfe_board_info * db)
                 db->rx_avail_cnt--;
                 db->interval_rx_cnt++;
 
-                pci_unmap_single(db->pdev, le32_to_cpu(rxptr->rdes2), RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE);
+                pci_unmap_single(db->pdev, le32_to_cpu(rxptr->rdes2),
+                                 RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE);
+
                 if ( (rdes0 & 0x300) != 0x300) {
                         /* A packet without First/Last flag */
                         /* reuse this SKB */
@@ -956,9 +982,11 @@ static void dmfe_rx_packet(struct DEVICE *dev, struct dmfe_board_info * db)
                         } else {
                                 /* Good packet, send to upper layer */
                                 /* Shorst packet used new SKB */
-                                if ( (rxlen < RX_COPY_SIZE) &&
-                                        ( (skb = dev_alloc_skb(rxlen + 2) )
-                                        != NULL) ) {
+                                if ((rxlen < RX_COPY_SIZE) &&
+                                        ((newskb = dev_alloc_skb(rxlen + 2))
+                                        != NULL)) {
+
+                                        skb = newskb;
                                         /* size less than COPY_SIZE, allocate a rxlen SKB */
                                         skb->dev = dev;
                                         skb_reserve(skb, 2); /* 16byte align */
@@ -1069,6 +1097,8 @@ static void dmfe_timer(unsigned long data)
         struct dmfe_board_info *db = netdev_priv(dev);
         unsigned long flags;
 
+        int link_ok, link_ok_phy;
+
         DMFE_DBUG(0, "dmfe_timer()", 0);
         spin_lock_irqsave(&db->lock, flags);
 
@@ -1078,7 +1108,8 @@ static void dmfe_timer(unsigned long data)
         if (db->chip_type && (db->chip_id==PCI_DM9102_ID)) {
                 db->cr6_data &= ~0x40000;
                 update_cr6(db->cr6_data, db->ioaddr);
-                phy_write(db->ioaddr, db->phy_addr, 0, 0x1000, db->chip_id);
+                phy_write(db->ioaddr,
+                          db->phy_addr, 0, 0x1000, db->chip_id);
                 db->cr6_data |= 0x40000;
                 update_cr6(db->cr6_data, db->ioaddr);
                 db->timer.expires = DMFE_TIMER_WUT + HZ * 2;
@@ -1139,21 +1170,41 @@ static void dmfe_timer(unsigned long data)
                 (db->chip_revision == 0x02000010)) ) {
                 /* DM9102A Chip */
                 if (tmp_cr12 & 2)
-                        tmp_cr12 = 0x0;         /* Link failed */
+                        link_ok = 0;
                 else
-                        tmp_cr12 = 0x3; /* Link OK */
+                        link_ok = 1;
         }
+        else
+                /*0x43 is used instead of 0x3 because bit 6 should represent
+                        link status of external PHY */
+                link_ok = (tmp_cr12 & 0x43) ? 1 : 0;
+
+
+        /* If chip reports that link is failed it could be because external
+                PHY link status pin is not conected correctly to chip
+                To be sure ask PHY too.
+        */
+
+        /* need a dummy read because of PHY's register latch*/
+        phy_read (db->ioaddr, db->phy_addr, 1, db->chip_id);
+        link_ok_phy = (phy_read (db->ioaddr,
+                       db->phy_addr, 1, db->chip_id) & 0x4) ? 1 : 0;
 
-        if ( !(tmp_cr12 & 0x3) && !db->link_failed ) {
+        if (link_ok_phy != link_ok) {
+                DMFE_DBUG (0, "PHY and chip report different link status", 0);
+                link_ok = link_ok | link_ok_phy;
+        }
+
+        if ( !link_ok && netif_carrier_ok(dev)) {
                 /* Link Failed */
                 DMFE_DBUG(0, "Link Failed", tmp_cr12);
-                db->link_failed = 1;
-                netif_carrier_off(db->dev);
+                netif_carrier_off(dev);
 
                 /* For Force 10/100M Half/Full mode: Enable Auto-Nego mode */
                 /* AUTO or force 1M Homerun/Longrun don't need */
                 if ( !(db->media_mode & 0x38) )
-                        phy_write(db->ioaddr, db->phy_addr, 0, 0x1000, db->chip_id);
+                        phy_write(db->ioaddr, db->phy_addr,
+                                  0, 0x1000, db->chip_id);
 
                 /* AUTO mode, if INT phyxcer link failed, select EXT device */
                 if (db->media_mode & DMFE_AUTO) {
@@ -1162,21 +1213,19 @@ static void dmfe_timer(unsigned long data)
                         db->cr6_data&=~0x00000200;      /* bit9=0, HD mode */
                         update_cr6(db->cr6_data, db->ioaddr);
                 }
-        } else
-                if ((tmp_cr12 & 0x3) && db->link_failed) {
-                        DMFE_DBUG(0, "Link link OK", tmp_cr12);
-                        db->link_failed = 0;
-
-                        /* Auto Sense Speed */
-                        if ( (db->media_mode & DMFE_AUTO) &&
-                                dmfe_sense_speed(db) )
-                                db->link_failed = 1;
-                        else
-                                netif_carrier_on(db->dev);
-                        dmfe_process_mode(db);
-                        /* SHOW_MEDIA_TYPE(db->op_mode); */
-                }
-
+        } else if (!netif_carrier_ok(dev)) {
+
+                DMFE_DBUG(0, "Link link OK", tmp_cr12);
+
+                /* Auto Sense Speed */
+                if ( !(db->media_mode & DMFE_AUTO) || !dmfe_sense_speed(db)) {
+                        netif_carrier_on(dev);
+                        SHOW_MEDIA_TYPE(db->op_mode);
+                }
+
+                dmfe_process_mode(db);
+        }
+
         /* HPNA remote command check */
         if (db->HPNA_command & 0xf00) {
                 db->HPNA_timer--;
@@ -1221,7 +1270,7 @@ static void dmfe_dynamic_reset(struct DEVICE *dev)
         db->tx_packet_cnt = 0;
         db->tx_queue_cnt = 0;
         db->rx_avail_cnt = 0;
-        db->link_failed = 1;
+        netif_carrier_off(dev);
         db->wait_reset = 0;
 
         /* Re-initilize DM910X board */
@@ -1259,7 +1308,8 @@ static void dmfe_reuse_skb(struct dmfe_board_info *db, struct sk_buff * skb)
 
         if (!(rxptr->rdes0 & cpu_to_le32(0x80000000))) {
                 rxptr->rx_skb_ptr = skb;
-                rxptr->rdes2 = cpu_to_le32( pci_map_single(db->pdev, skb->data, RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE) );
+                rxptr->rdes2 = cpu_to_le32( pci_map_single(db->pdev,
+                            skb->data, RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE) );
                 wmb();
                 rxptr->rdes0 = cpu_to_le32(0x80000000);
                 db->rx_avail_cnt++;
@@ -1291,8 +1341,11 @@ static void dmfe_descriptor_init(struct dmfe_board_info *db, unsigned long ioadd
         outl(db->first_tx_desc_dma, ioaddr + DCR4);     /* TX DESC address */
 
         /* rx descriptor start pointer */
-        db->first_rx_desc = (void *)db->first_tx_desc + sizeof(struct tx_desc) * TX_DESC_CNT;
-        db->first_rx_desc_dma =  db->first_tx_desc_dma + sizeof(struct tx_desc) * TX_DESC_CNT;
+        db->first_rx_desc = (void *)db->first_tx_desc +
+                        sizeof(struct tx_desc) * TX_DESC_CNT;
+
+        db->first_rx_desc_dma =  db->first_tx_desc_dma +
+                        sizeof(struct tx_desc) * TX_DESC_CNT;
         db->rx_insert_ptr = db->first_rx_desc;
         db->rx_ready_ptr = db->first_rx_desc;
         outl(db->first_rx_desc_dma, ioaddr + DCR3);     /* RX DESC address */
@@ -1470,7 +1523,8 @@ static void allocate_rx_buffer(struct dmfe_board_info *db)
                 if ( ( skb = dev_alloc_skb(RX_ALLOC_SIZE) ) == NULL )
                         break;
                 rxptr->rx_skb_ptr = skb; /* FIXME (?) */
-                rxptr->rdes2 = cpu_to_le32( pci_map_single(db->pdev, skb->data, RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE) );
+                rxptr->rdes2 = cpu_to_le32( pci_map_single(db->pdev, skb->data,
+                                    RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE) );
                 wmb();
                 rxptr->rdes0 = cpu_to_le32(0x80000000);
                 rxptr = rxptr->next_rx_desc;
@@ -1510,7 +1564,8 @@ static u16 read_srom_word(long ioaddr, int offset)
         for (i = 16; i > 0; i--) {
                 outl(CR9_SROM_READ | CR9_SRCS | CR9_SRCLK, cr9_ioaddr);
                 udelay(5);
-                srom_data = (srom_data << 1) | ((inl(cr9_ioaddr) & CR9_CRDOUT) ? 1 : 0);
+                srom_data = (srom_data << 1) |
+                                ((inl(cr9_ioaddr) & CR9_CRDOUT) ? 1 : 0);
                 outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr);
                 udelay(5);
         }
@@ -1537,9 +1592,11 @@ static u8 dmfe_sense_speed(struct dmfe_board_info * db)
 
         if ( (phy_mode & 0x24) == 0x24 ) {
                 if (db->chip_id == PCI_DM9132_ID)       /* DM9132 */
-                        phy_mode = phy_read(db->ioaddr, db->phy_addr, 7, db->chip_id) & 0xf000;
+                        phy_mode = phy_read(db->ioaddr,
+                                    db->phy_addr, 7, db->chip_id) & 0xf000;
                 else                            /* DM9102/DM9102A */
-                        phy_mode = phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id) & 0xf000;
+                        phy_mode = phy_read(db->ioaddr,
+                                    db->phy_addr, 17, db->chip_id) & 0xf000;
                 /* printk(DRV_NAME ": Phy_mode %x ",phy_mode); */
                 switch (phy_mode) {
                 case 0x1000: db->op_mode = DMFE_10MHF; break;
@@ -1576,8 +1633,11 @@ static void dmfe_set_phyxcer(struct dmfe_board_info *db)
 
         /* DM9009 Chip: Phyxcer reg18 bit12=0 */
         if (db->chip_id == PCI_DM9009_ID) {
-                phy_reg = phy_read(db->ioaddr, db->phy_addr, 18, db->chip_id) & ~0x1000;
-                phy_write(db->ioaddr, db->phy_addr, 18, phy_reg, db->chip_id);
+                phy_reg = phy_read(db->ioaddr,
+                                   db->phy_addr, 18, db->chip_id) & ~0x1000;
+
+                phy_write(db->ioaddr,
+                          db->phy_addr, 18, phy_reg, db->chip_id);
         }
 
         /* Phyxcer capability setting */
@@ -1650,10 +1710,12 @@ static void dmfe_process_mode(struct dmfe_board_info *db)
                 case DMFE_100MHF: phy_reg = 0x2000; break;
                 case DMFE_100MFD: phy_reg = 0x2100; break;
                 }
-                phy_write(db->ioaddr, db->phy_addr, 0, phy_reg, db->chip_id);
+                phy_write(db->ioaddr,
+                          db->phy_addr, 0, phy_reg, db->chip_id);
                 if ( db->chip_type && (db->chip_id == PCI_DM9102_ID) )
                         mdelay(20);
-                phy_write(db->ioaddr, db->phy_addr, 0, phy_reg, db->chip_id);
+                phy_write(db->ioaddr,
+                          db->phy_addr, 0, phy_reg, db->chip_id);
                 }
         }
 }
@@ -1663,7 +1725,8 @@ static void dmfe_process_mode(struct dmfe_board_info *db)
  *      Write a word to Phy register
  */
 
-static void phy_write(unsigned long iobase, u8 phy_addr, u8 offset, u16 phy_data, u32 chip_id)
+static void phy_write(unsigned long iobase, u8 phy_addr, u8 offset,
+                      u16 phy_data, u32 chip_id)
 {
         u16 i;
         unsigned long ioaddr;
@@ -1689,11 +1752,13 @@ static void phy_write(unsigned long iobase, u8 phy_addr, u8 offset, u16 phy_data
 
                 /* Send Phy address */
                 for (i = 0x10; i > 0; i = i >> 1)
-                        phy_write_1bit(ioaddr, phy_addr & i ? PHY_DATA_1 : PHY_DATA_0);
+                        phy_write_1bit(ioaddr,
+                                       phy_addr & i ? PHY_DATA_1 : PHY_DATA_0);
 
                 /* Send register address */
                 for (i = 0x10; i > 0; i = i >> 1)
-                        phy_write_1bit(ioaddr, offset & i ? PHY_DATA_1 : PHY_DATA_0);
+                        phy_write_1bit(ioaddr,
+                                       offset & i ? PHY_DATA_1 : PHY_DATA_0);
 
                 /* written trasnition */
                 phy_write_1bit(ioaddr, PHY_DATA_1);
@@ -1701,7 +1766,8 @@ static void phy_write(unsigned long iobase, u8 phy_addr, u8 offset, u16 phy_data
 
                 /* Write a word data to PHY controller */
                 for ( i = 0x8000; i > 0; i >>= 1)
-                        phy_write_1bit(ioaddr, phy_data & i ? PHY_DATA_1 : PHY_DATA_0);
+                        phy_write_1bit(ioaddr,
+                                       phy_data & i ? PHY_DATA_1 : PHY_DATA_0);
         }
 }
 
@@ -1738,11 +1804,13 @@ static u16 phy_read(unsigned long iobase, u8 phy_addr, u8 offset, u32 chip_id)
 
                 /* Send Phy address */
                 for (i = 0x10; i > 0; i = i >> 1)
-                        phy_write_1bit(ioaddr, phy_addr & i ? PHY_DATA_1 : PHY_DATA_0);
+                        phy_write_1bit(ioaddr,
+                                       phy_addr & i ? PHY_DATA_1 : PHY_DATA_0);
 
                 /* Send register address */
                 for (i = 0x10; i > 0; i = i >> 1)
-                        phy_write_1bit(ioaddr, offset & i ? PHY_DATA_1 : PHY_DATA_0);
+                        phy_write_1bit(ioaddr,
+                                       offset & i ? PHY_DATA_1 : PHY_DATA_0);
 
                 /* Skip transition state */
                 phy_read_1bit(ioaddr);
@@ -1963,7 +2031,8 @@ static void dmfe_HPNA_remote_cmd_chk(struct dmfe_board_info * db)
 
         /* Check remote device status match our setting ot not */
         if ( phy_reg != (db->HPNA_command & 0x0f00) ) {
-                phy_write(db->ioaddr, db->phy_addr, 16, db->HPNA_command, db->chip_id);
+                phy_write(db->ioaddr, db->phy_addr, 16, db->HPNA_command,
+                          db->chip_id);
                 db->HPNA_timer=8;
         } else
                 db->HPNA_timer=600;     /* Match, every 10 minutes, check */
@@ -2003,8 +2072,11 @@ module_param(HPNA_tx_cmd, byte, 0);
 module_param(HPNA_NoiseFloor, byte, 0);
 module_param(SF_mode, byte, 0);
 MODULE_PARM_DESC(debug, "Davicom DM9xxx enable debugging (0-1)");
-MODULE_PARM_DESC(mode, "Davicom DM9xxx: Bit 0: 10/100Mbps, bit 2: duplex, bit 8: HomePNA");
-MODULE_PARM_DESC(SF_mode, "Davicom DM9xxx special function (bit 0: VLAN, bit 1 Flow Control, bit 2: TX pause packet)");
+MODULE_PARM_DESC(mode, "Davicom DM9xxx: "
+        "Bit 0: 10/100Mbps, bit 2: duplex, bit 8: HomePNA");
+
+MODULE_PARM_DESC(SF_mode, "Davicom DM9xxx special function "
+        "(bit 0: VLAN, bit 1 Flow Control, bit 2: TX pause packet)");
 
 /* Description:
  *      when user used insmod to add module, system invoked init_module()
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c
index 885e73d731c2..dab88b958d6e 100644
--- a/drivers/net/ucc_geth.c
+++ b/drivers/net/ucc_geth.c
@@ -3598,17 +3598,20 @@ static int ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
         /* Move to next BD in the ring */
         if (!(bd_status & T_W))
-                ugeth->txBd[txQ] = bd + sizeof(struct qe_bd);
+                bd += sizeof(struct qe_bd);
         else
-                ugeth->txBd[txQ] = ugeth->p_tx_bd_ring[txQ];
+                bd = ugeth->p_tx_bd_ring[txQ];
 
         /* If the next BD still needs to be cleaned up, then the bds
            are full. We need to tell the kernel to stop sending us stuff. */
         if (bd == ugeth->confBd[txQ]) {
                 if (!netif_queue_stopped(dev))
                         netif_stop_queue(dev);
+                return NETDEV_TX_BUSY;
         }
 
+        ugeth->txBd[txQ] = bd;
+
         if (ugeth->p_scheduler) {
                 ugeth->cpucount[txQ]++;
                 /* Indicate to QE that there are more Tx bds ready for
@@ -3620,7 +3623,7 @@ static int ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
         spin_unlock_irq(&ugeth->lock);
 
-        return 0;
+        return NETDEV_TX_OK;
 }
 
 static int ucc_geth_rx(struct ucc_geth_private *ugeth, u8 rxQ, int rx_work_limit)
@@ -3722,7 +3725,7 @@ static int ucc_geth_tx(struct net_device *dev, u8 txQ)
                 /* Handle the transmitted buffer and release */
                 /* the BD to be used with the current frame */
 
-                if ((bd = ugeth->txBd[txQ]) && (netif_queue_stopped(dev) == 0))
+                if ((bd == ugeth->txBd[txQ]) && (netif_queue_stopped(dev) == 0))
                         break;
 
                 ugeth->stats.tx_packets++;
@@ -3741,10 +3744,12 @@ static int ucc_geth_tx(struct net_device *dev, u8 txQ)
 
                 /* Advance the confirmation BD pointer */
                 if (!(bd_status & T_W))
-                        ugeth->confBd[txQ] += sizeof(struct qe_bd);
+                        bd += sizeof(struct qe_bd);
                 else
-                        ugeth->confBd[txQ] = ugeth->p_tx_bd_ring[txQ];
+                        bd = ugeth->p_tx_bd_ring[txQ];
+                bd_status = in_be32((u32 *)bd);
         }
+        ugeth->confBd[txQ] = bd;
         return 0;
 }
 