author     Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-10-15 16:30:35 -0400
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-10-15 16:30:35 -0400
commit     43d39ae0cf8f891c35e8316948229c7cbffa3994 (patch)
tree       c8af6999a9327c8b9bbc7384efe8f52f851ff3d8 /drivers
parent     63bd8c48e04bbbc9cee3d752857914609d8d406f (diff)
parent     84284d3c1d6372bc9ab496607661d230d9c45de4 (diff)
Merge branch 'upstream-linus' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/netdev-2.6
* 'upstream-linus' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/netdev-2.6: (35 commits)
  xen-netfront: rearrange netfront structure to separate tx and rx
  netdev: convert non-obvious instances to use ARRAY_SIZE()
  ucc_geth: Fix build break introduced by commit 09f75cd7bf13720738e6a196cc0107ce9a5bd5a0
  gianfar: Fix regression caused by new napi interface
  gianfar: Cleanup compile warning caused by 0795af57
  gianfar: Fix compile regression caused by bea3348e
  add new prom.h for AU1x00
  update AU1000 get_ethernet_addr()
  MIPSsim: General cleanup
  Jazzsonic: Fix warning about unused variable.
  Remove msic_dcr_read() in axon_msi.c
  Use dcr_host_t.base in dcr_unmap()
  Add dcr_host_t.base in dcr_read()/dcr_write()
  Use dcr_host_t.base in ibm_emac_mal
  Update ibm_newemac to use dcr_host_t.base
  tehuti: possible leak in bdx_probe
  TC35815: Fix build
  SAA9730: Fix build
  AR7 ethernet
  myri10ge: update driver version to 1.3.2-1.287
  ...
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib.h            |    4
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_main.c       |   29
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_multicast.c  |    3
-rw-r--r--  drivers/net/Kconfig                             |    9
-rw-r--r--  drivers/net/Makefile                            |    1
-rw-r--r--  drivers/net/au1000_eth.c                        |   28
-rw-r--r--  drivers/net/bonding/bond_main.c                 |  207
-rw-r--r--  drivers/net/bonding/bond_sysfs.c                |   74
-rw-r--r--  drivers/net/bonding/bonding.h                   |   10
-rw-r--r--  drivers/net/cassini.c                           |    2
-rw-r--r--  drivers/net/cpmac.c                             | 1174
-rw-r--r--  drivers/net/gianfar.c                           |   14
-rw-r--r--  drivers/net/ibm_emac/ibm_emac_mal.c             |    5
-rw-r--r--  drivers/net/ibm_emac/ibm_emac_mal.h             |    5
-rw-r--r--  drivers/net/ibm_newemac/mal.c                   |    9
-rw-r--r--  drivers/net/ibm_newemac/mal.h                   |    5
-rw-r--r--  drivers/net/irda/donauboe.c                     |    2
-rw-r--r--  drivers/net/jazzsonic.c                         |    1
-rw-r--r--  drivers/net/mipsnet.c                           |   63
-rw-r--r--  drivers/net/mipsnet.h                           |   83
-rw-r--r--  drivers/net/myri10ge/myri10ge.c                 |  100
-rw-r--r--  drivers/net/myri10ge/myri10ge_mcp.h             |   90
-rw-r--r--  drivers/net/natsemi.c                           |   20
-rw-r--r--  drivers/net/ne-h8300.c                          |    4
-rw-r--r--  drivers/net/saa9730.c                           |    9
-rw-r--r--  drivers/net/tc35815.c                           |    1
-rw-r--r--  drivers/net/tehuti.c                            |    3
-rw-r--r--  drivers/net/tg3.c                               |    2
-rw-r--r--  drivers/net/tulip/de4x5.c                       |    4
-rw-r--r--  drivers/net/ucc_geth.c                          |    5
-rw-r--r--  drivers/net/wan/sdla.c                          |    8
-rw-r--r--  drivers/net/xen-netfront.c                      |   35
32 files changed, 1787 insertions, 222 deletions
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index 6545fa798b12..1b3327ad6bc4 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -349,6 +349,7 @@ struct ipoib_neigh {
 	struct sk_buff_head queue;
 
 	struct neighbour *neighbour;
+	struct net_device *dev;
 
 	struct list_head list;
 };
@@ -365,7 +366,8 @@ static inline struct ipoib_neigh **to_ipoib_neigh(struct neighbour *neigh)
 				     INFINIBAND_ALEN, sizeof(void *));
 }
 
-struct ipoib_neigh *ipoib_neigh_alloc(struct neighbour *neigh);
+struct ipoib_neigh *ipoib_neigh_alloc(struct neighbour *neigh,
+				      struct net_device *dev);
 void ipoib_neigh_free(struct net_device *dev, struct ipoib_neigh *neigh);
 
 extern struct workqueue_struct *ipoib_workqueue;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index e072f3c32ce6..362610d870e4 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -517,7 +517,7 @@ static void neigh_add_path(struct sk_buff *skb, struct net_device *dev)
 	struct ipoib_path *path;
 	struct ipoib_neigh *neigh;
 
-	neigh = ipoib_neigh_alloc(skb->dst->neighbour);
+	neigh = ipoib_neigh_alloc(skb->dst->neighbour, skb->dev);
 	if (!neigh) {
 		++dev->stats.tx_dropped;
 		dev_kfree_skb_any(skb);
@@ -692,9 +692,10 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
 			goto out;
 		}
 	} else if (neigh->ah) {
-		if (unlikely(memcmp(&neigh->dgid.raw,
+		if (unlikely((memcmp(&neigh->dgid.raw,
 				    skb->dst->neighbour->ha + 4,
-				    sizeof(union ib_gid)))) {
+				    sizeof(union ib_gid))) ||
+			     (neigh->dev != dev))) {
 			spin_lock(&priv->lock);
 			/*
 			 * It's safe to call ipoib_put_ah() inside
@@ -817,6 +818,13 @@ static void ipoib_neigh_cleanup(struct neighbour *n)
 	unsigned long flags;
 	struct ipoib_ah *ah = NULL;
 
+	neigh = *to_ipoib_neigh(n);
+	if (neigh) {
+		priv = netdev_priv(neigh->dev);
+		ipoib_dbg(priv, "neigh_destructor for bonding device: %s\n",
+			  n->dev->name);
+	} else
+		return;
 	ipoib_dbg(priv,
 		  "neigh_cleanup for %06x " IPOIB_GID_FMT "\n",
 		  IPOIB_QPN(n->ha),
@@ -824,13 +832,10 @@ static void ipoib_neigh_cleanup(struct neighbour *n)
 
 	spin_lock_irqsave(&priv->lock, flags);
 
-	neigh = *to_ipoib_neigh(n);
-	if (neigh) {
-		if (neigh->ah)
-			ah = neigh->ah;
-		list_del(&neigh->list);
-		ipoib_neigh_free(n->dev, neigh);
-	}
+	if (neigh->ah)
+		ah = neigh->ah;
+	list_del(&neigh->list);
+	ipoib_neigh_free(n->dev, neigh);
 
 	spin_unlock_irqrestore(&priv->lock, flags);
 
@@ -838,7 +843,8 @@ static void ipoib_neigh_cleanup(struct neighbour *n)
 		ipoib_put_ah(ah);
 }
 
-struct ipoib_neigh *ipoib_neigh_alloc(struct neighbour *neighbour)
+struct ipoib_neigh *ipoib_neigh_alloc(struct neighbour *neighbour,
+				      struct net_device *dev)
 {
 	struct ipoib_neigh *neigh;
 
@@ -847,6 +853,7 @@ struct ipoib_neigh *ipoib_neigh_alloc(struct neighbour *neighbour)
 		return NULL;
 
 	neigh->neighbour = neighbour;
+	neigh->dev = dev;
 	*to_ipoib_neigh(neighbour) = neigh;
 	skb_queue_head_init(&neigh->queue);
 	ipoib_cm_set(neigh, NULL);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index 827820ec66d1..9bcfc7ad6aa6 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -705,7 +705,8 @@ out:
 		if (skb->dst &&
 		    skb->dst->neighbour &&
 		    !*to_ipoib_neigh(skb->dst->neighbour)) {
-			struct ipoib_neigh *neigh = ipoib_neigh_alloc(skb->dst->neighbour);
+			struct ipoib_neigh *neigh = ipoib_neigh_alloc(skb->dst->neighbour,
+								      skb->dev);
 
 			if (neigh) {
 				kref_get(&mcast->ah->ref);
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 9c635a237a9d..8f99a0626616 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -1780,6 +1780,15 @@ config SC92031
 	  To compile this driver as a module, choose M here: the module
 	  will be called sc92031. This is recommended.
 
+config CPMAC
+	tristate "TI AR7 CPMAC Ethernet support (EXPERIMENTAL)"
+	depends on NET_ETHERNET && EXPERIMENTAL && AR7
+	select PHYLIB
+	select FIXED_PHY
+	select FIXED_MII_100_FDX
+	help
+	  TI AR7 CPMAC Ethernet support
+
 config NET_POCKET
 	bool "Pocket and portable adapters"
 	depends on PARPORT
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index d2e0f35da42e..22f78cbd126b 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -159,6 +159,7 @@ obj-$(CONFIG_8139CP) += 8139cp.o
 obj-$(CONFIG_8139TOO) += 8139too.o
 obj-$(CONFIG_ZNET) += znet.o
 obj-$(CONFIG_LAN_SAA9730) += saa9730.o
+obj-$(CONFIG_CPMAC) += cpmac.o
 obj-$(CONFIG_DEPCA) += depca.o
 obj-$(CONFIG_EWRK3) += ewrk3.o
 obj-$(CONFIG_ATP) += atp.o
diff --git a/drivers/net/au1000_eth.c b/drivers/net/au1000_eth.c
index b46c5d8a77bd..185f98e3964c 100644
--- a/drivers/net/au1000_eth.c
+++ b/drivers/net/au1000_eth.c
@@ -54,13 +54,16 @@
 #include <linux/delay.h>
 #include <linux/crc32.h>
 #include <linux/phy.h>
+
+#include <asm/cpu.h>
 #include <asm/mipsregs.h>
 #include <asm/irq.h>
 #include <asm/io.h>
 #include <asm/processor.h>
 
-#include <asm/mach-au1x00/au1000.h>
-#include <asm/cpu.h>
+#include <au1000.h>
+#include <prom.h>
+
 #include "au1000_eth.h"
 
 #ifdef AU1000_ETH_DEBUG
@@ -96,11 +99,6 @@ static void mdio_write(struct net_device *, int, int, u16);
 static void au1000_adjust_link(struct net_device *);
 static void enable_mac(struct net_device *, int);
 
-// externs
-extern int get_ethernet_addr(char *ethernet_addr);
-extern void str2eaddr(unsigned char *ea, unsigned char *str);
-extern char * prom_getcmdline(void);
-
 /*
  * Theory of operation
  *
@@ -619,7 +617,6 @@ static struct net_device * au1000_probe(int port_num)
 	struct au1000_private *aup = NULL;
 	struct net_device *dev = NULL;
 	db_dest_t *pDB, *pDBfree;
-	char *pmac, *argptr;
 	char ethaddr[6];
 	int irq, i, err;
 	u32 base, macen;
@@ -677,21 +674,12 @@ static struct net_device * au1000_probe(int port_num)
 	au_macs[port_num] = aup;
 
 	if (port_num == 0) {
-		/* Check the environment variables first */
-		if (get_ethernet_addr(ethaddr) == 0)
+		if (prom_get_ethernet_addr(ethaddr) == 0)
 			memcpy(au1000_mac_addr, ethaddr, sizeof(au1000_mac_addr));
 		else {
-			/* Check command line */
-			argptr = prom_getcmdline();
-			if ((pmac = strstr(argptr, "ethaddr=")) == NULL)
-				printk(KERN_INFO "%s: No MAC address found\n",
-					dev->name);
+			printk(KERN_INFO "%s: No MAC address found\n",
+				dev->name);
 			/* Use the hard coded MAC addresses */
-			else {
-				str2eaddr(ethaddr, pmac + strlen("ethaddr="));
-				memcpy(au1000_mac_addr, ethaddr,
-					sizeof(au1000_mac_addr));
-			}
 		}
 
 		setup_hw_rings(aup, MAC0_RX_DMA_ADDR, MAC0_TX_DMA_ADDR);
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 64bfec32e2a6..db80f243dd37 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -98,6 +98,7 @@ static char *xmit_hash_policy = NULL;
 static int arp_interval = BOND_LINK_ARP_INTERV;
 static char *arp_ip_target[BOND_MAX_ARP_TARGETS] = { NULL, };
 static char *arp_validate = NULL;
+static int fail_over_mac = 0;
 struct bond_params bonding_defaults;
 
 module_param(max_bonds, int, 0);
@@ -131,6 +132,8 @@ module_param_array(arp_ip_target, charp, NULL, 0);
 MODULE_PARM_DESC(arp_ip_target, "arp targets in n.n.n.n form");
 module_param(arp_validate, charp, 0);
 MODULE_PARM_DESC(arp_validate, "validate src/dst of ARP probes: none (default), active, backup or all");
+module_param(fail_over_mac, int, 0);
+MODULE_PARM_DESC(fail_over_mac, "For active-backup, do not set all slaves to the same MAC. 0 of off (default), 1 for on.");
 
 /*----------------------------- Global variables ----------------------------*/
 
@@ -1096,7 +1099,21 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
 		if (new_active) {
 			bond_set_slave_active_flags(new_active);
 		}
-		bond_send_gratuitous_arp(bond);
+
+		/* when bonding does not set the slave MAC address, the bond MAC
+		 * address is the one of the active slave.
+		 */
+		if (new_active && bond->params.fail_over_mac)
+			memcpy(bond->dev->dev_addr, new_active->dev->dev_addr,
+				new_active->dev->addr_len);
+		if (bond->curr_active_slave &&
+		    test_bit(__LINK_STATE_LINKWATCH_PENDING,
+			     &bond->curr_active_slave->dev->state)) {
+			dprintk("delaying gratuitous arp on %s\n",
+				bond->curr_active_slave->dev->name);
+			bond->send_grat_arp = 1;
+		} else
+			bond_send_gratuitous_arp(bond);
 	}
 }
 
@@ -1217,7 +1234,8 @@ static int bond_compute_features(struct bonding *bond)
 	struct slave *slave;
 	struct net_device *bond_dev = bond->dev;
 	unsigned long features = bond_dev->features;
-	unsigned short max_hard_header_len = ETH_HLEN;
+	unsigned short max_hard_header_len = max((u16)ETH_HLEN,
+						 bond_dev->hard_header_len);
 	int i;
 
 	features &= ~(NETIF_F_ALL_CSUM | BOND_VLAN_FEATURES);
@@ -1238,6 +1256,23 @@ static int bond_compute_features(struct bonding *bond)
 	return 0;
 }
 
+
+static void bond_setup_by_slave(struct net_device *bond_dev,
+				struct net_device *slave_dev)
+{
+	struct bonding *bond = bond_dev->priv;
+
+	bond_dev->neigh_setup = slave_dev->neigh_setup;
+
+	bond_dev->type = slave_dev->type;
+	bond_dev->hard_header_len = slave_dev->hard_header_len;
+	bond_dev->addr_len = slave_dev->addr_len;
+
+	memcpy(bond_dev->broadcast, slave_dev->broadcast,
+		slave_dev->addr_len);
+	bond->setup_by_slave = 1;
+}
+
 /* enslave device <slave> to bond device <master> */
 int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
 {
@@ -1258,8 +1293,9 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
 
 	/* bond must be initialized by bond_open() before enslaving */
 	if (!(bond_dev->flags & IFF_UP)) {
-		dprintk("Error, master_dev is not up\n");
-		return -EPERM;
+		printk(KERN_WARNING DRV_NAME
+			" %s: master_dev is not up in bond_enslave\n",
+			bond_dev->name);
 	}
 
 	/* already enslaved */
@@ -1312,14 +1348,42 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
 		goto err_undo_flags;
 	}
 
+	/* set bonding device ether type by slave - bonding netdevices are
+	 * created with ether_setup, so when the slave type is not ARPHRD_ETHER
+	 * there is a need to override some of the type dependent attribs/funcs.
+	 *
+	 * bond ether type mutual exclusion - don't allow slaves of dissimilar
+	 * ether type (eg ARPHRD_ETHER and ARPHRD_INFINIBAND) share the same bond
+	 */
+	if (bond->slave_cnt == 0) {
+		if (slave_dev->type != ARPHRD_ETHER)
+			bond_setup_by_slave(bond_dev, slave_dev);
+	} else if (bond_dev->type != slave_dev->type) {
+		printk(KERN_ERR DRV_NAME ": %s ether type (%d) is different "
+			"from other slaves (%d), can not enslave it.\n",
+			slave_dev->name,
+			slave_dev->type, bond_dev->type);
+		res = -EINVAL;
+		goto err_undo_flags;
+	}
+
 	if (slave_dev->set_mac_address == NULL) {
-		printk(KERN_ERR DRV_NAME
-			": %s: Error: The slave device you specified does "
-			"not support setting the MAC address. "
-			"Your kernel likely does not support slave "
-			"devices.\n", bond_dev->name);
-		res = -EOPNOTSUPP;
-		goto err_undo_flags;
+		if (bond->slave_cnt == 0) {
+			printk(KERN_WARNING DRV_NAME
+			       ": %s: Warning: The first slave device "
+			       "specified does not support setting the MAC "
+			       "address. Enabling the fail_over_mac option.",
+			       bond_dev->name);
+			bond->params.fail_over_mac = 1;
+		} else if (!bond->params.fail_over_mac) {
+			printk(KERN_ERR DRV_NAME
+			       ": %s: Error: The slave device specified "
+			       "does not support setting the MAC address, "
+			       "but fail_over_mac is not enabled.\n"
			       , bond_dev->name);
+			res = -EOPNOTSUPP;
+			goto err_undo_flags;
+		}
 	}
 
 	new_slave = kzalloc(sizeof(struct slave), GFP_KERNEL);
@@ -1340,16 +1404,18 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
 	 */
 	memcpy(new_slave->perm_hwaddr, slave_dev->dev_addr, ETH_ALEN);
 
-	/*
-	 * Set slave to master's mac address. The application already
-	 * set the master's mac address to that of the first slave
-	 */
-	memcpy(addr.sa_data, bond_dev->dev_addr, bond_dev->addr_len);
-	addr.sa_family = slave_dev->type;
-	res = dev_set_mac_address(slave_dev, &addr);
-	if (res) {
-		dprintk("Error %d calling set_mac_address\n", res);
-		goto err_free;
+	if (!bond->params.fail_over_mac) {
+		/*
+		 * Set slave to master's mac address. The application already
+		 * set the master's mac address to that of the first slave
+		 */
+		memcpy(addr.sa_data, bond_dev->dev_addr, bond_dev->addr_len);
+		addr.sa_family = slave_dev->type;
+		res = dev_set_mac_address(slave_dev, &addr);
+		if (res) {
+			dprintk("Error %d calling set_mac_address\n", res);
+			goto err_free;
+		}
 	}
 
 	res = netdev_set_master(slave_dev, bond_dev);
@@ -1574,9 +1640,11 @@ err_close:
 	dev_close(slave_dev);
 
 err_restore_mac:
-	memcpy(addr.sa_data, new_slave->perm_hwaddr, ETH_ALEN);
-	addr.sa_family = slave_dev->type;
-	dev_set_mac_address(slave_dev, &addr);
+	if (!bond->params.fail_over_mac) {
+		memcpy(addr.sa_data, new_slave->perm_hwaddr, ETH_ALEN);
+		addr.sa_family = slave_dev->type;
+		dev_set_mac_address(slave_dev, &addr);
+	}
 
 err_free:
 	kfree(new_slave);
@@ -1749,10 +1817,12 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
 	/* close slave before restoring its mac address */
 	dev_close(slave_dev);
 
-	/* restore original ("permanent") mac address */
-	memcpy(addr.sa_data, slave->perm_hwaddr, ETH_ALEN);
-	addr.sa_family = slave_dev->type;
-	dev_set_mac_address(slave_dev, &addr);
+	if (!bond->params.fail_over_mac) {
+		/* restore original ("permanent") mac address */
+		memcpy(addr.sa_data, slave->perm_hwaddr, ETH_ALEN);
+		addr.sa_family = slave_dev->type;
+		dev_set_mac_address(slave_dev, &addr);
+	}
 
 	slave_dev->priv_flags &= ~(IFF_MASTER_8023AD | IFF_MASTER_ALB |
 				   IFF_SLAVE_INACTIVE | IFF_BONDING |
@@ -1764,6 +1834,35 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
 }
 
 /*
+* Destroy a bonding device.
+* Must be under rtnl_lock when this function is called.
+*/
+void bond_destroy(struct bonding *bond)
+{
+	bond_deinit(bond->dev);
+	bond_destroy_sysfs_entry(bond);
+	unregister_netdevice(bond->dev);
+}
+
+/*
+* First release a slave and than destroy the bond if no more slaves iare left.
+* Must be under rtnl_lock when this function is called.
+*/
+int bond_release_and_destroy(struct net_device *bond_dev, struct net_device *slave_dev)
+{
+	struct bonding *bond = bond_dev->priv;
+	int ret;
+
+	ret = bond_release(bond_dev, slave_dev);
+	if ((ret == 0) && (bond->slave_cnt == 0)) {
+		printk(KERN_INFO DRV_NAME ": %s: destroying bond %s.\n",
+		       bond_dev->name, bond_dev->name);
+		bond_destroy(bond);
+	}
+	return ret;
+}
+
+/*
  * This function releases all slaves.
  */
 static int bond_release_all(struct net_device *bond_dev)
@@ -1839,10 +1938,12 @@ static int bond_release_all(struct net_device *bond_dev)
 		/* close slave before restoring its mac address */
 		dev_close(slave_dev);
 
-		/* restore original ("permanent") mac address*/
-		memcpy(addr.sa_data, slave->perm_hwaddr, ETH_ALEN);
-		addr.sa_family = slave_dev->type;
-		dev_set_mac_address(slave_dev, &addr);
+		if (!bond->params.fail_over_mac) {
+			/* restore original ("permanent") mac address*/
+			memcpy(addr.sa_data, slave->perm_hwaddr, ETH_ALEN);
+			addr.sa_family = slave_dev->type;
+			dev_set_mac_address(slave_dev, &addr);
+		}
 
 		slave_dev->priv_flags &= ~(IFF_MASTER_8023AD | IFF_MASTER_ALB |
 					   IFF_SLAVE_INACTIVE);
@@ -2013,6 +2114,17 @@ void bond_mii_monitor(struct net_device *bond_dev)
 	 * program could monitor the link itself if needed.
 	 */
 
+	if (bond->send_grat_arp) {
+		if (bond->curr_active_slave && test_bit(__LINK_STATE_LINKWATCH_PENDING,
+				&bond->curr_active_slave->dev->state))
+			dprintk("Needs to send gratuitous arp but not yet\n");
+		else {
+			dprintk("sending delayed gratuitous arp on on %s\n",
+				bond->curr_active_slave->dev->name);
+			bond_send_gratuitous_arp(bond);
+			bond->send_grat_arp = 0;
+		}
+	}
 	read_lock(&bond->curr_slave_lock);
 	oldcurrent = bond->curr_active_slave;
 	read_unlock(&bond->curr_slave_lock);
@@ -2414,7 +2526,7 @@ static void bond_send_gratuitous_arp(struct bonding *bond)
 
 	if (bond->master_ip) {
 		bond_arp_send(slave->dev, ARPOP_REPLY, bond->master_ip,
-			      bond->master_ip, 0);
+				bond->master_ip, 0);
 	}
 
 	list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
@@ -2951,9 +3063,15 @@ static void bond_info_show_master(struct seq_file *seq)
 	curr = bond->curr_active_slave;
 	read_unlock(&bond->curr_slave_lock);
 
-	seq_printf(seq, "Bonding Mode: %s\n",
+	seq_printf(seq, "Bonding Mode: %s",
 		   bond_mode_name(bond->params.mode));
 
+	if (bond->params.mode == BOND_MODE_ACTIVEBACKUP &&
+	    bond->params.fail_over_mac)
+		seq_printf(seq, " (fail_over_mac)");
+
+	seq_printf(seq, "\n");
+
 	if (bond->params.mode == BOND_MODE_XOR ||
 	    bond->params.mode == BOND_MODE_8023AD) {
 		seq_printf(seq, "Transmit Hash Policy: %s (%d)\n",
@@ -3248,6 +3366,11 @@ static int bond_slave_netdev_event(unsigned long event, struct net_device *slave
 		 * ... Or is it this?
 		 */
 		break;
+	case NETDEV_GOING_DOWN:
+		dprintk("slave %s is going down\n", slave_dev->name);
+		if (bond->setup_by_slave)
+			bond_release_and_destroy(bond_dev, slave_dev);
+		break;
 	case NETDEV_CHANGEMTU:
 		/*
 		 * TODO: Should slaves be allowed to
@@ -3880,6 +4003,13 @@ static int bond_set_mac_address(struct net_device *bond_dev, void *addr)
 
 	dprintk("bond=%p, name=%s\n", bond, (bond_dev ? bond_dev->name : "None"));
 
+	/*
+	 * If fail_over_mac is enabled, do nothing and return success.
+	 * Returning an error causes ifenslave to fail.
+	 */
+	if (bond->params.fail_over_mac)
+		return 0;
+
 	if (!is_valid_ether_addr(sa->sa_data)) {
 		return -EADDRNOTAVAIL;
 	}
@@ -4217,6 +4347,8 @@ static int bond_init(struct net_device *bond_dev, struct bond_params *params)
 	bond->current_arp_slave = NULL;
 	bond->primary_slave = NULL;
 	bond->dev = bond_dev;
+	bond->send_grat_arp = 0;
+	bond->setup_by_slave = 0;
 	INIT_LIST_HEAD(&bond->vlan_list);
 
 	/* Initialize the device entry points */
@@ -4265,7 +4397,6 @@ static int bond_init(struct net_device *bond_dev, struct bond_params *params)
 #ifdef CONFIG_PROC_FS
 	bond_create_proc_entry(bond);
 #endif
-
 	list_add_tail(&bond->bond_list, &bond_dev_list);
 
 	return 0;
@@ -4599,6 +4730,11 @@ static int bond_check_params(struct bond_params *params)
 		primary = NULL;
 	}
 
+	if (fail_over_mac && (bond_mode != BOND_MODE_ACTIVEBACKUP))
+		printk(KERN_WARNING DRV_NAME
+		       ": Warning: fail_over_mac only affects "
+		       "active-backup mode.\n");
+
 	/* fill params struct with the proper values */
 	params->mode = bond_mode;
 	params->xmit_policy = xmit_hashtype;
@@ -4610,6 +4746,7 @@ static int bond_check_params(struct bond_params *params)
 	params->use_carrier = use_carrier;
 	params->lacp_fast = lacp_fast;
 	params->primary[0] = 0;
+	params->fail_over_mac = fail_over_mac;
 
 	if (primary) {
 		strncpy(params->primary, primary, IFNAMSIZ);
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index 6f49ca7e9b66..80c0c8c415ed 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -164,9 +164,7 @@ static ssize_t bonding_store_bonds(struct class *cls, const char *buffer, size_t
 				printk(KERN_INFO DRV_NAME
 					": %s is being deleted...\n",
 					bond->dev->name);
-				bond_deinit(bond->dev);
-				bond_destroy_sysfs_entry(bond);
-				unregister_netdevice(bond->dev);
+				bond_destroy(bond);
 				rtnl_unlock();
 				goto out;
 			}
@@ -260,17 +258,16 @@ static ssize_t bonding_store_slaves(struct device *d,
 	char command[IFNAMSIZ + 1] = { 0, };
 	char *ifname;
 	int i, res, found, ret = count;
+	u32 original_mtu;
 	struct slave *slave;
 	struct net_device *dev = NULL;
 	struct bonding *bond = to_bond(d);
 
 	/* Quick sanity check -- is the bond interface up? */
 	if (!(bond->dev->flags & IFF_UP)) {
-		printk(KERN_ERR DRV_NAME
-		       ": %s: Unable to update slaves because interface is down.\n",
+		printk(KERN_WARNING DRV_NAME
+		       ": %s: doing slave updates when interface is down.\n",
 		       bond->dev->name);
-		ret = -EPERM;
-		goto out;
 	}
 
 	/* Note: We can't hold bond->lock here, as bond_create grabs it. */
@@ -327,6 +324,7 @@ static ssize_t bonding_store_slaves(struct device *d,
 		}
 
 		/* Set the slave's MTU to match the bond */
+		original_mtu = dev->mtu;
 		if (dev->mtu != bond->dev->mtu) {
 			if (dev->change_mtu) {
 				res = dev->change_mtu(dev,
@@ -341,6 +339,9 @@ static ssize_t bonding_store_slaves(struct device *d,
 		}
 		rtnl_lock();
 		res = bond_enslave(bond->dev, dev);
+		bond_for_each_slave(bond, slave, i)
+			if (strnicmp(slave->dev->name, ifname, IFNAMSIZ) == 0)
+				slave->original_mtu = original_mtu;
 		rtnl_unlock();
 		if (res) {
 			ret = res;
@@ -353,13 +354,17 @@ static ssize_t bonding_store_slaves(struct device *d,
 		bond_for_each_slave(bond, slave, i)
 			if (strnicmp(slave->dev->name, ifname, IFNAMSIZ) == 0) {
 				dev = slave->dev;
+				original_mtu = slave->original_mtu;
 				break;
 			}
 		if (dev) {
 			printk(KERN_INFO DRV_NAME ": %s: Removing slave %s\n",
 				bond->dev->name, dev->name);
 			rtnl_lock();
-			res = bond_release(bond->dev, dev);
+			if (bond->setup_by_slave)
+				res = bond_release_and_destroy(bond->dev, dev);
+			else
+				res = bond_release(bond->dev, dev);
 			rtnl_unlock();
 			if (res) {
 				ret = res;
@@ -367,9 +372,9 @@ static ssize_t bonding_store_slaves(struct device *d,
 			}
 			/* set the slave MTU to the default */
 			if (dev->change_mtu) {
-				dev->change_mtu(dev, 1500);
+				dev->change_mtu(dev, original_mtu);
 			} else {
-				dev->mtu = 1500;
+				dev->mtu = original_mtu;
 			}
 		}
 		else {
@@ -563,6 +568,54 @@ static ssize_t bonding_store_arp_validate(struct device *d,
 static DEVICE_ATTR(arp_validate, S_IRUGO | S_IWUSR, bonding_show_arp_validate, bonding_store_arp_validate);
 
 /*
+ * Show and store fail_over_mac. User only allowed to change the
+ * value when there are no slaves.
+ */
+static ssize_t bonding_show_fail_over_mac(struct device *d, struct device_attribute *attr, char *buf)
+{
+	struct bonding *bond = to_bond(d);
+
+	return sprintf(buf, "%d\n", bond->params.fail_over_mac) + 1;
+}
+
+static ssize_t bonding_store_fail_over_mac(struct device *d, struct device_attribute *attr, const char *buf, size_t count)
+{
+	int new_value;
+	int ret = count;
+	struct bonding *bond = to_bond(d);
+
+	if (bond->slave_cnt != 0) {
+		printk(KERN_ERR DRV_NAME
+		       ": %s: Can't alter fail_over_mac with slaves in bond.\n",
+		       bond->dev->name);
+		ret = -EPERM;
+		goto out;
+	}
+
+	if (sscanf(buf, "%d", &new_value) != 1) {
+		printk(KERN_ERR DRV_NAME
+		       ": %s: no fail_over_mac value specified.\n",
+		       bond->dev->name);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if ((new_value == 0) || (new_value == 1)) {
+		bond->params.fail_over_mac = new_value;
+		printk(KERN_INFO DRV_NAME ": %s: Setting fail_over_mac to %d.\n",
+		       bond->dev->name, new_value);
+	} else {
+		printk(KERN_INFO DRV_NAME
+		       ": %s: Ignoring invalid fail_over_mac value %d.\n",
+		       bond->dev->name, new_value);
+	}
+out:
+	return ret;
+}
+
+static DEVICE_ATTR(fail_over_mac, S_IRUGO | S_IWUSR, bonding_show_fail_over_mac, bonding_store_fail_over_mac);
+
+/*
  * Show and set the arp timer interval. There are two tricky bits
  * here. First, if ARP monitoring is activated, then we must disable
  * MII monitoring. Second, if the ARP timer isn't running, we must
@@ -1383,6 +1436,7 @@ static DEVICE_ATTR(ad_partner_mac, S_IRUGO, bonding_show_ad_partner_mac, NULL);
 static struct attribute *per_bond_attrs[] = {
 	&dev_attr_slaves.attr,
 	&dev_attr_mode.attr,
+	&dev_attr_fail_over_mac.attr,
 	&dev_attr_arp_validate.attr,
 	&dev_attr_arp_interval.attr,
 	&dev_attr_arp_ip_target.attr,
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index 2a6af7d23728..a8bbd563265c 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -22,8 +22,8 @@
 #include "bond_3ad.h"
 #include "bond_alb.h"
 
-#define DRV_VERSION "3.1.3"
-#define DRV_RELDATE "June 13, 2007"
+#define DRV_VERSION "3.2.0"
+#define DRV_RELDATE "September 13, 2007"
 #define DRV_NAME "bonding"
 #define DRV_DESCRIPTION "Ethernet Channel Bonding Driver"
 
@@ -128,6 +128,7 @@ struct bond_params {
 	int arp_interval;
 	int arp_validate;
 	int use_carrier;
+	int fail_over_mac;
 	int updelay;
 	int downdelay;
 	int lacp_fast;
@@ -156,6 +157,7 @@ struct slave {
 	s8 link;    /* one of BOND_LINK_XXXX */
 	s8 state;   /* one of BOND_STATE_XXXX */
 	u32 original_flags;
+	u32 original_mtu;
 	u32 link_failure_count;
 	u16 speed;
 	u8 duplex;
@@ -185,6 +187,8 @@ struct bonding {
 	struct timer_list mii_timer;
 	struct timer_list arp_timer;
 	s8 kill_timers;
+	s8 send_grat_arp;
+	s8 setup_by_slave;
 	struct net_device_stats stats;
 #ifdef CONFIG_PROC_FS
 	struct proc_dir_entry *proc_entry;
@@ -292,6 +296,8 @@ static inline void bond_unset_master_alb_flags(struct bonding *bond)
 struct vlan_entry *bond_next_vlan(struct bonding *bond, struct vlan_entry *curr);
 int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb, struct net_device *slave_dev);
 int bond_create(char *name, struct bond_params *params, struct bonding **newbond);
+void bond_destroy(struct bonding *bond);
+int bond_release_and_destroy(struct net_device *bond_dev, struct net_device *slave_dev);
 void bond_deinit(struct net_device *bond_dev);
 int bond_create_sysfs(void);
 void bond_destroy_sysfs(void);
diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c
index 563bf5f6fa2a..7df31b5561cc 100644
--- a/drivers/net/cassini.c
+++ b/drivers/net/cassini.c
@@ -4443,7 +4443,7 @@ static struct {
 	{REG_MAC_COLL_EXCESS},
 	{REG_MAC_COLL_LATE}
 };
-#define CAS_REG_LEN (sizeof(ethtool_register_table)/sizeof(int))
+#define CAS_REG_LEN ARRAY_SIZE(ethtool_register_table)
 #define CAS_MAX_REGS (sizeof (u32)*CAS_REG_LEN)
 
 static void cas_read_regs(struct cas *cp, u8 *ptr, int len)
diff --git a/drivers/net/cpmac.c b/drivers/net/cpmac.c
new file mode 100644
index 000000000000..ed53aaab4c02
--- /dev/null
+++ b/drivers/net/cpmac.c
@@ -0,0 +1,1174 @@
1/*
2 * Copyright (C) 2006, 2007 Eugene Konev
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
17 */
18
19#include <linux/module.h>
20#include <linux/init.h>
21#include <linux/moduleparam.h>
22
23#include <linux/sched.h>
24#include <linux/kernel.h>
25#include <linux/slab.h>
26#include <linux/errno.h>
27#include <linux/types.h>
28#include <linux/delay.h>
29#include <linux/version.h>
30
31#include <linux/netdevice.h>
32#include <linux/etherdevice.h>
33#include <linux/ethtool.h>
34#include <linux/skbuff.h>
35#include <linux/mii.h>
36#include <linux/phy.h>
37#include <linux/platform_device.h>
38#include <linux/dma-mapping.h>
39#include <asm/gpio.h>
40
41MODULE_AUTHOR("Eugene Konev <ejka@imfi.kspu.ru>");
42MODULE_DESCRIPTION("TI AR7 ethernet driver (CPMAC)");
43MODULE_LICENSE("GPL");
44
45static int debug_level = 8;
46static int dumb_switch;
47
48/* Next 2 are only used in cpmac_probe, so it's pointless to change them */
49module_param(debug_level, int, 0444);
50module_param(dumb_switch, int, 0444);
51
52MODULE_PARM_DESC(debug_level, "Number of NETIF_MSG bits to enable");
53MODULE_PARM_DESC(dumb_switch, "Assume switch is not connected to MDIO bus");
54
55#define CPMAC_VERSION "0.5.0"
56/* stolen from net/ieee80211.h */
57#ifndef MAC_FMT
58#define MAC_FMT "%02x:%02x:%02x:%02x:%02x:%02x"
59#define MAC_ARG(x) ((u8*)(x))[0], ((u8*)(x))[1], ((u8*)(x))[2], \
60 ((u8*)(x))[3], ((u8*)(x))[4], ((u8*)(x))[5]
61#endif
62/* frame size + 802.1q tag */
63#define CPMAC_SKB_SIZE (ETH_FRAME_LEN + 4)
64#define CPMAC_QUEUES 8
65
66/* Ethernet registers */
67#define CPMAC_TX_CONTROL 0x0004
68#define CPMAC_TX_TEARDOWN 0x0008
69#define CPMAC_RX_CONTROL 0x0014
70#define CPMAC_RX_TEARDOWN 0x0018
71#define CPMAC_MBP 0x0100
72# define MBP_RXPASSCRC 0x40000000
73# define MBP_RXQOS 0x20000000
74# define MBP_RXNOCHAIN 0x10000000
75# define MBP_RXCMF 0x01000000
76# define MBP_RXSHORT 0x00800000
77# define MBP_RXCEF 0x00400000
78# define MBP_RXPROMISC 0x00200000
79# define MBP_PROMISCCHAN(channel) (((channel) & 0x7) << 16)
80# define MBP_RXBCAST 0x00002000
81# define MBP_BCASTCHAN(channel) (((channel) & 0x7) << 8)
82# define MBP_RXMCAST 0x00000020
83# define MBP_MCASTCHAN(channel) ((channel) & 0x7)
84#define CPMAC_UNICAST_ENABLE 0x0104
85#define CPMAC_UNICAST_CLEAR 0x0108
86#define CPMAC_MAX_LENGTH 0x010c
87#define CPMAC_BUFFER_OFFSET 0x0110
88#define CPMAC_MAC_CONTROL 0x0160
89# define MAC_TXPTYPE 0x00000200
90# define MAC_TXPACE 0x00000040
91# define MAC_MII 0x00000020
92# define MAC_TXFLOW 0x00000010
93# define MAC_RXFLOW 0x00000008
94# define MAC_MTEST 0x00000004
95# define MAC_LOOPBACK 0x00000002
96# define MAC_FDX 0x00000001
97#define CPMAC_MAC_STATUS 0x0164
98# define MAC_STATUS_QOS 0x00000004
99# define MAC_STATUS_RXFLOW 0x00000002
100# define MAC_STATUS_TXFLOW 0x00000001
101#define CPMAC_TX_INT_ENABLE 0x0178
102#define CPMAC_TX_INT_CLEAR 0x017c
103#define CPMAC_MAC_INT_VECTOR 0x0180
104# define MAC_INT_STATUS 0x00080000
105# define MAC_INT_HOST 0x00040000
106# define MAC_INT_RX 0x00020000
107# define MAC_INT_TX 0x00010000
108#define CPMAC_MAC_EOI_VECTOR 0x0184
109#define CPMAC_RX_INT_ENABLE 0x0198
110#define CPMAC_RX_INT_CLEAR 0x019c
111#define CPMAC_MAC_INT_ENABLE 0x01a8
112#define CPMAC_MAC_INT_CLEAR 0x01ac
113#define CPMAC_MAC_ADDR_LO(channel) (0x01b0 + (channel) * 4)
114#define CPMAC_MAC_ADDR_MID 0x01d0
115#define CPMAC_MAC_ADDR_HI 0x01d4
116#define CPMAC_MAC_HASH_LO 0x01d8
117#define CPMAC_MAC_HASH_HI 0x01dc
118#define CPMAC_TX_PTR(channel) (0x0600 + (channel) * 4)
119#define CPMAC_RX_PTR(channel) (0x0620 + (channel) * 4)
120#define CPMAC_TX_ACK(channel) (0x0640 + (channel) * 4)
121#define CPMAC_RX_ACK(channel) (0x0660 + (channel) * 4)
122#define CPMAC_REG_END 0x0680
123/*
124 * Rx/Tx statistics
125 * TODO: use some of them to fill stats in cpmac_stats()
126 */
127#define CPMAC_STATS_RX_GOOD 0x0200
128#define CPMAC_STATS_RX_BCAST 0x0204
129#define CPMAC_STATS_RX_MCAST 0x0208
130#define CPMAC_STATS_RX_PAUSE 0x020c
131#define CPMAC_STATS_RX_CRC 0x0210
132#define CPMAC_STATS_RX_ALIGN 0x0214
133#define CPMAC_STATS_RX_OVER 0x0218
134#define CPMAC_STATS_RX_JABBER 0x021c
135#define CPMAC_STATS_RX_UNDER 0x0220
136#define CPMAC_STATS_RX_FRAG 0x0224
137#define CPMAC_STATS_RX_FILTER 0x0228
138#define CPMAC_STATS_RX_QOSFILTER 0x022c
139#define CPMAC_STATS_RX_OCTETS 0x0230
140
141#define CPMAC_STATS_TX_GOOD 0x0234
142#define CPMAC_STATS_TX_BCAST 0x0238
143#define CPMAC_STATS_TX_MCAST 0x023c
144#define CPMAC_STATS_TX_PAUSE 0x0240
145#define CPMAC_STATS_TX_DEFER 0x0244
146#define CPMAC_STATS_TX_COLLISION 0x0248
147#define CPMAC_STATS_TX_SINGLECOLL 0x024c
148#define CPMAC_STATS_TX_MULTICOLL 0x0250
149#define CPMAC_STATS_TX_EXCESSCOLL 0x0254
150#define CPMAC_STATS_TX_LATECOLL 0x0258
151#define CPMAC_STATS_TX_UNDERRUN 0x025c
152#define CPMAC_STATS_TX_CARRIERSENSE 0x0260
153#define CPMAC_STATS_TX_OCTETS 0x0264
154
155#define cpmac_read(base, reg) (readl((void __iomem *)(base) + (reg)))
156#define cpmac_write(base, reg, val) (writel(val, (void __iomem *)(base) + \
157 (reg)))
158
159/* MDIO bus */
160#define CPMAC_MDIO_VERSION 0x0000
161#define CPMAC_MDIO_CONTROL 0x0004
162# define MDIOC_IDLE 0x80000000
163# define MDIOC_ENABLE 0x40000000
164# define MDIOC_PREAMBLE 0x00100000
165# define MDIOC_FAULT 0x00080000
166# define MDIOC_FAULTDETECT 0x00040000
167# define MDIOC_INTTEST 0x00020000
168# define MDIOC_CLKDIV(div) ((div) & 0xff)
169#define CPMAC_MDIO_ALIVE 0x0008
170#define CPMAC_MDIO_LINK 0x000c
171#define CPMAC_MDIO_ACCESS(channel) (0x0080 + (channel) * 8)
172# define MDIO_BUSY 0x80000000
173# define MDIO_WRITE 0x40000000
174# define MDIO_REG(reg) (((reg) & 0x1f) << 21)
175# define MDIO_PHY(phy) (((phy) & 0x1f) << 16)
176# define MDIO_DATA(data) ((data) & 0xffff)
177#define CPMAC_MDIO_PHYSEL(channel) (0x0084 + (channel) * 8)
178# define PHYSEL_LINKSEL 0x00000040
179# define PHYSEL_LINKINT 0x00000020
180
181struct cpmac_desc {
182 u32 hw_next;
183 u32 hw_data;
184 u16 buflen;
185 u16 bufflags;
186 u16 datalen;
187 u16 dataflags;
188#define CPMAC_SOP 0x8000
189#define CPMAC_EOP 0x4000
190#define CPMAC_OWN 0x2000
191#define CPMAC_EOQ 0x1000
192 struct sk_buff *skb;
193 struct cpmac_desc *next;
194 dma_addr_t mapping;
195 dma_addr_t data_mapping;
196};
197
198struct cpmac_priv {
199 spinlock_t lock;
200 spinlock_t rx_lock;
201 struct cpmac_desc *rx_head;
202 int ring_size;
203 struct cpmac_desc *desc_ring;
204 dma_addr_t dma_ring;
205 void __iomem *regs;
206 struct mii_bus *mii_bus;
207 struct phy_device *phy;
208 char phy_name[BUS_ID_SIZE];
209 int oldlink, oldspeed, oldduplex;
210 u32 msg_enable;
211 struct net_device *dev;
212 struct work_struct reset_work;
213 struct platform_device *pdev;
214};
215
216static irqreturn_t cpmac_irq(int, void *);
217static void cpmac_hw_start(struct net_device *dev);
218static void cpmac_hw_stop(struct net_device *dev);
219static int cpmac_stop(struct net_device *dev);
220static int cpmac_open(struct net_device *dev);
221
222static void cpmac_dump_regs(struct net_device *dev)
223{
224 int i;
225 struct cpmac_priv *priv = netdev_priv(dev);
226 for (i = 0; i < CPMAC_REG_END; i += 4) {
227 if (i % 16 == 0) {
228 if (i)
229 printk("\n");
230 printk(KERN_DEBUG "%s: reg[%p]:", dev->name,
231 priv->regs + i);
232 }
233 printk(" %08x", cpmac_read(priv->regs, i));
234 }
235 printk("\n");
236}
237
238static void cpmac_dump_desc(struct net_device *dev, struct cpmac_desc *desc)
239{
240 int i;
241 printk(KERN_DEBUG "%s: desc[%p]:", dev->name, desc);
242 for (i = 0; i < sizeof(*desc) / 4; i++)
243 printk(" %08x", ((u32 *)desc)[i]);
244 printk("\n");
245}
246
247static void cpmac_dump_skb(struct net_device *dev, struct sk_buff *skb)
248{
249 int i;
250 printk(KERN_DEBUG "%s: skb 0x%p, len=%d\n", dev->name, skb, skb->len);
251 for (i = 0; i < skb->len; i++) {
252 if (i % 16 == 0) {
253 if (i)
254 printk("\n");
255 printk(KERN_DEBUG "%s: data[%p]:", dev->name,
256 skb->data + i);
257 }
258 printk(" %02x", ((u8 *)skb->data)[i]);
259 }
260 printk("\n");
261}
262
263static int cpmac_mdio_read(struct mii_bus *bus, int phy_id, int reg)
264{
265 u32 val;
266
267 while (cpmac_read(bus->priv, CPMAC_MDIO_ACCESS(0)) & MDIO_BUSY)
268 cpu_relax();
269 cpmac_write(bus->priv, CPMAC_MDIO_ACCESS(0), MDIO_BUSY | MDIO_REG(reg) |
270 MDIO_PHY(phy_id));
271 while ((val = cpmac_read(bus->priv, CPMAC_MDIO_ACCESS(0))) & MDIO_BUSY)
272 cpu_relax();
273 return MDIO_DATA(val);
274}
275
276static int cpmac_mdio_write(struct mii_bus *bus, int phy_id,
277 int reg, u16 val)
278{
279 while (cpmac_read(bus->priv, CPMAC_MDIO_ACCESS(0)) & MDIO_BUSY)
280 cpu_relax();
281 cpmac_write(bus->priv, CPMAC_MDIO_ACCESS(0), MDIO_BUSY | MDIO_WRITE |
282 MDIO_REG(reg) | MDIO_PHY(phy_id) | MDIO_DATA(val));
283 return 0;
284}
285
286static int cpmac_mdio_reset(struct mii_bus *bus)
287{
288 ar7_device_reset(AR7_RESET_BIT_MDIO);
289 cpmac_write(bus->priv, CPMAC_MDIO_CONTROL, MDIOC_ENABLE |
290 MDIOC_CLKDIV(ar7_cpmac_freq() / 2200000 - 1));
291 return 0;
292}
293
294static int mii_irqs[PHY_MAX_ADDR] = { PHY_POLL, };
295
296static struct mii_bus cpmac_mii = {
297 .name = "cpmac-mii",
298 .read = cpmac_mdio_read,
299 .write = cpmac_mdio_write,
300 .reset = cpmac_mdio_reset,
301 .irq = mii_irqs,
302};
303
304static int cpmac_config(struct net_device *dev, struct ifmap *map)
305{
306 if (dev->flags & IFF_UP)
307 return -EBUSY;
308
309 /* Don't allow changing the I/O address */
310 if (map->base_addr != dev->base_addr)
311 return -EOPNOTSUPP;
312
313 /* ignore other fields */
314 return 0;
315}
316
317static void cpmac_set_multicast_list(struct net_device *dev)
318{
319 struct dev_mc_list *iter;
320 int i;
321 u8 tmp;
322 u32 mbp, bit, hash[2] = { 0, };
323 struct cpmac_priv *priv = netdev_priv(dev);
324
325 mbp = cpmac_read(priv->regs, CPMAC_MBP);
326 if (dev->flags & IFF_PROMISC) {
327 cpmac_write(priv->regs, CPMAC_MBP, (mbp & ~MBP_PROMISCCHAN(0)) |
328 MBP_RXPROMISC);
329 } else {
330 cpmac_write(priv->regs, CPMAC_MBP, mbp & ~MBP_RXPROMISC);
331 if (dev->flags & IFF_ALLMULTI) {
332 /* enable all multicast mode */
333 cpmac_write(priv->regs, CPMAC_MAC_HASH_LO, 0xffffffff);
334 cpmac_write(priv->regs, CPMAC_MAC_HASH_HI, 0xffffffff);
335 } else {
336 /*
337 * cpmac uses some strange mac address hashing
338 * (not crc32)
339 */
340 for (i = 0, iter = dev->mc_list; i < dev->mc_count;
341 i++, iter = iter->next) {
342 bit = 0;
343 tmp = iter->dmi_addr[0];
344 bit ^= (tmp >> 2) ^ (tmp << 4);
345 tmp = iter->dmi_addr[1];
346 bit ^= (tmp >> 4) ^ (tmp << 2);
347 tmp = iter->dmi_addr[2];
348 bit ^= (tmp >> 6) ^ tmp;
349 tmp = iter->dmi_addr[3];
350 bit ^= (tmp >> 2) ^ (tmp << 4);
351 tmp = iter->dmi_addr[4];
352 bit ^= (tmp >> 4) ^ (tmp << 2);
353 tmp = iter->dmi_addr[5];
354 bit ^= (tmp >> 6) ^ tmp;
355 bit &= 0x3f;
356 hash[bit / 32] |= 1 << (bit % 32);
357 }
358
359 cpmac_write(priv->regs, CPMAC_MAC_HASH_LO, hash[0]);
360 cpmac_write(priv->regs, CPMAC_MAC_HASH_HI, hash[1]);
361 }
362 }
363}
364
365static struct sk_buff *cpmac_rx_one(struct net_device *dev,
366 struct cpmac_priv *priv,
367 struct cpmac_desc *desc)
368{
369 struct sk_buff *skb, *result = NULL;
370
371 if (unlikely(netif_msg_hw(priv)))
372 cpmac_dump_desc(dev, desc);
373 cpmac_write(priv->regs, CPMAC_RX_ACK(0), (u32)desc->mapping);
374 if (unlikely(!desc->datalen)) {
375 if (netif_msg_rx_err(priv) && net_ratelimit())
376 printk(KERN_WARNING "%s: rx: spurious interrupt\n",
377 dev->name);
378 return NULL;
379 }
380
381 skb = netdev_alloc_skb(dev, CPMAC_SKB_SIZE);
382 if (likely(skb)) {
383 skb_reserve(skb, 2);
384 skb_put(desc->skb, desc->datalen);
385 desc->skb->protocol = eth_type_trans(desc->skb, dev);
386 desc->skb->ip_summed = CHECKSUM_NONE;
387 dev->stats.rx_packets++;
388 dev->stats.rx_bytes += desc->datalen;
389 result = desc->skb;
390 dma_unmap_single(&dev->dev, desc->data_mapping, CPMAC_SKB_SIZE,
391 DMA_FROM_DEVICE);
392 desc->skb = skb;
393 desc->data_mapping = dma_map_single(&dev->dev, skb->data,
394 CPMAC_SKB_SIZE,
395 DMA_FROM_DEVICE);
396 desc->hw_data = (u32)desc->data_mapping;
397 if (unlikely(netif_msg_pktdata(priv))) {
398 printk(KERN_DEBUG "%s: received packet:\n", dev->name);
399 cpmac_dump_skb(dev, result);
400 }
401 } else {
402 if (netif_msg_rx_err(priv) && net_ratelimit())
403 printk(KERN_WARNING
404 "%s: low on skbs, dropping packet\n", dev->name);
405 dev->stats.rx_dropped++;
406 }
407
408 desc->buflen = CPMAC_SKB_SIZE;
409 desc->dataflags = CPMAC_OWN;
410
411 return result;
412}
413
414static int cpmac_poll(struct net_device *dev, int *budget)
415{
416 struct sk_buff *skb;
417 struct cpmac_desc *desc;
418 int received = 0, quota = min(dev->quota, *budget);
419 struct cpmac_priv *priv = netdev_priv(dev);
420
421 spin_lock(&priv->rx_lock);
422 if (unlikely(!priv->rx_head)) {
423 if (netif_msg_rx_err(priv) && net_ratelimit())
424 printk(KERN_WARNING "%s: rx: polling, but no queue\n",
425 dev->name);
426 netif_rx_complete(dev);
427 return 0;
428 }
429
430 desc = priv->rx_head;
431 while ((received < quota) && ((desc->dataflags & CPMAC_OWN) == 0)) {
432 skb = cpmac_rx_one(dev, priv, desc);
433 if (likely(skb)) {
434 netif_receive_skb(skb);
435 received++;
436 }
437 desc = desc->next;
438 }
439
440 priv->rx_head = desc;
441 spin_unlock(&priv->rx_lock);
442 *budget -= received;
443 dev->quota -= received;
444 if (unlikely(netif_msg_rx_status(priv)))
445 printk(KERN_DEBUG "%s: poll processed %d packets\n", dev->name,
446 received);
447 if (desc->dataflags & CPMAC_OWN) {
448 netif_rx_complete(dev);
449 cpmac_write(priv->regs, CPMAC_RX_PTR(0), (u32)desc->mapping);
450 cpmac_write(priv->regs, CPMAC_RX_INT_ENABLE, 1);
451 return 0;
452 }
453
454 return 1;
455}
456
457static int cpmac_start_xmit(struct sk_buff *skb, struct net_device *dev)
458{
459 int queue, len;
460 struct cpmac_desc *desc;
461 struct cpmac_priv *priv = netdev_priv(dev);
462
463 if (unlikely(skb_padto(skb, ETH_ZLEN))) {
464 if (netif_msg_tx_err(priv) && net_ratelimit())
465 printk(KERN_WARNING
466 "%s: tx: padding failed, dropping\n", dev->name);
467 spin_lock(&priv->lock);
468 dev->stats.tx_dropped++;
469 spin_unlock(&priv->lock);
470 return -ENOMEM;
471 }
472
473 len = max(skb->len, ETH_ZLEN);
474 queue = skb->queue_mapping;
475#ifdef CONFIG_NETDEVICES_MULTIQUEUE
476 netif_stop_subqueue(dev, queue);
477#else
478 netif_stop_queue(dev);
479#endif
480
481 desc = &priv->desc_ring[queue];
482 if (unlikely(desc->dataflags & CPMAC_OWN)) {
483 if (netif_msg_tx_err(priv) && net_ratelimit())
484 printk(KERN_WARNING "%s: tx dma ring full, dropping\n",
485 dev->name);
486 spin_lock(&priv->lock);
487 dev->stats.tx_dropped++;
488 spin_unlock(&priv->lock);
489 dev_kfree_skb_any(skb);
490 return -ENOMEM;
491 }
492
493 spin_lock(&priv->lock);
494 dev->trans_start = jiffies;
495 spin_unlock(&priv->lock);
496 desc->dataflags = CPMAC_SOP | CPMAC_EOP | CPMAC_OWN;
497 desc->skb = skb;
498 desc->data_mapping = dma_map_single(&dev->dev, skb->data, len,
499 DMA_TO_DEVICE);
500 desc->hw_data = (u32)desc->data_mapping;
501 desc->datalen = len;
502 desc->buflen = len;
503 if (unlikely(netif_msg_tx_queued(priv)))
504 printk(KERN_DEBUG "%s: sending 0x%p, len=%d\n", dev->name, skb,
505 skb->len);
506 if (unlikely(netif_msg_hw(priv)))
507 cpmac_dump_desc(dev, desc);
508 if (unlikely(netif_msg_pktdata(priv)))
509 cpmac_dump_skb(dev, skb);
510 cpmac_write(priv->regs, CPMAC_TX_PTR(queue), (u32)desc->mapping);
511
512 return 0;
513}
514
515static void cpmac_end_xmit(struct net_device *dev, int queue)
516{
517 struct cpmac_desc *desc;
518 struct cpmac_priv *priv = netdev_priv(dev);
519
520 desc = &priv->desc_ring[queue];
521 cpmac_write(priv->regs, CPMAC_TX_ACK(queue), (u32)desc->mapping);
522 if (likely(desc->skb)) {
523 spin_lock(&priv->lock);
524 dev->stats.tx_packets++;
525 dev->stats.tx_bytes += desc->skb->len;
526 spin_unlock(&priv->lock);
527 dma_unmap_single(&dev->dev, desc->data_mapping, desc->skb->len,
528 DMA_TO_DEVICE);
529
530 if (unlikely(netif_msg_tx_done(priv)))
531 printk(KERN_DEBUG "%s: sent 0x%p, len=%d\n", dev->name,
532 desc->skb, desc->skb->len);
533
534 dev_kfree_skb_irq(desc->skb);
535 desc->skb = NULL;
536#ifdef CONFIG_NETDEVICES_MULTIQUEUE
537 if (netif_subqueue_stopped(dev, queue))
538 netif_wake_subqueue(dev, queue);
539#else
540 if (netif_queue_stopped(dev))
541 netif_wake_queue(dev);
542#endif
543 } else {
544 if (netif_msg_tx_err(priv) && net_ratelimit())
545 printk(KERN_WARNING
546 "%s: end_xmit: spurious interrupt\n", dev->name);
547#ifdef CONFIG_NETDEVICES_MULTIQUEUE
548 if (netif_subqueue_stopped(dev, queue))
549 netif_wake_subqueue(dev, queue);
550#else
551 if (netif_queue_stopped(dev))
552 netif_wake_queue(dev);
553#endif
554 }
555}
556
557static void cpmac_hw_stop(struct net_device *dev)
558{
559 int i;
560 struct cpmac_priv *priv = netdev_priv(dev);
561 struct plat_cpmac_data *pdata = priv->pdev->dev.platform_data;
562
563 ar7_device_reset(pdata->reset_bit);
564 cpmac_write(priv->regs, CPMAC_RX_CONTROL,
565 cpmac_read(priv->regs, CPMAC_RX_CONTROL) & ~1);
566 cpmac_write(priv->regs, CPMAC_TX_CONTROL,
567 cpmac_read(priv->regs, CPMAC_TX_CONTROL) & ~1);
568 for (i = 0; i < 8; i++) {
569 cpmac_write(priv->regs, CPMAC_TX_PTR(i), 0);
570 cpmac_write(priv->regs, CPMAC_RX_PTR(i), 0);
571 }
572 cpmac_write(priv->regs, CPMAC_UNICAST_CLEAR, 0xff);
573 cpmac_write(priv->regs, CPMAC_RX_INT_CLEAR, 0xff);
574 cpmac_write(priv->regs, CPMAC_TX_INT_CLEAR, 0xff);
575 cpmac_write(priv->regs, CPMAC_MAC_INT_CLEAR, 0xff);
576 cpmac_write(priv->regs, CPMAC_MAC_CONTROL,
577 cpmac_read(priv->regs, CPMAC_MAC_CONTROL) & ~MAC_MII);
578}
579
580static void cpmac_hw_start(struct net_device *dev)
581{
582 int i;
583 struct cpmac_priv *priv = netdev_priv(dev);
584 struct plat_cpmac_data *pdata = priv->pdev->dev.platform_data;
585
586 ar7_device_reset(pdata->reset_bit);
587 for (i = 0; i < 8; i++) {
588 cpmac_write(priv->regs, CPMAC_TX_PTR(i), 0);
589 cpmac_write(priv->regs, CPMAC_RX_PTR(i), 0);
590 }
591 cpmac_write(priv->regs, CPMAC_RX_PTR(0), priv->rx_head->mapping);
592
593 cpmac_write(priv->regs, CPMAC_MBP, MBP_RXSHORT | MBP_RXBCAST |
594 MBP_RXMCAST);
595 cpmac_write(priv->regs, CPMAC_BUFFER_OFFSET, 0);
596 for (i = 0; i < 8; i++)
597 cpmac_write(priv->regs, CPMAC_MAC_ADDR_LO(i), dev->dev_addr[5]);
598 cpmac_write(priv->regs, CPMAC_MAC_ADDR_MID, dev->dev_addr[4]);
599 cpmac_write(priv->regs, CPMAC_MAC_ADDR_HI, dev->dev_addr[0] |
600 (dev->dev_addr[1] << 8) | (dev->dev_addr[2] << 16) |
601 (dev->dev_addr[3] << 24));
602 cpmac_write(priv->regs, CPMAC_MAX_LENGTH, CPMAC_SKB_SIZE);
603 cpmac_write(priv->regs, CPMAC_UNICAST_CLEAR, 0xff);
604 cpmac_write(priv->regs, CPMAC_RX_INT_CLEAR, 0xff);
605 cpmac_write(priv->regs, CPMAC_TX_INT_CLEAR, 0xff);
606 cpmac_write(priv->regs, CPMAC_MAC_INT_CLEAR, 0xff);
607 cpmac_write(priv->regs, CPMAC_UNICAST_ENABLE, 1);
608 cpmac_write(priv->regs, CPMAC_RX_INT_ENABLE, 1);
609 cpmac_write(priv->regs, CPMAC_TX_INT_ENABLE, 0xff);
610 cpmac_write(priv->regs, CPMAC_MAC_INT_ENABLE, 3);
611
612 cpmac_write(priv->regs, CPMAC_RX_CONTROL,
613 cpmac_read(priv->regs, CPMAC_RX_CONTROL) | 1);
614 cpmac_write(priv->regs, CPMAC_TX_CONTROL,
615 cpmac_read(priv->regs, CPMAC_TX_CONTROL) | 1);
616 cpmac_write(priv->regs, CPMAC_MAC_CONTROL,
617 cpmac_read(priv->regs, CPMAC_MAC_CONTROL) | MAC_MII |
618 MAC_FDX);
619}
620
621static void cpmac_clear_rx(struct net_device *dev)
622{
623 struct cpmac_priv *priv = netdev_priv(dev);
624 struct cpmac_desc *desc;
625 int i;
626 if (unlikely(!priv->rx_head))
627 return;
628 desc = priv->rx_head;
629 for (i = 0; i < priv->ring_size; i++) {
630 if ((desc->dataflags & CPMAC_OWN) == 0) {
631 if (netif_msg_rx_err(priv) && net_ratelimit())
632 printk(KERN_WARNING "%s: packet dropped\n",
633 dev->name);
634 if (unlikely(netif_msg_hw(priv)))
635 cpmac_dump_desc(dev, desc);
636 desc->dataflags = CPMAC_OWN;
637 dev->stats.rx_dropped++;
638 }
639 desc = desc->next;
640 }
641}
642
643static void cpmac_clear_tx(struct net_device *dev)
644{
645 struct cpmac_priv *priv = netdev_priv(dev);
646 int i;
647 if (unlikely(!priv->desc_ring))
648 return;
649 for (i = 0; i < CPMAC_QUEUES; i++)
650 if (priv->desc_ring[i].skb) {
651 dev_kfree_skb_any(priv->desc_ring[i].skb);
652 if (netif_subqueue_stopped(dev, i))
653 netif_wake_subqueue(dev, i);
654 }
655}
656
657static void cpmac_hw_error(struct work_struct *work)
658{
659 struct cpmac_priv *priv =
660 container_of(work, struct cpmac_priv, reset_work);
661
662 spin_lock(&priv->rx_lock);
663 cpmac_clear_rx(priv->dev);
664 spin_unlock(&priv->rx_lock);
665 cpmac_clear_tx(priv->dev);
666 cpmac_hw_start(priv->dev);
667 netif_start_queue(priv->dev);
668}
669
670static irqreturn_t cpmac_irq(int irq, void *dev_id)
671{
672 struct net_device *dev = dev_id;
673 struct cpmac_priv *priv;
674 int queue;
675 u32 status;
676
677 if (!dev)
678 return IRQ_NONE;
679
680 priv = netdev_priv(dev);
681
682 status = cpmac_read(priv->regs, CPMAC_MAC_INT_VECTOR);
683
684 if (unlikely(netif_msg_intr(priv)))
685 printk(KERN_DEBUG "%s: interrupt status: 0x%08x\n", dev->name,
686 status);
687
688 if (status & MAC_INT_TX)
689 cpmac_end_xmit(dev, (status & 7));
690
691 if (status & MAC_INT_RX) {
692 queue = (status >> 8) & 7;
693 netif_rx_schedule(dev);
694 cpmac_write(priv->regs, CPMAC_RX_INT_CLEAR, 1 << queue);
695 }
696
697 cpmac_write(priv->regs, CPMAC_MAC_EOI_VECTOR, 0);
698
699 if (unlikely(status & (MAC_INT_HOST | MAC_INT_STATUS))) {
700 if (netif_msg_drv(priv) && net_ratelimit())
701 printk(KERN_ERR "%s: hw error, resetting...\n",
702 dev->name);
703 netif_stop_queue(dev);
704 cpmac_hw_stop(dev);
705 schedule_work(&priv->reset_work);
706 if (unlikely(netif_msg_hw(priv)))
707 cpmac_dump_regs(dev);
708 }
709
710 return IRQ_HANDLED;
711}
712
713static void cpmac_tx_timeout(struct net_device *dev)
714{
715 struct cpmac_priv *priv = netdev_priv(dev);
716 int i;
717
718 spin_lock(&priv->lock);
719 dev->stats.tx_errors++;
720 spin_unlock(&priv->lock);
721 if (netif_msg_tx_err(priv) && net_ratelimit())
722 printk(KERN_WARNING "%s: transmit timeout\n", dev->name);
723 /*
724	 * FIXME: waking up a random queue is not the best thing to
725	 * do... on the other hand, why did we get here at all?
726 */
727#ifdef CONFIG_NETDEVICES_MULTIQUEUE
728 for (i = 0; i < CPMAC_QUEUES; i++)
729 if (priv->desc_ring[i].skb) {
730 dev_kfree_skb_any(priv->desc_ring[i].skb);
731 netif_wake_subqueue(dev, i);
732 break;
733 }
734#else
735 if (priv->desc_ring[0].skb)
736 dev_kfree_skb_any(priv->desc_ring[0].skb);
737 netif_wake_queue(dev);
738#endif
739}
740
741static int cpmac_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
742{
743 struct cpmac_priv *priv = netdev_priv(dev);
744 if (!(netif_running(dev)))
745 return -EINVAL;
746 if (!priv->phy)
747 return -EINVAL;
748 if ((cmd == SIOCGMIIPHY) || (cmd == SIOCGMIIREG) ||
749 (cmd == SIOCSMIIREG))
750 return phy_mii_ioctl(priv->phy, if_mii(ifr), cmd);
751
752 return -EOPNOTSUPP;
753}
754
755static int cpmac_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
756{
757 struct cpmac_priv *priv = netdev_priv(dev);
758
759 if (priv->phy)
760 return phy_ethtool_gset(priv->phy, cmd);
761
762 return -EINVAL;
763}
764
765static int cpmac_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
766{
767 struct cpmac_priv *priv = netdev_priv(dev);
768
769 if (!capable(CAP_NET_ADMIN))
770 return -EPERM;
771
772 if (priv->phy)
773 return phy_ethtool_sset(priv->phy, cmd);
774
775 return -EINVAL;
776}
777
778static void cpmac_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ring)
779{
780 struct cpmac_priv *priv = netdev_priv(dev);
781
782 ring->rx_max_pending = 1024;
783 ring->rx_mini_max_pending = 1;
784 ring->rx_jumbo_max_pending = 1;
785 ring->tx_max_pending = 1;
786
787 ring->rx_pending = priv->ring_size;
788 ring->rx_mini_pending = 1;
789 ring->rx_jumbo_pending = 1;
790 ring->tx_pending = 1;
791}
792
793static int cpmac_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ring)
794{
795 struct cpmac_priv *priv = netdev_priv(dev);
796
797	if (dev->flags & IFF_UP)
798 return -EBUSY;
799 priv->ring_size = ring->rx_pending;
800 return 0;
801}
802
803static void cpmac_get_drvinfo(struct net_device *dev,
804 struct ethtool_drvinfo *info)
805{
806 strcpy(info->driver, "cpmac");
807 strcpy(info->version, CPMAC_VERSION);
808 info->fw_version[0] = '\0';
809 sprintf(info->bus_info, "%s", "cpmac");
810 info->regdump_len = 0;
811}
812
813static const struct ethtool_ops cpmac_ethtool_ops = {
814 .get_settings = cpmac_get_settings,
815 .set_settings = cpmac_set_settings,
816 .get_drvinfo = cpmac_get_drvinfo,
817 .get_link = ethtool_op_get_link,
818 .get_ringparam = cpmac_get_ringparam,
819 .set_ringparam = cpmac_set_ringparam,
820};
821
822static void cpmac_adjust_link(struct net_device *dev)
823{
824 struct cpmac_priv *priv = netdev_priv(dev);
825 int new_state = 0;
826
827 spin_lock(&priv->lock);
828 if (priv->phy->link) {
829 netif_start_queue(dev);
830 if (priv->phy->duplex != priv->oldduplex) {
831 new_state = 1;
832 priv->oldduplex = priv->phy->duplex;
833 }
834
835 if (priv->phy->speed != priv->oldspeed) {
836 new_state = 1;
837 priv->oldspeed = priv->phy->speed;
838 }
839
840 if (!priv->oldlink) {
841 new_state = 1;
842 priv->oldlink = 1;
843 netif_schedule(dev);
844 }
845 } else if (priv->oldlink) {
846 netif_stop_queue(dev);
847 new_state = 1;
848 priv->oldlink = 0;
849 priv->oldspeed = 0;
850 priv->oldduplex = -1;
851 }
852
853 if (new_state && netif_msg_link(priv) && net_ratelimit())
854 phy_print_status(priv->phy);
855
856 spin_unlock(&priv->lock);
857}
858
859static int cpmac_open(struct net_device *dev)
860{
861 int i, size, res;
862 struct cpmac_priv *priv = netdev_priv(dev);
863 struct resource *mem;
864 struct cpmac_desc *desc;
865 struct sk_buff *skb;
866
867 priv->phy = phy_connect(dev, priv->phy_name, &cpmac_adjust_link,
868 0, PHY_INTERFACE_MODE_MII);
869 if (IS_ERR(priv->phy)) {
870 if (netif_msg_drv(priv))
871 printk(KERN_ERR "%s: Could not attach to PHY\n",
872 dev->name);
873 return PTR_ERR(priv->phy);
874 }
875
876 mem = platform_get_resource_byname(priv->pdev, IORESOURCE_MEM, "regs");
877 if (!request_mem_region(mem->start, mem->end - mem->start, dev->name)) {
878 if (netif_msg_drv(priv))
879 printk(KERN_ERR "%s: failed to request registers\n",
880 dev->name);
881 res = -ENXIO;
882 goto fail_reserve;
883 }
884
885 priv->regs = ioremap(mem->start, mem->end - mem->start);
886 if (!priv->regs) {
887 if (netif_msg_drv(priv))
888 printk(KERN_ERR "%s: failed to remap registers\n",
889 dev->name);
890 res = -ENXIO;
891 goto fail_remap;
892 }
893
894 size = priv->ring_size + CPMAC_QUEUES;
895 priv->desc_ring = dma_alloc_coherent(&dev->dev,
896 sizeof(struct cpmac_desc) * size,
897 &priv->dma_ring,
898 GFP_KERNEL);
899 if (!priv->desc_ring) {
900 res = -ENOMEM;
901 goto fail_alloc;
902 }
903
904 for (i = 0; i < size; i++)
905 priv->desc_ring[i].mapping = priv->dma_ring + sizeof(*desc) * i;
906
907 priv->rx_head = &priv->desc_ring[CPMAC_QUEUES];
908 for (i = 0, desc = priv->rx_head; i < priv->ring_size; i++, desc++) {
909 skb = netdev_alloc_skb(dev, CPMAC_SKB_SIZE);
910 if (unlikely(!skb)) {
911 res = -ENOMEM;
912 goto fail_desc;
913 }
914 skb_reserve(skb, 2);
915 desc->skb = skb;
916 desc->data_mapping = dma_map_single(&dev->dev, skb->data,
917 CPMAC_SKB_SIZE,
918 DMA_FROM_DEVICE);
919 desc->hw_data = (u32)desc->data_mapping;
920 desc->buflen = CPMAC_SKB_SIZE;
921 desc->dataflags = CPMAC_OWN;
922 desc->next = &priv->rx_head[(i + 1) % priv->ring_size];
923 desc->hw_next = (u32)desc->next->mapping;
924 }
925
926 if ((res = request_irq(dev->irq, cpmac_irq, IRQF_SHARED,
927 dev->name, dev))) {
928 if (netif_msg_drv(priv))
929 printk(KERN_ERR "%s: failed to obtain irq\n",
930 dev->name);
931 goto fail_irq;
932 }
933
934 INIT_WORK(&priv->reset_work, cpmac_hw_error);
935 cpmac_hw_start(dev);
936
937 priv->phy->state = PHY_CHANGELINK;
938 phy_start(priv->phy);
939
940 return 0;
941
942fail_irq:
943fail_desc:
944 for (i = 0; i < priv->ring_size; i++) {
945 if (priv->rx_head[i].skb) {
946 dma_unmap_single(&dev->dev,
947 priv->rx_head[i].data_mapping,
948 CPMAC_SKB_SIZE,
949 DMA_FROM_DEVICE);
950 kfree_skb(priv->rx_head[i].skb);
951 }
952 }
953fail_alloc:
954	if (priv->desc_ring) dma_free_coherent(&dev->dev, sizeof(struct cpmac_desc) * size, priv->desc_ring, priv->dma_ring);
955 iounmap(priv->regs);
956
957fail_remap:
958 release_mem_region(mem->start, mem->end - mem->start);
959
960fail_reserve:
961 phy_disconnect(priv->phy);
962
963 return res;
964}
965
966static int cpmac_stop(struct net_device *dev)
967{
968 int i;
969 struct cpmac_priv *priv = netdev_priv(dev);
970 struct resource *mem;
971
972 netif_stop_queue(dev);
973
974 cancel_work_sync(&priv->reset_work);
975 phy_stop(priv->phy);
976 phy_disconnect(priv->phy);
977 priv->phy = NULL;
978
979 cpmac_hw_stop(dev);
980
981 for (i = 0; i < 8; i++)
982 cpmac_write(priv->regs, CPMAC_TX_PTR(i), 0);
983 cpmac_write(priv->regs, CPMAC_RX_PTR(0), 0);
984 cpmac_write(priv->regs, CPMAC_MBP, 0);
985
986 free_irq(dev->irq, dev);
987 iounmap(priv->regs);
988 mem = platform_get_resource_byname(priv->pdev, IORESOURCE_MEM, "regs");
989 release_mem_region(mem->start, mem->end - mem->start);
990 priv->rx_head = &priv->desc_ring[CPMAC_QUEUES];
991 for (i = 0; i < priv->ring_size; i++) {
992 if (priv->rx_head[i].skb) {
993 dma_unmap_single(&dev->dev,
994 priv->rx_head[i].data_mapping,
995 CPMAC_SKB_SIZE,
996 DMA_FROM_DEVICE);
997 kfree_skb(priv->rx_head[i].skb);
998 }
999 }
1000
1001 dma_free_coherent(&dev->dev, sizeof(struct cpmac_desc) *
1002 (CPMAC_QUEUES + priv->ring_size),
1003 priv->desc_ring, priv->dma_ring);
1004 return 0;
1005}
1006
1007static int external_switch;
1008
1009static int __devinit cpmac_probe(struct platform_device *pdev)
1010{
1011 int rc, phy_id;
1012 struct resource *mem;
1013 struct cpmac_priv *priv;
1014 struct net_device *dev;
1015 struct plat_cpmac_data *pdata;
1016
1017 pdata = pdev->dev.platform_data;
1018
1019 for (phy_id = 0; phy_id < PHY_MAX_ADDR; phy_id++) {
1020 if (!(pdata->phy_mask & (1 << phy_id)))
1021 continue;
1022 if (!cpmac_mii.phy_map[phy_id])
1023 continue;
1024 break;
1025 }
1026
1027 if (phy_id == PHY_MAX_ADDR) {
1028 if (external_switch || dumb_switch)
1029 phy_id = 0;
1030 else {
1031 printk(KERN_ERR "cpmac: no PHY present\n");
1032 return -ENODEV;
1033 }
1034 }
1035
1036 dev = alloc_etherdev_mq(sizeof(*priv), CPMAC_QUEUES);
1037
1038 if (!dev) {
1039 printk(KERN_ERR "cpmac: Unable to allocate net_device\n");
1040 return -ENOMEM;
1041 }
1042
1043 platform_set_drvdata(pdev, dev);
1044 priv = netdev_priv(dev);
1045
1046 priv->pdev = pdev;
1047 mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
1048 if (!mem) {
1049 rc = -ENODEV;
1050 goto fail;
1051 }
1052
1053 dev->irq = platform_get_irq_byname(pdev, "irq");
1054
1055 dev->open = cpmac_open;
1056 dev->stop = cpmac_stop;
1057 dev->set_config = cpmac_config;
1058 dev->hard_start_xmit = cpmac_start_xmit;
1059 dev->do_ioctl = cpmac_ioctl;
1060 dev->set_multicast_list = cpmac_set_multicast_list;
1061 dev->tx_timeout = cpmac_tx_timeout;
1062 dev->ethtool_ops = &cpmac_ethtool_ops;
1063 dev->poll = cpmac_poll;
1064 dev->weight = 64;
1065 dev->features |= NETIF_F_MULTI_QUEUE;
1066
1067 spin_lock_init(&priv->lock);
1068 spin_lock_init(&priv->rx_lock);
1069 priv->dev = dev;
1070 priv->ring_size = 64;
1071 priv->msg_enable = netif_msg_init(debug_level, 0xff);
1072 memcpy(dev->dev_addr, pdata->dev_addr, sizeof(dev->dev_addr));
1073 if (phy_id == 31) {
1074 snprintf(priv->phy_name, BUS_ID_SIZE, PHY_ID_FMT,
1075 cpmac_mii.id, phy_id);
1076 } else
1077 snprintf(priv->phy_name, BUS_ID_SIZE, "fixed@%d:%d", 100, 1);
1078
1079 if ((rc = register_netdev(dev))) {
1080 printk(KERN_ERR "cpmac: error %i registering device %s\n", rc,
1081 dev->name);
1082 goto fail;
1083 }
1084
1085 if (netif_msg_probe(priv)) {
1086 printk(KERN_INFO
1087 "cpmac: device %s (regs: %p, irq: %d, phy: %s, mac: "
1088 MAC_FMT ")\n", dev->name, (void *)mem->start, dev->irq,
1089 priv->phy_name, MAC_ARG(dev->dev_addr));
1090 }
1091 return 0;
1092
1093fail:
1094 free_netdev(dev);
1095 return rc;
1096}
1097
1098static int __devexit cpmac_remove(struct platform_device *pdev)
1099{
1100 struct net_device *dev = platform_get_drvdata(pdev);
1101 unregister_netdev(dev);
1102 free_netdev(dev);
1103 return 0;
1104}
1105
1106static struct platform_driver cpmac_driver = {
1107 .driver.name = "cpmac",
1108 .probe = cpmac_probe,
1109 .remove = __devexit_p(cpmac_remove),
1110};
1111
1112int __devinit cpmac_init(void)
1113{
1114 u32 mask;
1115 int i, res;
1116
1117 cpmac_mii.priv = ioremap(AR7_REGS_MDIO, 256);
1118
1119 if (!cpmac_mii.priv) {
1120 printk(KERN_ERR "Can't ioremap mdio registers\n");
1121 return -ENXIO;
1122 }
1123
1124#warning FIXME: unhardcode gpio&reset bits
1125 ar7_gpio_disable(26);
1126 ar7_gpio_disable(27);
1127 ar7_device_reset(AR7_RESET_BIT_CPMAC_LO);
1128 ar7_device_reset(AR7_RESET_BIT_CPMAC_HI);
1129 ar7_device_reset(AR7_RESET_BIT_EPHY);
1130
1131 cpmac_mii.reset(&cpmac_mii);
1132
1133 for (i = 0; i < 300000; i++)
1134 if ((mask = cpmac_read(cpmac_mii.priv, CPMAC_MDIO_ALIVE)))
1135 break;
1136 else
1137 cpu_relax();
1138
1139 mask &= 0x7fffffff;
1140 if (mask & (mask - 1)) {
1141 external_switch = 1;
1142 mask = 0;
1143 }
1144
1145 cpmac_mii.phy_mask = ~(mask | 0x80000000);
1146
1147 res = mdiobus_register(&cpmac_mii);
1148 if (res)
1149 goto fail_mii;
1150
1151 res = platform_driver_register(&cpmac_driver);
1152 if (res)
1153 goto fail_cpmac;
1154
1155 return 0;
1156
1157fail_cpmac:
1158 mdiobus_unregister(&cpmac_mii);
1159
1160fail_mii:
1161 iounmap(cpmac_mii.priv);
1162
1163 return res;
1164}
1165
1166void __devexit cpmac_exit(void)
1167{
1168 platform_driver_unregister(&cpmac_driver);
1169 mdiobus_unregister(&cpmac_mii);
1170 iounmap(cpmac_mii.priv);
1171}
1172
1173module_init(cpmac_init);
1174module_exit(cpmac_exit);
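
[Editor's sketch, not part of this commit: cpmac_poll() above is written against the pre-netif_napi_add ->poll() interface. The fragment below shows that contract in isolation; my_priv, my_rx_pending(), my_rx_one() and my_enable_rx_irq() are hypothetical stand-ins for the driver-specific ring handling.]

/*
 * Sketch only: the 2.6.23-era net_device ->poll() contract.  The handler
 * consumes at most min(*budget, dev->quota) packets, decrements both
 * counters, and returns 0 only after netif_rx_complete() once the ring is
 * empty; returning 1 keeps the device on the poll list.
 */
static int my_poll(struct net_device *dev, int *budget)
{
	struct my_priv *priv = netdev_priv(dev);
	int quota = min(*budget, dev->quota);
	int received = 0;

	while (received < quota && my_rx_pending(priv)) {
		my_rx_one(priv);	/* hand one skb to netif_receive_skb() */
		received++;
	}

	*budget -= received;
	dev->quota -= received;

	if (!my_rx_pending(priv)) {
		netif_rx_complete(dev);	/* leave the poll list ... */
		my_enable_rx_irq(priv);	/* ... and re-arm the RX interrupt */
		return 0;
	}

	return 1;			/* more packets queued, poll again */
}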
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index 0db5e6fabe73..558440c15b6c 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -168,7 +168,6 @@ static int gfar_probe(struct platform_device *pdev)
168 struct gfar_private *priv = NULL; 168 struct gfar_private *priv = NULL;
169 struct gianfar_platform_data *einfo; 169 struct gianfar_platform_data *einfo;
170 struct resource *r; 170 struct resource *r;
171 int idx;
172 int err = 0; 171 int err = 0;
173 DECLARE_MAC_BUF(mac); 172 DECLARE_MAC_BUF(mac);
174 173
@@ -261,7 +260,9 @@ static int gfar_probe(struct platform_device *pdev)
261 dev->hard_start_xmit = gfar_start_xmit; 260 dev->hard_start_xmit = gfar_start_xmit;
262 dev->tx_timeout = gfar_timeout; 261 dev->tx_timeout = gfar_timeout;
263 dev->watchdog_timeo = TX_TIMEOUT; 262 dev->watchdog_timeo = TX_TIMEOUT;
263#ifdef CONFIG_GFAR_NAPI
264 netif_napi_add(dev, &priv->napi, gfar_poll, GFAR_DEV_WEIGHT); 264 netif_napi_add(dev, &priv->napi, gfar_poll, GFAR_DEV_WEIGHT);
265#endif
265#ifdef CONFIG_NET_POLL_CONTROLLER 266#ifdef CONFIG_NET_POLL_CONTROLLER
266 dev->poll_controller = gfar_netpoll; 267 dev->poll_controller = gfar_netpoll;
267#endif 268#endif
@@ -931,9 +932,14 @@ tx_skb_fail:
931/* Returns 0 for success. */ 932/* Returns 0 for success. */
932static int gfar_enet_open(struct net_device *dev) 933static int gfar_enet_open(struct net_device *dev)
933{ 934{
935#ifdef CONFIG_GFAR_NAPI
936 struct gfar_private *priv = netdev_priv(dev);
937#endif
934 int err; 938 int err;
935 939
940#ifdef CONFIG_GFAR_NAPI
936 napi_enable(&priv->napi); 941 napi_enable(&priv->napi);
942#endif
937 943
938 /* Initialize a bunch of registers */ 944 /* Initialize a bunch of registers */
939 init_registers(dev); 945 init_registers(dev);
@@ -943,13 +949,17 @@ static int gfar_enet_open(struct net_device *dev)
943 err = init_phy(dev); 949 err = init_phy(dev);
944 950
945 if(err) { 951 if(err) {
952#ifdef CONFIG_GFAR_NAPI
946 napi_disable(&priv->napi); 953 napi_disable(&priv->napi);
954#endif
947 return err; 955 return err;
948 } 956 }
949 957
950 err = startup_gfar(dev); 958 err = startup_gfar(dev);
951 if (err) 959 if (err)
960#ifdef CONFIG_GFAR_NAPI
952 napi_disable(&priv->napi); 961 napi_disable(&priv->napi);
962#endif
953 963
954 netif_start_queue(dev); 964 netif_start_queue(dev);
955 965
@@ -1103,7 +1113,9 @@ static int gfar_close(struct net_device *dev)
1103{ 1113{
1104 struct gfar_private *priv = netdev_priv(dev); 1114 struct gfar_private *priv = netdev_priv(dev);
1105 1115
1116#ifdef CONFIG_GFAR_NAPI
1106 napi_disable(&priv->napi); 1117 napi_disable(&priv->napi);
1118#endif
1107 1119
1108 stop_gfar(dev); 1120 stop_gfar(dev);
1109 1121
diff --git a/drivers/net/ibm_emac/ibm_emac_mal.c b/drivers/net/ibm_emac/ibm_emac_mal.c
index 4e49e8c4f871..dcd8826fc749 100644
--- a/drivers/net/ibm_emac/ibm_emac_mal.c
+++ b/drivers/net/ibm_emac/ibm_emac_mal.c
@@ -413,7 +413,10 @@ static int __init mal_probe(struct ocp_device *ocpdev)
413 ocpdev->def->index); 413 ocpdev->def->index);
414 return -ENOMEM; 414 return -ENOMEM;
415 } 415 }
416 mal->dcrbase = maldata->dcr_base; 416
417 /* XXX This only works for native dcr for now */
418 mal->dcrhost = dcr_map(NULL, maldata->dcr_base, 0);
419
417 mal->def = ocpdev->def; 420 mal->def = ocpdev->def;
418 421
419 INIT_LIST_HEAD(&mal->poll_list); 422 INIT_LIST_HEAD(&mal->poll_list);
diff --git a/drivers/net/ibm_emac/ibm_emac_mal.h b/drivers/net/ibm_emac/ibm_emac_mal.h
index 8f54d621994d..b8adbe6d4b01 100644
--- a/drivers/net/ibm_emac/ibm_emac_mal.h
+++ b/drivers/net/ibm_emac/ibm_emac_mal.h
@@ -191,7 +191,6 @@ struct mal_commac {
191}; 191};
192 192
193struct ibm_ocp_mal { 193struct ibm_ocp_mal {
194 int dcrbase;
195 dcr_host_t dcrhost; 194 dcr_host_t dcrhost;
196 195
197 struct list_head poll_list; 196 struct list_head poll_list;
@@ -209,12 +208,12 @@ struct ibm_ocp_mal {
209 208
210static inline u32 get_mal_dcrn(struct ibm_ocp_mal *mal, int reg) 209static inline u32 get_mal_dcrn(struct ibm_ocp_mal *mal, int reg)
211{ 210{
212 return dcr_read(mal->dcrhost, mal->dcrbase + reg); 211 return dcr_read(mal->dcrhost, reg);
213} 212}
214 213
215static inline void set_mal_dcrn(struct ibm_ocp_mal *mal, int reg, u32 val) 214static inline void set_mal_dcrn(struct ibm_ocp_mal *mal, int reg, u32 val)
216{ 215{
217 dcr_write(mal->dcrhost, mal->dcrbase + reg, val); 216 dcr_write(mal->dcrhost, reg, val);
218} 217}
219 218
220/* Register MAL devices */ 219/* Register MAL devices */
diff --git a/drivers/net/ibm_newemac/mal.c b/drivers/net/ibm_newemac/mal.c
index 58854117b1a9..39f4cb6b0cf3 100644
--- a/drivers/net/ibm_newemac/mal.c
+++ b/drivers/net/ibm_newemac/mal.c
@@ -461,6 +461,7 @@ static int __devinit mal_probe(struct of_device *ofdev,
461 struct mal_instance *mal; 461 struct mal_instance *mal;
462 int err = 0, i, bd_size; 462 int err = 0, i, bd_size;
463 int index = mal_count++; 463 int index = mal_count++;
464 unsigned int dcr_base;
464 const u32 *prop; 465 const u32 *prop;
465 u32 cfg; 466 u32 cfg;
466 467
@@ -497,14 +498,14 @@ static int __devinit mal_probe(struct of_device *ofdev,
497 } 498 }
498 mal->num_rx_chans = prop[0]; 499 mal->num_rx_chans = prop[0];
499 500
500 mal->dcr_base = dcr_resource_start(ofdev->node, 0); 501 dcr_base = dcr_resource_start(ofdev->node, 0);
501 if (mal->dcr_base == 0) { 502 if (dcr_base == 0) {
502 printk(KERN_ERR 503 printk(KERN_ERR
503 "mal%d: can't find DCR resource!\n", index); 504 "mal%d: can't find DCR resource!\n", index);
504 err = -ENODEV; 505 err = -ENODEV;
505 goto fail; 506 goto fail;
506 } 507 }
507 mal->dcr_host = dcr_map(ofdev->node, mal->dcr_base, 0x100); 508 mal->dcr_host = dcr_map(ofdev->node, dcr_base, 0x100);
508 if (!DCR_MAP_OK(mal->dcr_host)) { 509 if (!DCR_MAP_OK(mal->dcr_host)) {
509 printk(KERN_ERR 510 printk(KERN_ERR
510 "mal%d: failed to map DCRs !\n", index); 511 "mal%d: failed to map DCRs !\n", index);
@@ -626,7 +627,7 @@ static int __devinit mal_probe(struct of_device *ofdev,
626 fail2: 627 fail2:
627 dma_free_coherent(&ofdev->dev, bd_size, mal->bd_virt, mal->bd_dma); 628 dma_free_coherent(&ofdev->dev, bd_size, mal->bd_virt, mal->bd_dma);
628 fail_unmap: 629 fail_unmap:
629 dcr_unmap(mal->dcr_host, mal->dcr_base, 0x100); 630 dcr_unmap(mal->dcr_host, 0x100);
630 fail: 631 fail:
631 kfree(mal); 632 kfree(mal);
632 633
diff --git a/drivers/net/ibm_newemac/mal.h b/drivers/net/ibm_newemac/mal.h
index cb1a16d589fe..784edb8ea822 100644
--- a/drivers/net/ibm_newemac/mal.h
+++ b/drivers/net/ibm_newemac/mal.h
@@ -185,7 +185,6 @@ struct mal_commac {
185 185
186struct mal_instance { 186struct mal_instance {
187 int version; 187 int version;
188 int dcr_base;
189 dcr_host_t dcr_host; 188 dcr_host_t dcr_host;
190 189
191 int num_tx_chans; /* Number of TX channels */ 190 int num_tx_chans; /* Number of TX channels */
@@ -213,12 +212,12 @@ struct mal_instance {
213 212
214static inline u32 get_mal_dcrn(struct mal_instance *mal, int reg) 213static inline u32 get_mal_dcrn(struct mal_instance *mal, int reg)
215{ 214{
216 return dcr_read(mal->dcr_host, mal->dcr_base + reg); 215 return dcr_read(mal->dcr_host, reg);
217} 216}
218 217
219static inline void set_mal_dcrn(struct mal_instance *mal, int reg, u32 val) 218static inline void set_mal_dcrn(struct mal_instance *mal, int reg, u32 val)
220{ 219{
221 dcr_write(mal->dcr_host, mal->dcr_base + reg, val); 220 dcr_write(mal->dcr_host, reg, val);
222} 221}
223 222
224/* Register MAL devices */ 223/* Register MAL devices */
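
[Editor's sketch: the two MAL hunks above drop the driver-private dcr_base and rely on the base carried inside dcr_host_t by the companion powerpc patches named in the merge. Those patches are not in this diff, so the accessor shapes below are assumptions for illustration, not copies of the real definitions.]

/*
 * Assumed shape of the native-DCR accessors after "Add dcr_host_t.base
 * in dcr_read()/dcr_write()"; field and macro bodies are illustrative.
 */
typedef struct {
	unsigned int base;	/* first DCR number of the mapped block */
} dcr_host_t;

#define dcr_read(host, dcr_n)		mfdcr((dcr_n) + (host).base)
#define dcr_write(host, dcr_n, value)	mtdcr((dcr_n) + (host).base, value)

/* so get_mal_dcrn(mal, reg) now ends up as mfdcr(reg + mal->dcr_host.base) */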
diff --git a/drivers/net/irda/donauboe.c b/drivers/net/irda/donauboe.c
index 3e5eca1aa987..a82d8f98383d 100644
--- a/drivers/net/irda/donauboe.c
+++ b/drivers/net/irda/donauboe.c
@@ -840,7 +840,7 @@ toshoboe_probe (struct toshoboe_cb *self)
840 840
841 /* test 1: SIR filter and back to back */ 841 /* test 1: SIR filter and back to back */
842 842
843 for (j = 0; j < (sizeof (bauds) / sizeof (int)); ++j) 843 for (j = 0; j < ARRAY_SIZE(bauds); ++j)
844 { 844 {
845 int fir = (j > 1); 845 int fir = (j > 1);
846 toshoboe_stopchip (self); 846 toshoboe_stopchip (self);
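
[Editor's note: several hunks in this merge (donauboe above, ne-h8300 further down) replace open-coded sizeof divisions with ARRAY_SIZE(). The macro boils down to the line below; the in-tree definition in <linux/kernel.h> additionally adds a compile-time check that the argument really is an array. The bauds[] values here are made up for illustration.]

#define ARRAY_SIZE(arr)	(sizeof(arr) / sizeof((arr)[0]))

static const int bauds[] = { 9600, 57600, 115200 };	/* illustrative values */

/* ARRAY_SIZE(bauds) == 3, and it stays correct when entries are added,
 * which is exactly why the open-coded divisions were converted. */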
diff --git a/drivers/net/jazzsonic.c b/drivers/net/jazzsonic.c
index d3825c8ee994..5c154fe13859 100644
--- a/drivers/net/jazzsonic.c
+++ b/drivers/net/jazzsonic.c
@@ -208,7 +208,6 @@ static int __init jazz_sonic_probe(struct platform_device *pdev)
208 struct sonic_local *lp; 208 struct sonic_local *lp;
209 struct resource *res; 209 struct resource *res;
210 int err = 0; 210 int err = 0;
211 int i;
212 DECLARE_MAC_BUF(mac); 211 DECLARE_MAC_BUF(mac);
213 212
214 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 213 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
diff --git a/drivers/net/mipsnet.c b/drivers/net/mipsnet.c
index d593175ab6f0..37707a0c0498 100644
--- a/drivers/net/mipsnet.c
+++ b/drivers/net/mipsnet.c
@@ -7,12 +7,12 @@
7#define DEBUG 7#define DEBUG
8 8
9#include <linux/init.h> 9#include <linux/init.h>
10#include <linux/io.h>
10#include <linux/kernel.h> 11#include <linux/kernel.h>
11#include <linux/module.h> 12#include <linux/module.h>
12#include <linux/netdevice.h> 13#include <linux/netdevice.h>
13#include <linux/etherdevice.h> 14#include <linux/etherdevice.h>
14#include <linux/platform_device.h> 15#include <linux/platform_device.h>
15#include <asm/io.h>
16#include <asm/mips-boards/simint.h> 16#include <asm/mips-boards/simint.h>
17 17
18#include "mipsnet.h" /* actual device IO mapping */ 18#include "mipsnet.h" /* actual device IO mapping */
@@ -33,9 +33,8 @@ static int ioiocpy_frommipsnet(struct net_device *dev, unsigned char *kdata,
33 if (available_len < len) 33 if (available_len < len)
34 return -EFAULT; 34 return -EFAULT;
35 35
36 for (; len > 0; len--, kdata++) { 36 for (; len > 0; len--, kdata++)
37 *kdata = inb(mipsnet_reg_address(dev, rxDataBuffer)); 37 *kdata = inb(mipsnet_reg_address(dev, rxDataBuffer));
38 }
39 38
40 return inl(mipsnet_reg_address(dev, rxDataCount)); 39 return inl(mipsnet_reg_address(dev, rxDataCount));
41} 40}
@@ -47,16 +46,15 @@ static inline ssize_t mipsnet_put_todevice(struct net_device *dev,
47 char *buf_ptr = skb->data; 46 char *buf_ptr = skb->data;
48 47
49 pr_debug("%s: %s(): telling MIPSNET txDataCount(%d)\n", 48 pr_debug("%s: %s(): telling MIPSNET txDataCount(%d)\n",
50 dev->name, __FUNCTION__, skb->len); 49 dev->name, __FUNCTION__, skb->len);
51 50
52 outl(skb->len, mipsnet_reg_address(dev, txDataCount)); 51 outl(skb->len, mipsnet_reg_address(dev, txDataCount));
53 52
54 pr_debug("%s: %s(): sending data to MIPSNET txDataBuffer(%d)\n", 53 pr_debug("%s: %s(): sending data to MIPSNET txDataBuffer(%d)\n",
55 dev->name, __FUNCTION__, skb->len); 54 dev->name, __FUNCTION__, skb->len);
56 55
57 for (; count_to_go; buf_ptr++, count_to_go--) { 56 for (; count_to_go; buf_ptr++, count_to_go--)
58 outb(*buf_ptr, mipsnet_reg_address(dev, txDataBuffer)); 57 outb(*buf_ptr, mipsnet_reg_address(dev, txDataBuffer));
59 }
60 58
61 dev->stats.tx_packets++; 59 dev->stats.tx_packets++;
62 dev->stats.tx_bytes += skb->len; 60 dev->stats.tx_bytes += skb->len;
@@ -67,7 +65,7 @@ static inline ssize_t mipsnet_put_todevice(struct net_device *dev,
67static int mipsnet_xmit(struct sk_buff *skb, struct net_device *dev) 65static int mipsnet_xmit(struct sk_buff *skb, struct net_device *dev)
68{ 66{
69 pr_debug("%s:%s(): transmitting %d bytes\n", 67 pr_debug("%s:%s(): transmitting %d bytes\n",
70 dev->name, __FUNCTION__, skb->len); 68 dev->name, __FUNCTION__, skb->len);
71 69
72 /* Only one packet at a time. Once TXDONE interrupt is serviced, the 70 /* Only one packet at a time. Once TXDONE interrupt is serviced, the
73 * queue will be restarted. 71 * queue will be restarted.
@@ -83,7 +81,8 @@ static inline ssize_t mipsnet_get_fromdev(struct net_device *dev, size_t count)
83 struct sk_buff *skb; 81 struct sk_buff *skb;
84 size_t len = count; 82 size_t len = count;
85 83
86 if (!(skb = alloc_skb(len + 2, GFP_KERNEL))) { 84 skb = alloc_skb(len + 2, GFP_KERNEL);
85 if (!skb) {
87 dev->stats.rx_dropped++; 86 dev->stats.rx_dropped++;
88 return -ENOMEM; 87 return -ENOMEM;
89 } 88 }
@@ -96,7 +95,7 @@ static inline ssize_t mipsnet_get_fromdev(struct net_device *dev, size_t count)
96 skb->ip_summed = CHECKSUM_UNNECESSARY; 95 skb->ip_summed = CHECKSUM_UNNECESSARY;
97 96
98 pr_debug("%s:%s(): pushing RXed data to kernel\n", 97 pr_debug("%s:%s(): pushing RXed data to kernel\n",
99 dev->name, __FUNCTION__); 98 dev->name, __FUNCTION__);
100 netif_rx(skb); 99 netif_rx(skb);
101 100
102 dev->stats.rx_packets++; 101 dev->stats.rx_packets++;
@@ -114,42 +113,44 @@ static irqreturn_t mipsnet_interrupt(int irq, void *dev_id)
114 113
115 if (irq == dev->irq) { 114 if (irq == dev->irq) {
116 pr_debug("%s:%s(): irq %d for device\n", 115 pr_debug("%s:%s(): irq %d for device\n",
117 dev->name, __FUNCTION__, irq); 116 dev->name, __FUNCTION__, irq);
118 117
119 retval = IRQ_HANDLED; 118 retval = IRQ_HANDLED;
120 119
121 interruptFlags = 120 interruptFlags =
122 inl(mipsnet_reg_address(dev, interruptControl)); 121 inl(mipsnet_reg_address(dev, interruptControl));
123 pr_debug("%s:%s(): intCtl=0x%016llx\n", dev->name, 122 pr_debug("%s:%s(): intCtl=0x%016llx\n", dev->name,
124 __FUNCTION__, interruptFlags); 123 __FUNCTION__, interruptFlags);
125 124
126 if (interruptFlags & MIPSNET_INTCTL_TXDONE) { 125 if (interruptFlags & MIPSNET_INTCTL_TXDONE) {
127 pr_debug("%s:%s(): got TXDone\n", 126 pr_debug("%s:%s(): got TXDone\n",
128 dev->name, __FUNCTION__); 127 dev->name, __FUNCTION__);
129 outl(MIPSNET_INTCTL_TXDONE, 128 outl(MIPSNET_INTCTL_TXDONE,
130 mipsnet_reg_address(dev, interruptControl)); 129 mipsnet_reg_address(dev, interruptControl));
131 // only one packet at a time, we are done. 130 /* only one packet at a time, we are done. */
132 netif_wake_queue(dev); 131 netif_wake_queue(dev);
133 } else if (interruptFlags & MIPSNET_INTCTL_RXDONE) { 132 } else if (interruptFlags & MIPSNET_INTCTL_RXDONE) {
134 pr_debug("%s:%s(): got RX data\n", 133 pr_debug("%s:%s(): got RX data\n",
135 dev->name, __FUNCTION__); 134 dev->name, __FUNCTION__);
136 mipsnet_get_fromdev(dev, 135 mipsnet_get_fromdev(dev,
137 inl(mipsnet_reg_address(dev, rxDataCount))); 136 inl(mipsnet_reg_address(dev, rxDataCount)));
138 pr_debug("%s:%s(): clearing RX int\n", 137 pr_debug("%s:%s(): clearing RX int\n",
139 dev->name, __FUNCTION__); 138 dev->name, __FUNCTION__);
140 outl(MIPSNET_INTCTL_RXDONE, 139 outl(MIPSNET_INTCTL_RXDONE,
141 mipsnet_reg_address(dev, interruptControl)); 140 mipsnet_reg_address(dev, interruptControl));
142 141
143 } else if (interruptFlags & MIPSNET_INTCTL_TESTBIT) { 142 } else if (interruptFlags & MIPSNET_INTCTL_TESTBIT) {
144 pr_debug("%s:%s(): got test interrupt\n", 143 pr_debug("%s:%s(): got test interrupt\n",
145 dev->name, __FUNCTION__); 144 dev->name, __FUNCTION__);
146 // TESTBIT is cleared on read. 145 /*
147 // And takes effect after a write with 0 146 * TESTBIT is cleared on read.
147 * And takes effect after a write with 0
148 */
148 outl(0, mipsnet_reg_address(dev, interruptControl)); 149 outl(0, mipsnet_reg_address(dev, interruptControl));
149 } else { 150 } else {
150 pr_debug("%s:%s(): no valid flags 0x%016llx\n", 151 pr_debug("%s:%s(): no valid flags 0x%016llx\n",
151 dev->name, __FUNCTION__, interruptFlags); 152 dev->name, __FUNCTION__, interruptFlags);
152 // Maybe shared IRQ, just ignore, no clearing. 153 /* Maybe shared IRQ, just ignore, no clearing. */
153 retval = IRQ_NONE; 154 retval = IRQ_NONE;
154 } 155 }
155 156
@@ -159,7 +160,7 @@ static irqreturn_t mipsnet_interrupt(int irq, void *dev_id)
159 retval = IRQ_NONE; 160 retval = IRQ_NONE;
160 } 161 }
161 return retval; 162 return retval;
162} //mipsnet_interrupt() 163}
163 164
164static int mipsnet_open(struct net_device *dev) 165static int mipsnet_open(struct net_device *dev)
165{ 166{
@@ -171,18 +172,18 @@ static int mipsnet_open(struct net_device *dev)
171 172
172 if (err) { 173 if (err) {
173 pr_debug("%s: %s(): can't get irq %d\n", 174 pr_debug("%s: %s(): can't get irq %d\n",
174 dev->name, __FUNCTION__, dev->irq); 175 dev->name, __FUNCTION__, dev->irq);
175 release_region(dev->base_addr, MIPSNET_IO_EXTENT); 176 release_region(dev->base_addr, MIPSNET_IO_EXTENT);
176 return err; 177 return err;
177 } 178 }
178 179
179 pr_debug("%s: %s(): got IO region at 0x%04lx and irq %d for dev.\n", 180 pr_debug("%s: %s(): got IO region at 0x%04lx and irq %d for dev.\n",
180 dev->name, __FUNCTION__, dev->base_addr, dev->irq); 181 dev->name, __FUNCTION__, dev->base_addr, dev->irq);
181 182
182 183
183 netif_start_queue(dev); 184 netif_start_queue(dev);
184 185
185 // test interrupt handler 186 /* test interrupt handler */
186 outl(MIPSNET_INTCTL_TESTBIT, 187 outl(MIPSNET_INTCTL_TESTBIT,
187 mipsnet_reg_address(dev, interruptControl)); 188 mipsnet_reg_address(dev, interruptControl));
188 189
@@ -199,8 +200,6 @@ static int mipsnet_close(struct net_device *dev)
199 200
200static void mipsnet_set_mclist(struct net_device *dev) 201static void mipsnet_set_mclist(struct net_device *dev)
201{ 202{
202 // we don't do anything
203 return;
204} 203}
205 204
206static int __init mipsnet_probe(struct device *dev) 205static int __init mipsnet_probe(struct device *dev)
@@ -226,13 +225,13 @@ static int __init mipsnet_probe(struct device *dev)
226 */ 225 */
227 netdev->base_addr = 0x4200; 226 netdev->base_addr = 0x4200;
228 netdev->irq = MIPS_CPU_IRQ_BASE + MIPSCPU_INT_MB0 + 227 netdev->irq = MIPS_CPU_IRQ_BASE + MIPSCPU_INT_MB0 +
229 inl(mipsnet_reg_address(netdev, interruptInfo)); 228 inl(mipsnet_reg_address(netdev, interruptInfo));
230 229
231 // Get the io region now, get irq on open() 230 /* Get the io region now, get irq on open() */
232 if (!request_region(netdev->base_addr, MIPSNET_IO_EXTENT, "mipsnet")) { 231 if (!request_region(netdev->base_addr, MIPSNET_IO_EXTENT, "mipsnet")) {
233 pr_debug("%s: %s(): IO region {start: 0x%04lx, len: %d} " 232 pr_debug("%s: %s(): IO region {start: 0x%04lx, len: %d} "
234 "for dev is not available.\n", netdev->name, 233 "for dev is not available.\n", netdev->name,
235 __FUNCTION__, netdev->base_addr, MIPSNET_IO_EXTENT); 234 __FUNCTION__, netdev->base_addr, MIPSNET_IO_EXTENT);
236 err = -EBUSY; 235 err = -EBUSY;
237 goto out_free_netdev; 236 goto out_free_netdev;
238 } 237 }
diff --git a/drivers/net/mipsnet.h b/drivers/net/mipsnet.h
index 026c732024c9..0132c6714a40 100644
--- a/drivers/net/mipsnet.h
+++ b/drivers/net/mipsnet.h
@@ -9,32 +9,34 @@
9/* 9/*
10 * Id of this Net device, as seen by the core. 10 * Id of this Net device, as seen by the core.
11 */ 11 */
12#define MIPS_NET_DEV_ID ((uint64_t) \ 12#define MIPS_NET_DEV_ID ((uint64_t) \
13 ((uint64_t)'M'<< 0)| \ 13 ((uint64_t) 'M' << 0)| \
14 ((uint64_t)'I'<< 8)| \ 14 ((uint64_t) 'I' << 8)| \
15 ((uint64_t)'P'<<16)| \ 15 ((uint64_t) 'P' << 16)| \
16 ((uint64_t)'S'<<24)| \ 16 ((uint64_t) 'S' << 24)| \
17 ((uint64_t)'N'<<32)| \ 17 ((uint64_t) 'N' << 32)| \
18 ((uint64_t)'E'<<40)| \ 18 ((uint64_t) 'E' << 40)| \
19 ((uint64_t)'T'<<48)| \ 19 ((uint64_t) 'T' << 48)| \
20 ((uint64_t)'0'<<56)) 20 ((uint64_t) '0' << 56))
21 21
22/* 22/*
23 * Net status/control block as seen by sw in the core. 23 * Net status/control block as seen by sw in the core.
24 * (Why not use bit fields? can't be bothered with cross-platform struct 24 * (Why not use bit fields? can't be bothered with cross-platform struct
25 * packing.) 25 * packing.)
26 */ 26 */
27typedef struct _net_control_block { 27struct net_control_block {
28 /// dev info for probing 28 /*
29 /// reads as MIPSNET%d where %d is some form of version 29 * dev info for probing
30 uint64_t devId; /*0x00 */ 30 * reads as MIPSNET%d where %d is some form of version
31 */
32 uint64_t devId; /* 0x00 */
31 33
32 /* 34 /*
33 * read only busy flag. 35 * read only busy flag.
34 * Set and cleared by the Net Device to indicate that an rx or a tx 36 * Set and cleared by the Net Device to indicate that an rx or a tx
35 * is in progress. 37 * is in progress.
36 */ 38 */
37 uint32_t busy; /*0x08 */ 39 uint32_t busy; /* 0x08 */
38 40
39 /* 41 /*
40 * Set by the Net Device. 42 * Set by the Net Device.
@@ -43,16 +45,16 @@ typedef struct _net_control_block {
43 * rxDataBuffer. The value will decrease till 0 until all the data 45 * rxDataBuffer. The value will decrease till 0 until all the data
44 * from rxDataBuffer has been read. 46 * from rxDataBuffer has been read.
45 */ 47 */
46 uint32_t rxDataCount; /*0x0c */ 48 uint32_t rxDataCount; /* 0x0c */
47#define MIPSNET_MAX_RXTX_DATACOUNT (1<<16) 49#define MIPSNET_MAX_RXTX_DATACOUNT (1<<16)
48 50
49 /* 51 /*
50 * Settable from the MIPS core, cleared by the Net Device. 52 * Settable from the MIPS core, cleared by the Net Device. The core
51 * The core should set the number of bytes it wants to send, 53 * should set the number of bytes it wants to send, then it should
52 * then it should write those bytes of data to txDataBuffer. 54 * write those bytes of data to txDataBuffer. The device will clear
53 * The device will clear txDataCount once the data has been processed (not necessarily sent). 55 * txDataCount once the data has been processed (not necessarily sent).
54 */ 56 */
55 uint32_t txDataCount; /*0x10 */ 57 uint32_t txDataCount; /* 0x10 */
56 58
57 /* 59 /*
58 * Interrupt control 60 * Interrupt control
@@ -69,39 +71,42 @@ typedef struct _net_control_block {
69 * To clear the test interrupt, write 0 to this register. 71 * To clear the test interrupt, write 0 to this register.
70 */ 72 */
71 uint32_t interruptControl; /*0x14 */ 73 uint32_t interruptControl; /*0x14 */
72#define MIPSNET_INTCTL_TXDONE ((uint32_t)(1<< 0)) 74#define MIPSNET_INTCTL_TXDONE ((uint32_t)(1 << 0))
73#define MIPSNET_INTCTL_RXDONE ((uint32_t)(1<< 1)) 75#define MIPSNET_INTCTL_RXDONE ((uint32_t)(1 << 1))
74#define MIPSNET_INTCTL_TESTBIT ((uint32_t)(1<<31)) 76#define MIPSNET_INTCTL_TESTBIT ((uint32_t)(1 << 31))
75#define MIPSNET_INTCTL_ALLSOURCES (MIPSNET_INTCTL_TXDONE|MIPSNET_INTCTL_RXDONE|MIPSNET_INTCTL_TESTBIT) 77#define MIPSNET_INTCTL_ALLSOURCES (MIPSNET_INTCTL_TXDONE | \
78 MIPSNET_INTCTL_RXDONE | \
79 MIPSNET_INTCTL_TESTBIT)
76 80
77 /* 81 /*
78 * Readonly core-specific interrupt info for the device to signal the core. 82 * Readonly core-specific interrupt info for the device to signal the
79 * The meaning of the contents of this field might change. 83 * core. The meaning of the contents of this field might change.
80 */ 84 *
81 /*###\todo: the whole memIntf interrupt scheme is messy: the device should have 85 * TODO: the whole memIntf interrupt scheme is messy: the device should
82 * no control what so ever of what VPE/register set is being used. 86 * have no control what so ever of what VPE/register set is being
83 * The MemIntf should only expose interrupt lines, and something in the 87 * used. The MemIntf should only expose interrupt lines, and
84 * config should be responsible for the line<->core/vpe bindings. 88 * something in the config should be responsible for the
89 * line<->core/vpe bindings.
85 */ 90 */
86 uint32_t interruptInfo; /*0x18 */ 91 uint32_t interruptInfo; /* 0x18 */
87 92
88 /* 93 /*
89 * This is where the received data is read out. 94 * This is where the received data is read out.
90 * There is more data to read until rxDataReady is 0. 95 * There is more data to read until rxDataReady is 0.
91 * Only 1 byte at this regs offset is used. 96 * Only 1 byte at this regs offset is used.
92 */ 97 */
93 uint32_t rxDataBuffer; /*0x1c */ 98 uint32_t rxDataBuffer; /* 0x1c */
94 99
95 /* 100 /*
96 * This is where the data to transmit is written. 101 * This is where the data to transmit is written. Data should be
97 * Data should be written for the amount specified in the txDataCount register. 102 * written for the amount specified in the txDataCount register. Only
98 * Only 1 byte at this regs offset is used. 103 * 1 byte at this regs offset is used.
99 */ 104 */
100 uint32_t txDataBuffer; /*0x20 */ 105 uint32_t txDataBuffer; /* 0x20 */
101} MIPS_T_NetControl; 106};
102 107
103#define MIPSNET_IO_EXTENT 0x40 /* being generous */ 108#define MIPSNET_IO_EXTENT 0x40 /* being generous */
104 109
105#define field_offset(field) ((int)&((MIPS_T_NetControl*)(0))->field) 110#define field_offset(field) (offsetof(struct net_control_block, field))
106 111
107#endif /* __MIPSNET_H */ 112#endif /* __MIPSNET_H */
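
[Editor's sketch: the mipsnet.h cleanup above replaces the cast-of-NULL offset hack with offsetof(). mipsnet_reg_address(), which the .c hunks use to turn those offsets into port numbers, is not shown in this diff, so the helper below is an assumption about its typical shape rather than a copy of it.]

/*
 * Illustrative only: turning a field_offset() value into a port I/O
 * address.  my_reg_address() is a hypothetical name.
 */
#define my_reg_address(dev, field) \
	((dev)->base_addr + field_offset(field))

/* e.g. the pending RX byte count would be read as
 *	inl(my_reg_address(dev, rxDataCount));
 */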
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index e8afa101433e..64c8151f2004 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -75,7 +75,7 @@
75#include "myri10ge_mcp.h" 75#include "myri10ge_mcp.h"
76#include "myri10ge_mcp_gen_header.h" 76#include "myri10ge_mcp_gen_header.h"
77 77
78#define MYRI10GE_VERSION_STR "1.3.2-1.269" 78#define MYRI10GE_VERSION_STR "1.3.2-1.287"
79 79
80MODULE_DESCRIPTION("Myricom 10G driver (10GbE)"); 80MODULE_DESCRIPTION("Myricom 10G driver (10GbE)");
81MODULE_AUTHOR("Maintainer: help@myri.com"); 81MODULE_AUTHOR("Maintainer: help@myri.com");
@@ -214,6 +214,8 @@ struct myri10ge_priv {
214 unsigned long serial_number; 214 unsigned long serial_number;
215 int vendor_specific_offset; 215 int vendor_specific_offset;
216 int fw_multicast_support; 216 int fw_multicast_support;
217 unsigned long features;
218 u32 max_tso6;
217 u32 read_dma; 219 u32 read_dma;
218 u32 write_dma; 220 u32 write_dma;
219 u32 read_write_dma; 221 u32 read_write_dma;
@@ -311,6 +313,7 @@ MODULE_PARM_DESC(myri10ge_wcfifo, "Enable WC Fifo when WC is enabled\n");
311#define myri10ge_pio_copy(to,from,size) __iowrite64_copy(to,from,size/8) 313#define myri10ge_pio_copy(to,from,size) __iowrite64_copy(to,from,size/8)
312 314
313static void myri10ge_set_multicast_list(struct net_device *dev); 315static void myri10ge_set_multicast_list(struct net_device *dev);
316static int myri10ge_sw_tso(struct sk_buff *skb, struct net_device *dev);
314 317
315static inline void put_be32(__be32 val, __be32 __iomem * p) 318static inline void put_be32(__be32 val, __be32 __iomem * p)
316{ 319{
@@ -612,6 +615,7 @@ static int myri10ge_load_firmware(struct myri10ge_priv *mgp)
612 __be32 buf[16]; 615 __be32 buf[16];
613 u32 dma_low, dma_high, size; 616 u32 dma_low, dma_high, size;
614 int status, i; 617 int status, i;
618 struct myri10ge_cmd cmd;
615 619
616 size = 0; 620 size = 0;
617 status = myri10ge_load_hotplug_firmware(mgp, &size); 621 status = myri10ge_load_hotplug_firmware(mgp, &size);
@@ -688,6 +692,14 @@ static int myri10ge_load_firmware(struct myri10ge_priv *mgp)
688 dev_info(&mgp->pdev->dev, "handoff confirmed\n"); 692 dev_info(&mgp->pdev->dev, "handoff confirmed\n");
689 myri10ge_dummy_rdma(mgp, 1); 693 myri10ge_dummy_rdma(mgp, 1);
690 694
695 /* probe for IPv6 TSO support */
696 mgp->features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_TSO;
697 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_MAX_TSO6_HDR_SIZE,
698 &cmd, 0);
699 if (status == 0) {
700 mgp->max_tso6 = cmd.data0;
701 mgp->features |= NETIF_F_TSO6;
702 }
691 return 0; 703 return 0;
692} 704}
693 705
@@ -1047,7 +1059,8 @@ myri10ge_rx_done(struct myri10ge_priv *mgp, struct myri10ge_rx_buf *rx,
1047 1059
1048 hlen = MYRI10GE_HLEN > len ? len : MYRI10GE_HLEN; 1060 hlen = MYRI10GE_HLEN > len ? len : MYRI10GE_HLEN;
1049 1061
1050 /* allocate an skb to attach the page(s) to. */ 1062 /* allocate an skb to attach the page(s) to. This is done
1063 * after trying LRO, so as to avoid skb allocation overheads */
1051 1064
1052 skb = netdev_alloc_skb(dev, MYRI10GE_HLEN + 16); 1065 skb = netdev_alloc_skb(dev, MYRI10GE_HLEN + 16);
1053 if (unlikely(skb == NULL)) { 1066 if (unlikely(skb == NULL)) {
@@ -1217,7 +1230,8 @@ static inline void myri10ge_check_statblock(struct myri10ge_priv *mgp)
1217 1230
1218static int myri10ge_poll(struct napi_struct *napi, int budget) 1231static int myri10ge_poll(struct napi_struct *napi, int budget)
1219{ 1232{
1220 struct myri10ge_priv *mgp = container_of(napi, struct myri10ge_priv, napi); 1233 struct myri10ge_priv *mgp =
1234 container_of(napi, struct myri10ge_priv, napi);
1221 struct net_device *netdev = mgp->dev; 1235 struct net_device *netdev = mgp->dev;
1222 struct myri10ge_rx_done *rx_done = &mgp->rx_done; 1236 struct myri10ge_rx_done *rx_done = &mgp->rx_done;
1223 int work_done; 1237 int work_done;
@@ -1382,6 +1396,18 @@ static int myri10ge_set_rx_csum(struct net_device *netdev, u32 csum_enabled)
1382 return 0; 1396 return 0;
1383} 1397}
1384 1398
1399static int myri10ge_set_tso(struct net_device *netdev, u32 tso_enabled)
1400{
1401 struct myri10ge_priv *mgp = netdev_priv(netdev);
1402 unsigned long flags = mgp->features & (NETIF_F_TSO6 | NETIF_F_TSO);
1403
1404 if (tso_enabled)
1405 netdev->features |= flags;
1406 else
1407 netdev->features &= ~flags;
1408 return 0;
1409}
1410
1385static const char myri10ge_gstrings_stats[][ETH_GSTRING_LEN] = { 1411static const char myri10ge_gstrings_stats[][ETH_GSTRING_LEN] = {
1386 "rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors", 1412 "rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors",
1387 "tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions", 1413 "tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions",
@@ -1506,7 +1532,7 @@ static const struct ethtool_ops myri10ge_ethtool_ops = {
1506 .set_rx_csum = myri10ge_set_rx_csum, 1532 .set_rx_csum = myri10ge_set_rx_csum,
1507 .set_tx_csum = ethtool_op_set_tx_hw_csum, 1533 .set_tx_csum = ethtool_op_set_tx_hw_csum,
1508 .set_sg = ethtool_op_set_sg, 1534 .set_sg = ethtool_op_set_sg,
1509 .set_tso = ethtool_op_set_tso, 1535 .set_tso = myri10ge_set_tso,
1510 .get_link = ethtool_op_get_link, 1536 .get_link = ethtool_op_get_link,
1511 .get_strings = myri10ge_get_strings, 1537 .get_strings = myri10ge_get_strings,
1512 .get_sset_count = myri10ge_get_sset_count, 1538 .get_sset_count = myri10ge_get_sset_count,
@@ -2164,7 +2190,8 @@ again:
2164 pseudo_hdr_offset = cksum_offset + skb->csum_offset; 2190 pseudo_hdr_offset = cksum_offset + skb->csum_offset;
2165 /* If the headers are excessively large, then we must 2191 /* If the headers are excessively large, then we must
2166 * fall back to a software checksum */ 2192 * fall back to a software checksum */
2167 if (unlikely(cksum_offset > 255 || pseudo_hdr_offset > 127)) { 2193 if (unlikely(!mss && (cksum_offset > 255 ||
2194 pseudo_hdr_offset > 127))) {
2168 if (skb_checksum_help(skb)) 2195 if (skb_checksum_help(skb))
2169 goto drop; 2196 goto drop;
2170 cksum_offset = 0; 2197 cksum_offset = 0;
@@ -2184,9 +2211,18 @@ again:
2184 /* negative cum_len signifies to the 2211 /* negative cum_len signifies to the
2185 * send loop that we are still in the 2212 * send loop that we are still in the
2186 * header portion of the TSO packet. 2213 * header portion of the TSO packet.
2187 * TSO header must be at most 134 bytes long */ 2214 * TSO header can be at most 1KB long */
2188 cum_len = -(skb_transport_offset(skb) + tcp_hdrlen(skb)); 2215 cum_len = -(skb_transport_offset(skb) + tcp_hdrlen(skb));
2189 2216
2217 /* for IPv6 TSO, the checksum offset stores the
2218 * TCP header length, to save the firmware from
2219 * the need to parse the headers */
2220 if (skb_is_gso_v6(skb)) {
2221 cksum_offset = tcp_hdrlen(skb);
2222 /* Can only handle headers <= max_tso6 long */
2223 if (unlikely(-cum_len > mgp->max_tso6))
2224 return myri10ge_sw_tso(skb, dev);
2225 }
2190 /* for TSO, pseudo_hdr_offset holds mss. 2226 /* for TSO, pseudo_hdr_offset holds mss.
2191 * The firmware figures out where to put 2227 * The firmware figures out where to put
2192 * the checksum by parsing the header. */ 2228 * the checksum by parsing the header. */
@@ -2301,10 +2337,12 @@ again:
2301 req++; 2337 req++;
2302 count++; 2338 count++;
2303 rdma_count++; 2339 rdma_count++;
2304 if (unlikely(cksum_offset > seglen)) 2340 if (cksum_offset != 0 && !(mss && skb_is_gso_v6(skb))) {
2305 cksum_offset -= seglen; 2341 if (unlikely(cksum_offset > seglen))
2306 else 2342 cksum_offset -= seglen;
2307 cksum_offset = 0; 2343 else
2344 cksum_offset = 0;
2345 }
2308 } 2346 }
2309 if (frag_idx == frag_cnt) 2347 if (frag_idx == frag_cnt)
2310 break; 2348 break;
@@ -2387,6 +2425,41 @@ drop:
2387 2425
2388} 2426}
2389 2427
2428static int myri10ge_sw_tso(struct sk_buff *skb, struct net_device *dev)
2429{
2430 struct sk_buff *segs, *curr;
2431 struct myri10ge_priv *mgp = dev->priv;
2432 int status;
2433
2434 segs = skb_gso_segment(skb, dev->features & ~NETIF_F_TSO6);
2435 if (unlikely(IS_ERR(segs)))
2436 goto drop;
2437
2438 while (segs) {
2439 curr = segs;
2440 segs = segs->next;
2441 curr->next = NULL;
2442 status = myri10ge_xmit(curr, dev);
2443 if (status != 0) {
2444 dev_kfree_skb_any(curr);
2445 if (segs != NULL) {
2446 curr = segs;
2447 segs = segs->next;
2448 curr->next = NULL;
2449 dev_kfree_skb_any(segs);
2450 }
2451 goto drop;
2452 }
2453 }
2454 dev_kfree_skb_any(skb);
2455 return 0;
2456
2457drop:
2458 dev_kfree_skb_any(skb);
2459 mgp->stats.tx_dropped += 1;
2460 return 0;
2461}
2462
2390static struct net_device_stats *myri10ge_get_stats(struct net_device *dev) 2463static struct net_device_stats *myri10ge_get_stats(struct net_device *dev)
2391{ 2464{
2392 struct myri10ge_priv *mgp = netdev_priv(dev); 2465 struct myri10ge_priv *mgp = netdev_priv(dev);
@@ -2706,7 +2779,6 @@ static void myri10ge_select_firmware(struct myri10ge_priv *mgp)
2706} 2779}
2707 2780
2708#ifdef CONFIG_PM 2781#ifdef CONFIG_PM
2709
2710static int myri10ge_suspend(struct pci_dev *pdev, pm_message_t state) 2782static int myri10ge_suspend(struct pci_dev *pdev, pm_message_t state)
2711{ 2783{
2712 struct myri10ge_priv *mgp; 2784 struct myri10ge_priv *mgp;
@@ -2787,7 +2859,6 @@ abort_with_enabled:
2787 return -EIO; 2859 return -EIO;
2788 2860
2789} 2861}
2790
2791#endif /* CONFIG_PM */ 2862#endif /* CONFIG_PM */
2792 2863
2793static u32 myri10ge_read_reboot(struct myri10ge_priv *mgp) 2864static u32 myri10ge_read_reboot(struct myri10ge_priv *mgp)
@@ -2954,8 +3025,7 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2954 3025
2955 mgp = netdev_priv(netdev); 3026 mgp = netdev_priv(netdev);
2956 mgp->dev = netdev; 3027 mgp->dev = netdev;
2957 netif_napi_add(netdev, &mgp->napi, 3028 netif_napi_add(netdev, &mgp->napi, myri10ge_poll, myri10ge_napi_weight);
2958 myri10ge_poll, myri10ge_napi_weight);
2959 mgp->pdev = pdev; 3029 mgp->pdev = pdev;
2960 mgp->csum_flag = MXGEFW_FLAGS_CKSUM; 3030 mgp->csum_flag = MXGEFW_FLAGS_CKSUM;
2961 mgp->pause = myri10ge_flow_control; 3031 mgp->pause = myri10ge_flow_control;
@@ -3077,7 +3147,7 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3077 netdev->change_mtu = myri10ge_change_mtu; 3147 netdev->change_mtu = myri10ge_change_mtu;
3078 netdev->set_multicast_list = myri10ge_set_multicast_list; 3148 netdev->set_multicast_list = myri10ge_set_multicast_list;
3079 netdev->set_mac_address = myri10ge_set_mac_address; 3149 netdev->set_mac_address = myri10ge_set_mac_address;
3080 netdev->features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_TSO; 3150 netdev->features = mgp->features;
3081 if (dac_enabled) 3151 if (dac_enabled)
3082 netdev->features |= NETIF_F_HIGHDMA; 3152 netdev->features |= NETIF_F_HIGHDMA;
3083 3153
diff --git a/drivers/net/myri10ge/myri10ge_mcp.h b/drivers/net/myri10ge/myri10ge_mcp.h
index a1d2a22296a9..58e57178c563 100644
--- a/drivers/net/myri10ge/myri10ge_mcp.h
+++ b/drivers/net/myri10ge/myri10ge_mcp.h
@@ -10,7 +10,7 @@ struct mcp_dma_addr {
10 __be32 low; 10 __be32 low;
11}; 11};
12 12
13/* 4 Bytes */ 13/* 4 Bytes. 8 Bytes for NDIS drivers. */
14struct mcp_slot { 14struct mcp_slot {
15 __sum16 checksum; 15 __sum16 checksum;
16 __be16 length; 16 __be16 length;
@@ -205,8 +205,87 @@ enum myri10ge_mcp_cmd_type {
205 /* same as DMA_TEST (same args) but abort with UNALIGNED on unaligned 205 /* same as DMA_TEST (same args) but abort with UNALIGNED on unaligned
206 * chipset */ 206 * chipset */
207 207
208 MXGEFW_CMD_UNALIGNED_STATUS 208 MXGEFW_CMD_UNALIGNED_STATUS,
209 /* return data = boolean, true if the chipset is known to be unaligned */ 209 /* return data = boolean, true if the chipset is known to be unaligned */
210
211 MXGEFW_CMD_ALWAYS_USE_N_BIG_BUFFERS,
212 /* data0 = number of big buffers to use. It must be 0 or a power of 2.
213 * 0 indicates that the NIC consumes as many buffers as they are required
214 * for packet. This is the default behavior.
215 * A power of 2 number indicates that the NIC always uses the specified
216 * number of buffers for each big receive packet.
217 * It is up to the driver to ensure that this value is big enough for
218 * the NIC to be able to receive maximum-sized packets.
219 */
220
221 MXGEFW_CMD_GET_MAX_RSS_QUEUES,
222 MXGEFW_CMD_ENABLE_RSS_QUEUES,
223 /* data0 = number of slices n (0, 1, ..., n-1) to enable
224 * data1 = interrupt mode. 0=share one INTx/MSI, 1=use one MSI-X per queue.
225 * If all queues share one interrupt, the driver must have set
226 * RSS_SHARED_INTERRUPT_DMA before enabling queues.
227 */
228 MXGEFW_CMD_GET_RSS_SHARED_INTERRUPT_MASK_OFFSET,
229 MXGEFW_CMD_SET_RSS_SHARED_INTERRUPT_DMA,
230 /* data0, data1 = bus address lsw, msw */
231 MXGEFW_CMD_GET_RSS_TABLE_OFFSET,
232 /* get the offset of the indirection table */
233 MXGEFW_CMD_SET_RSS_TABLE_SIZE,
234 /* set the size of the indirection table */
235 MXGEFW_CMD_GET_RSS_KEY_OFFSET,
236 /* get the offset of the secret key */
237 MXGEFW_CMD_RSS_KEY_UPDATED,
238 /* tell nic that the secret key's been updated */
239 MXGEFW_CMD_SET_RSS_ENABLE,
240 /* data0 = enable/disable rss
241 * 0: disable rss. nic does not distribute receive packets.
242 * 1: enable rss. nic distributes receive packets among queues.
243 * data1 = hash type
244 * 1: IPV4
245 * 2: TCP_IPV4
246 * 3: IPV4 | TCP_IPV4
247 */
248
249 MXGEFW_CMD_GET_MAX_TSO6_HDR_SIZE,
250 /* Return data = the max. size of the entire headers of an IPv6 TSO packet.
251 * If the header size of an IPv6 TSO packet is larger than the specified
252 * value, then the driver must not use TSO.
253 * This size restriction only applies to IPv6 TSO.
254 * For IPv4 TSO, the maximum size of the headers is fixed, and the NIC
255 * always has enough header buffer to store maximum-sized headers.
256 */
257
258 MXGEFW_CMD_SET_TSO_MODE,
259 /* data0 = TSO mode.
260 * 0: Linux/FreeBSD style (NIC default)
261 * 1: NDIS/NetBSD style
262 */
263
264 MXGEFW_CMD_MDIO_READ,
265 /* data0 = dev_addr (PMA/PMD or PCS ...), data1 = register/addr */
266 MXGEFW_CMD_MDIO_WRITE,
267 /* data0 = dev_addr, data1 = register/addr, data2 = value */
268
269 MXGEFW_CMD_XFP_I2C_READ,
270 /* Starts to get a fresh copy of one byte or of the whole xfp i2c table; the
271 * obtained data is cached inside the xaui-xfi chip:
272 * data0 : "all" flag : 0 => get one byte, 1=> get 256 bytes,
273 * data1 : if (data0 == 0): index of byte to refresh [ not used otherwise ]
274 * The operation might take ~1ms for a single byte or ~65ms when refreshing all 256 bytes
275 * During the i2c operation, MXGEFW_CMD_XFP_I2C_READ or MXGEFW_CMD_XFP_BYTE attempts
276 * will return MXGEFW_CMD_ERROR_BUSY
277 */
278 MXGEFW_CMD_XFP_BYTE,
279 /* Return the last obtained copy of a given byte in the xfp i2c table
280 * (copy cached during the last relevant MXGEFW_CMD_XFP_I2C_READ)
281 * data0 : index of the desired table entry
282 * Return data = the byte stored at the requested index in the table
283 */
284
285 MXGEFW_CMD_GET_VPUMP_OFFSET,
286 /* Return data = NIC memory offset of mcp_vpump_public_global */
287 MXGEFW_CMD_RESET_VPUMP,
288 /* Resets the VPUMP state */
210}; 289};
211 290
212enum myri10ge_mcp_cmd_status { 291enum myri10ge_mcp_cmd_status {
@@ -220,7 +299,10 @@ enum myri10ge_mcp_cmd_status {
220 MXGEFW_CMD_ERROR_BAD_PORT, 299 MXGEFW_CMD_ERROR_BAD_PORT,
221 MXGEFW_CMD_ERROR_RESOURCES, 300 MXGEFW_CMD_ERROR_RESOURCES,
222 MXGEFW_CMD_ERROR_MULTICAST, 301 MXGEFW_CMD_ERROR_MULTICAST,
223 MXGEFW_CMD_ERROR_UNALIGNED 302 MXGEFW_CMD_ERROR_UNALIGNED,
303 MXGEFW_CMD_ERROR_NO_MDIO,
304 MXGEFW_CMD_ERROR_XFP_FAILURE,
305 MXGEFW_CMD_ERROR_XFP_ABSENT
224}; 306};
225 307
226#define MXGEFW_OLD_IRQ_DATA_LEN 40 308#define MXGEFW_OLD_IRQ_DATA_LEN 40
diff --git a/drivers/net/natsemi.c b/drivers/net/natsemi.c
index 527f9dcc7f69..50e1ec67ef9c 100644
--- a/drivers/net/natsemi.c
+++ b/drivers/net/natsemi.c
@@ -1576,7 +1576,7 @@ static int netdev_open(struct net_device *dev)
1576 1576
1577 /* Set the timer to check for link beat. */ 1577 /* Set the timer to check for link beat. */
1578 init_timer(&np->timer); 1578 init_timer(&np->timer);
1579 np->timer.expires = jiffies + NATSEMI_TIMER_FREQ; 1579 np->timer.expires = round_jiffies(jiffies + NATSEMI_TIMER_FREQ);
1580 np->timer.data = (unsigned long)dev; 1580 np->timer.data = (unsigned long)dev;
1581 np->timer.function = &netdev_timer; /* timer handler */ 1581 np->timer.function = &netdev_timer; /* timer handler */
1582 add_timer(&np->timer); 1582 add_timer(&np->timer);
@@ -1856,7 +1856,11 @@ static void netdev_timer(unsigned long data)
1856 next_tick = 1; 1856 next_tick = 1;
1857 } 1857 }
1858 } 1858 }
1859 mod_timer(&np->timer, jiffies + next_tick); 1859
1860 if (next_tick > 1)
1861 mod_timer(&np->timer, round_jiffies(jiffies + next_tick));
1862 else
1863 mod_timer(&np->timer, jiffies + next_tick);
1860} 1864}
1861 1865
1862static void dump_ring(struct net_device *dev) 1866static void dump_ring(struct net_device *dev)
@@ -3310,13 +3314,19 @@ static int natsemi_resume (struct pci_dev *pdev)
3310{ 3314{
3311 struct net_device *dev = pci_get_drvdata (pdev); 3315 struct net_device *dev = pci_get_drvdata (pdev);
3312 struct netdev_private *np = netdev_priv(dev); 3316 struct netdev_private *np = netdev_priv(dev);
3317 int ret = 0;
3313 3318
3314 rtnl_lock(); 3319 rtnl_lock();
3315 if (netif_device_present(dev)) 3320 if (netif_device_present(dev))
3316 goto out; 3321 goto out;
3317 if (netif_running(dev)) { 3322 if (netif_running(dev)) {
3318 BUG_ON(!np->hands_off); 3323 BUG_ON(!np->hands_off);
3319 pci_enable_device(pdev); 3324 ret = pci_enable_device(pdev);
3325 if (ret < 0) {
3326 dev_err(&pdev->dev,
3327 "pci_enable_device() failed: %d\n", ret);
3328 goto out;
3329 }
3320 /* pci_power_on(pdev); */ 3330 /* pci_power_on(pdev); */
3321 3331
3322 napi_enable(&np->napi); 3332 napi_enable(&np->napi);
@@ -3331,12 +3341,12 @@ static int natsemi_resume (struct pci_dev *pdev)
3331 spin_unlock_irq(&np->lock); 3341 spin_unlock_irq(&np->lock);
3332 enable_irq(dev->irq); 3342 enable_irq(dev->irq);
3333 3343
3334 mod_timer(&np->timer, jiffies + 1*HZ); 3344 mod_timer(&np->timer, round_jiffies(jiffies + 1*HZ));
3335 } 3345 }
3336 netif_device_attach(dev); 3346 netif_device_attach(dev);
3337out: 3347out:
3338 rtnl_unlock(); 3348 rtnl_unlock();
3339 return 0; 3349 return ret;
3340} 3350}
3341 3351
3342#endif /* CONFIG_PM */ 3352#endif /* CONFIG_PM */
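The natsemi hunks above do two things: the resume path now propagates a pci_enable_device() failure instead of ignoring it, and the link-beat timer is scheduled with round_jiffies(), which rounds the expiry to the next whole second so low-precision periodic timers from many drivers can fire together and save wakeups. A minimal sketch of the round_jiffies() pattern, with illustrative names that are not taken from natsemi.c:

#include <linux/timer.h>
#include <linux/jiffies.h>

static struct timer_list example_timer;

static void example_timer_fn(unsigned long data)
{
	/* ... periodic housekeeping that needs no sub-second precision ... */

	/* re-arm, aligned to a full second like the driver above */
	mod_timer(&example_timer, round_jiffies(jiffies + 5 * HZ));
}

static void example_timer_start(void)
{
	init_timer(&example_timer);
	example_timer.function = example_timer_fn;
	example_timer.data = 0;
	example_timer.expires = round_jiffies(jiffies + 5 * HZ);
	add_timer(&example_timer);
}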
diff --git a/drivers/net/ne-h8300.c b/drivers/net/ne-h8300.c
index 368f2560856d..fbc7531d3c7d 100644
--- a/drivers/net/ne-h8300.c
+++ b/drivers/net/ne-h8300.c
@@ -93,7 +93,7 @@ static int __init init_reg_offset(struct net_device *dev,unsigned long base_addr
93 bus_width = *(volatile unsigned char *)ABWCR; 93 bus_width = *(volatile unsigned char *)ABWCR;
94 bus_width &= 1 << ((base_addr >> 21) & 7); 94 bus_width &= 1 << ((base_addr >> 21) & 7);
95 95
96 for (i = 0; i < sizeof(reg_offset) / sizeof(u32); i++) 96 for (i = 0; i < ARRAY_SIZE(reg_offset); i++)
97 if (bus_width == 0) 97 if (bus_width == 0)
98 reg_offset[i] = i * 2 + 1; 98 reg_offset[i] = i * 2 + 1;
99 else 99 else
@@ -115,7 +115,7 @@ static int h8300_ne_irq[] = {EXT_IRQ5};
115 115
116static inline int init_dev(struct net_device *dev) 116static inline int init_dev(struct net_device *dev)
117{ 117{
118 if (h8300_ne_count < (sizeof(h8300_ne_base) / sizeof(unsigned long))) { 118 if (h8300_ne_count < ARRAY_SIZE(h8300_ne_base)) {
119 dev->base_addr = h8300_ne_base[h8300_ne_count]; 119 dev->base_addr = h8300_ne_base[h8300_ne_count];
120 dev->irq = h8300_ne_irq[h8300_ne_count]; 120 dev->irq = h8300_ne_irq[h8300_ne_count];
121 h8300_ne_count++; 121 h8300_ne_count++;
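This is the first of several hunks in this merge (see also the tg3.c, de4x5.c and sdla.c hunks below) that replace open-coded sizeof(arr)/sizeof(element) with ARRAY_SIZE(). The macro is defined in <linux/kernel.h>, roughly as sizeof(arr) / sizeof((arr)[0]), and keeps working if the element type changes; the de4x5 case shows why the open-coded form is fragile, since it divided by sizeof(int) + sizeof(int *) and only happened to match the struct size. A small illustrative example, not driver code:

#include <linux/kernel.h>

static const unsigned long example_ports[] = { 0x250, 0x270, 0x280 };

static int example_port_is_valid(unsigned long port)
{
	size_t i;

	/* ARRAY_SIZE() stays correct even if example_ports changes type */
	for (i = 0; i < ARRAY_SIZE(example_ports); i++)
		if (example_ports[i] == port)
			return 1;
	return 0;
}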
diff --git a/drivers/net/saa9730.c b/drivers/net/saa9730.c
index 14361e885415..c65199df8a7f 100644
--- a/drivers/net/saa9730.c
+++ b/drivers/net/saa9730.c
@@ -97,13 +97,16 @@ static void evm_saa9730_unblock_lan_int(struct lan_saa9730_private *lp)
97 &lp->evm_saa9730_regs->InterruptBlock1); 97 &lp->evm_saa9730_regs->InterruptBlock1);
98} 98}
99 99
100static void __attribute_used__ show_saa9730_regs(struct lan_saa9730_private *lp) 100static void __used show_saa9730_regs(struct net_device *dev)
101{ 101{
102 struct lan_saa9730_private *lp = netdev_priv(dev);
102 int i, j; 103 int i, j;
104
103 printk("TxmBufferA = %p\n", lp->TxmBuffer[0][0]); 105 printk("TxmBufferA = %p\n", lp->TxmBuffer[0][0]);
104 printk("TxmBufferB = %p\n", lp->TxmBuffer[1][0]); 106 printk("TxmBufferB = %p\n", lp->TxmBuffer[1][0]);
105 printk("RcvBufferA = %p\n", lp->RcvBuffer[0][0]); 107 printk("RcvBufferA = %p\n", lp->RcvBuffer[0][0]);
106 printk("RcvBufferB = %p\n", lp->RcvBuffer[1][0]); 108 printk("RcvBufferB = %p\n", lp->RcvBuffer[1][0]);
109
107 for (i = 0; i < LAN_SAA9730_BUFFERS; i++) { 110 for (i = 0; i < LAN_SAA9730_BUFFERS; i++) {
108 for (j = 0; j < LAN_SAA9730_TXM_Q_SIZE; j++) { 111 for (j = 0; j < LAN_SAA9730_TXM_Q_SIZE; j++) {
109 printk("TxmBuffer[%d][%d] = %x\n", i, j, 112 printk("TxmBuffer[%d][%d] = %x\n", i, j,
@@ -146,11 +149,13 @@ static void __attribute_used__ show_saa9730_regs(struct lan_saa9730_private *lp)
146 readl(&lp->lan_saa9730_regs->RxCtl)); 149 readl(&lp->lan_saa9730_regs->RxCtl));
147 printk("lp->lan_saa9730_regs->RxStatus = %x\n", 150 printk("lp->lan_saa9730_regs->RxStatus = %x\n",
148 readl(&lp->lan_saa9730_regs->RxStatus)); 151 readl(&lp->lan_saa9730_regs->RxStatus));
152
149 for (i = 0; i < LAN_SAA9730_CAM_DWORDS; i++) { 153 for (i = 0; i < LAN_SAA9730_CAM_DWORDS; i++) {
150 writel(i, &lp->lan_saa9730_regs->CamAddress); 154 writel(i, &lp->lan_saa9730_regs->CamAddress);
151 printk("lp->lan_saa9730_regs->CamData = %x\n", 155 printk("lp->lan_saa9730_regs->CamData = %x\n",
152 readl(&lp->lan_saa9730_regs->CamData)); 156 readl(&lp->lan_saa9730_regs->CamData));
153 } 157 }
158
154 printk("dev->stats.tx_packets = %lx\n", dev->stats.tx_packets); 159 printk("dev->stats.tx_packets = %lx\n", dev->stats.tx_packets);
155 printk("dev->stats.tx_errors = %lx\n", dev->stats.tx_errors); 160 printk("dev->stats.tx_errors = %lx\n", dev->stats.tx_errors);
156 printk("dev->stats.tx_aborted_errors = %lx\n", 161 printk("dev->stats.tx_aborted_errors = %lx\n",
@@ -855,7 +860,7 @@ static void lan_saa9730_tx_timeout(struct net_device *dev)
855 /* Transmitter timeout, serious problems */ 860 /* Transmitter timeout, serious problems */
856 dev->stats.tx_errors++; 861 dev->stats.tx_errors++;
857 printk("%s: transmit timed out, reset\n", dev->name); 862 printk("%s: transmit timed out, reset\n", dev->name);
858 /*show_saa9730_regs(lp); */ 863 /*show_saa9730_regs(dev); */
859 lan_saa9730_restart(lp); 864 lan_saa9730_restart(lp);
860 865
861 dev->trans_start = jiffies; 866 dev->trans_start = jiffies;
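show_saa9730_regs() now takes the struct net_device rather than the private structure, so it can print dev->stats while still reaching the private area via netdev_priv(). A short sketch of that pattern with illustrative names (example_priv, example_dump are not from saa9730.c):

#include <linux/netdevice.h>

struct example_priv {
	u32 last_status;
};

static void example_dump(struct net_device *dev)
{
	/* recover the driver-private area embedded in the net_device */
	struct example_priv *lp = netdev_priv(dev);

	printk(KERN_DEBUG "%s: last_status=%x tx_packets=%lx\n",
	       dev->name, lp->last_status, dev->stats.tx_packets);
}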
diff --git a/drivers/net/tc35815.c b/drivers/net/tc35815.c
index a679f4310ce1..8038f2882c9b 100644
--- a/drivers/net/tc35815.c
+++ b/drivers/net/tc35815.c
@@ -1461,7 +1461,6 @@ static irqreturn_t tc35815_interrupt(int irq, void *dev_id)
1461 } 1461 }
1462 return IRQ_NONE; 1462 return IRQ_NONE;
1463#else 1463#else
1464 struct tc35815_local *lp = dev->priv;
1465 int handled; 1464 int handled;
1466 u32 status; 1465 u32 status;
1467 1466
diff --git a/drivers/net/tehuti.c b/drivers/net/tehuti.c
index 8d04654f0c59..4e1b84e6d66a 100644
--- a/drivers/net/tehuti.c
+++ b/drivers/net/tehuti.c
@@ -1906,7 +1906,7 @@ bdx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1906 1906
1907 /************** pci *****************/ 1907 /************** pci *****************/
1908 if ((err = pci_enable_device(pdev))) /* it trigers interrupt, dunno why. */ 1908 if ((err = pci_enable_device(pdev))) /* it trigers interrupt, dunno why. */
1909 RET(err); /* it's not a problem though */ 1909 goto err_pci; /* it's not a problem though */
1910 1910
1911 if (!(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK)) && 1911 if (!(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK)) &&
1912 !(err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK))) { 1912 !(err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK))) {
@@ -2076,6 +2076,7 @@ err_out_res:
2076 pci_release_regions(pdev); 2076 pci_release_regions(pdev);
2077err_dma: 2077err_dma:
2078 pci_disable_device(pdev); 2078 pci_disable_device(pdev);
2079err_pci:
2079 vfree(nic); 2080 vfree(nic);
2080 2081
2081 RET(err); 2082 RET(err);
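The tehuti fix plugs a leak: when pci_enable_device() failed, bdx_probe() returned directly and never freed the nic structure it had already allocated, so the error path now jumps to a new err_pci label that still reaches the vfree(). A minimal sketch of this goto-unwind probe pattern, with illustrative names and a placeholder allocation size:

#include <linux/pci.h>
#include <linux/vmalloc.h>
#include <linux/errno.h>

static int example_probe(struct pci_dev *pdev)
{
	void *nic;
	int err;

	nic = vmalloc(4096);		/* stand-in for the private struct */
	if (!nic)
		return -ENOMEM;

	err = pci_enable_device(pdev);
	if (err)
		goto err_pci;		/* nic must still be freed */

	err = pci_request_regions(pdev, "example");
	if (err)
		goto err_disable;

	return 0;

err_disable:
	pci_disable_device(pdev);
err_pci:
	vfree(nic);
	return err;
}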
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 30b1cca8144c..76efb3feffbf 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -9034,7 +9034,7 @@ static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
9034 int i; 9034 int i;
9035 u32 j; 9035 u32 j;
9036 9036
9037 for (i = 0; i < sizeof(test_pattern)/sizeof(u32); i++) { 9037 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
9038 for (j = 0; j < len; j += 4) { 9038 for (j = 0; j < len; j += 4) {
9039 u32 val; 9039 u32 val;
9040 9040
diff --git a/drivers/net/tulip/de4x5.c b/drivers/net/tulip/de4x5.c
index 9b9cd83fb8b6..41f34bb91cad 100644
--- a/drivers/net/tulip/de4x5.c
+++ b/drivers/net/tulip/de4x5.c
@@ -1041,7 +1041,7 @@ static struct InfoLeaf infoleaf_array[] = {
1041 {DC21142, dc21142_infoleaf}, 1041 {DC21142, dc21142_infoleaf},
1042 {DC21143, dc21143_infoleaf} 1042 {DC21143, dc21143_infoleaf}
1043}; 1043};
1044#define INFOLEAF_SIZE (sizeof(infoleaf_array)/(sizeof(int)+sizeof(int *))) 1044#define INFOLEAF_SIZE ARRAY_SIZE(infoleaf_array)
1045 1045
1046/* 1046/*
1047** List the SROM info block functions 1047** List the SROM info block functions
@@ -1056,7 +1056,7 @@ static int (*dc_infoblock[])(struct net_device *dev, u_char, u_char *) = {
1056 compact_infoblock 1056 compact_infoblock
1057}; 1057};
1058 1058
1059#define COMPACT (sizeof(dc_infoblock)/sizeof(int *) - 1) 1059#define COMPACT (ARRAY_SIZE(dc_infoblock) - 1)
1060 1060
1061/* 1061/*
1062** Miscellaneous defines... 1062** Miscellaneous defines...
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c
index d00e7d41f6a5..bec413ba9bca 100644
--- a/drivers/net/ucc_geth.c
+++ b/drivers/net/ucc_geth.c
@@ -63,7 +63,7 @@
63#define UGETH_MSG_DEFAULT (NETIF_MSG_IFUP << 1 ) - 1 63#define UGETH_MSG_DEFAULT (NETIF_MSG_IFUP << 1 ) - 1
64 64
65void uec_set_ethtool_ops(struct net_device *netdev); 65void uec_set_ethtool_ops(struct net_device *netdev);
66 66
67static DEFINE_SPINLOCK(ugeth_lock); 67static DEFINE_SPINLOCK(ugeth_lock);
68 68
69static struct { 69static struct {
@@ -3454,9 +3454,12 @@ static int ucc_geth_rx(struct ucc_geth_private *ugeth, u8 rxQ, int rx_work_limit
3454 u16 length, howmany = 0; 3454 u16 length, howmany = 0;
3455 u32 bd_status; 3455 u32 bd_status;
3456 u8 *bdBuffer; 3456 u8 *bdBuffer;
3457 struct net_device * dev;
3457 3458
3458 ugeth_vdbg("%s: IN", __FUNCTION__); 3459 ugeth_vdbg("%s: IN", __FUNCTION__);
3459 3460
3461 dev = ugeth->dev;
3462
3460 /* collect received buffers */ 3463 /* collect received buffers */
3461 bd = ugeth->rxBd[rxQ]; 3464 bd = ugeth->rxBd[rxQ];
3462 3465
diff --git a/drivers/net/wan/sdla.c b/drivers/net/wan/sdla.c
index b39a541b2509..05df0a345b60 100644
--- a/drivers/net/wan/sdla.c
+++ b/drivers/net/wan/sdla.c
@@ -1342,11 +1342,11 @@ static int sdla_set_config(struct net_device *dev, struct ifmap *map)
1342 if (flp->initialized) 1342 if (flp->initialized)
1343 return(-EINVAL); 1343 return(-EINVAL);
1344 1344
1345 for(i=0;i < sizeof(valid_port) / sizeof (int) ; i++) 1345 for(i=0; i < ARRAY_SIZE(valid_port); i++)
1346 if (valid_port[i] == map->base_addr) 1346 if (valid_port[i] == map->base_addr)
1347 break; 1347 break;
1348 1348
1349 if (i == sizeof(valid_port) / sizeof(int)) 1349 if (i == ARRAY_SIZE(valid_port))
1350 return(-EINVAL); 1350 return(-EINVAL);
1351 1351
1352 if (!request_region(map->base_addr, SDLA_IO_EXTENTS, dev->name)){ 1352 if (!request_region(map->base_addr, SDLA_IO_EXTENTS, dev->name)){
@@ -1487,12 +1487,12 @@ got_type:
1487 } 1487 }
1488 } 1488 }
1489 1489
1490 for(i=0;i < sizeof(valid_mem) / sizeof (int) ; i++) 1490 for(i=0; i < ARRAY_SIZE(valid_mem); i++)
1491 if (valid_mem[i] == map->mem_start) 1491 if (valid_mem[i] == map->mem_start)
1492 break; 1492 break;
1493 1493
1494 err = -EINVAL; 1494 err = -EINVAL;
1495 if (i == sizeof(valid_mem) / sizeof(int)) 1495 if (i == ARRAY_SIZE(valid_mem))
1496 goto fail2; 1496 goto fail2;
1497 1497
1498 if (flp->type == SDLA_S502A && (map->mem_start & 0xF000) >> 12 == 0x0E) 1498 if (flp->type == SDLA_S502A && (map->mem_start & 0xF000) >> 12 == 0x0E)
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index f464b82c7d5f..7fd505cc4f7a 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -74,22 +74,12 @@ struct netfront_info {
74 74
75 struct napi_struct napi; 75 struct napi_struct napi;
76 76
77 struct xen_netif_tx_front_ring tx;
78 struct xen_netif_rx_front_ring rx;
79
80 spinlock_t tx_lock;
81 spinlock_t rx_lock;
82
83 unsigned int evtchn; 77 unsigned int evtchn;
78 struct xenbus_device *xbdev;
84 79
85 /* Receive-ring batched refills. */ 80 spinlock_t tx_lock;
86#define RX_MIN_TARGET 8 81 struct xen_netif_tx_front_ring tx;
87#define RX_DFL_MIN_TARGET 64 82 int tx_ring_ref;
88#define RX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256)
89 unsigned rx_min_target, rx_max_target, rx_target;
90 struct sk_buff_head rx_batch;
91
92 struct timer_list rx_refill_timer;
93 83
94 /* 84 /*
95 * {tx,rx}_skbs store outstanding skbuffs. Free tx_skb entries 85 * {tx,rx}_skbs store outstanding skbuffs. Free tx_skb entries
@@ -108,14 +98,23 @@ struct netfront_info {
108 grant_ref_t grant_tx_ref[NET_TX_RING_SIZE]; 98 grant_ref_t grant_tx_ref[NET_TX_RING_SIZE];
109 unsigned tx_skb_freelist; 99 unsigned tx_skb_freelist;
110 100
101 spinlock_t rx_lock ____cacheline_aligned_in_smp;
102 struct xen_netif_rx_front_ring rx;
103 int rx_ring_ref;
104
105 /* Receive-ring batched refills. */
106#define RX_MIN_TARGET 8
107#define RX_DFL_MIN_TARGET 64
108#define RX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256)
109 unsigned rx_min_target, rx_max_target, rx_target;
110 struct sk_buff_head rx_batch;
111
112 struct timer_list rx_refill_timer;
113
111 struct sk_buff *rx_skbs[NET_RX_RING_SIZE]; 114 struct sk_buff *rx_skbs[NET_RX_RING_SIZE];
112 grant_ref_t gref_rx_head; 115 grant_ref_t gref_rx_head;
113 grant_ref_t grant_rx_ref[NET_RX_RING_SIZE]; 116 grant_ref_t grant_rx_ref[NET_RX_RING_SIZE];
114 117
115 struct xenbus_device *xbdev;
116 int tx_ring_ref;
117 int rx_ring_ref;
118
119 unsigned long rx_pfn_array[NET_RX_RING_SIZE]; 118 unsigned long rx_pfn_array[NET_RX_RING_SIZE];
120 struct multicall_entry rx_mcl[NET_RX_RING_SIZE+1]; 119 struct multicall_entry rx_mcl[NET_RX_RING_SIZE+1];
121 struct mmu_update rx_mmu[NET_RX_RING_SIZE]; 120 struct mmu_update rx_mmu[NET_RX_RING_SIZE];
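The netfront_info reshuffle groups the tx-path fields first and marks rx_lock with ____cacheline_aligned_in_smp, so the rx-path state starts on a fresh cache line and the tx and rx paths, which run under different locks and often on different CPUs, stop false-sharing a line. A sketch of the layout idea with an illustrative struct, not the driver's:

#include <linux/cache.h>
#include <linux/spinlock.h>

struct example_queue_pair {
	/* tx path */
	spinlock_t tx_lock;
	unsigned int tx_head;

	/* rx path starts on its own cache line on SMP builds */
	spinlock_t rx_lock ____cacheline_aligned_in_smp;
	unsigned int rx_head;
};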