aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/net
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/net')
-rw-r--r--drivers/net/3c523.c2
-rw-r--r--drivers/net/7990.c4
-rw-r--r--drivers/net/8139cp.c7
-rw-r--r--drivers/net/8139too.c9
-rw-r--r--drivers/net/Kconfig55
-rw-r--r--drivers/net/Makefile5
-rw-r--r--drivers/net/Space.c8
-rw-r--r--drivers/net/a2065.c4
-rw-r--r--drivers/net/ariadne.c2
-rw-r--r--drivers/net/arm/ep93xx_eth.c2
-rw-r--r--drivers/net/atari_bionet.c675
-rw-r--r--drivers/net/atari_pamsnet.c878
-rw-r--r--drivers/net/atl1/atl1.h1
-rw-r--r--drivers/net/atl1/atl1_main.c4
-rw-r--r--drivers/net/au1000_eth.c4
-rw-r--r--drivers/net/ax88796.c4
-rw-r--r--drivers/net/bnx2.c509
-rw-r--r--drivers/net/bnx2.h66
-rw-r--r--drivers/net/cassini.c12
-rw-r--r--drivers/net/cxgb3/version.h2
-rw-r--r--drivers/net/dl2k.c7
-rw-r--r--drivers/net/dl2k.h1
-rw-r--r--drivers/net/dm9000.c17
-rw-r--r--drivers/net/dummy.c82
-rw-r--r--drivers/net/e100.c8
-rw-r--r--drivers/net/e1000/e1000_main.c3
-rw-r--r--drivers/net/eepro100.c2
-rw-r--r--drivers/net/epic100.c2
-rw-r--r--drivers/net/fealnx.c4
-rw-r--r--drivers/net/fec.c2
-rw-r--r--drivers/net/forcedeth.c4
-rw-r--r--drivers/net/hamachi.c4
-rw-r--r--drivers/net/ibmveth.c2
-rw-r--r--drivers/net/ifb.c78
-rw-r--r--drivers/net/irda/kingsun-sir.c4
-rw-r--r--drivers/net/irda/vlsi_ir.c27
-rw-r--r--drivers/net/irda/vlsi_ir.h2
-rw-r--r--drivers/net/ixp2000/ixpdev.c2
-rw-r--r--drivers/net/lance.c4
-rw-r--r--drivers/net/macvlan.c496
-rw-r--r--drivers/net/mlx4/fw.c3
-rw-r--r--drivers/net/mlx4/fw.h1
-rw-r--r--drivers/net/mlx4/main.c1
-rw-r--r--drivers/net/mlx4/mlx4.h1
-rw-r--r--drivers/net/mlx4/qp.c21
-rw-r--r--drivers/net/mlx4/srq.c30
-rw-r--r--drivers/net/natsemi.c4
-rw-r--r--drivers/net/netxen/netxen_nic_main.c7
-rw-r--r--drivers/net/ni52.c2
-rw-r--r--drivers/net/ni65.c4
-rw-r--r--drivers/net/pci-skeleton.c2
-rw-r--r--drivers/net/pcnet32.c4
-rw-r--r--drivers/net/pppol2tp.c2486
-rw-r--r--drivers/net/s2io.c25
-rw-r--r--drivers/net/s2io.h1
-rw-r--r--drivers/net/saa9730.c4
-rw-r--r--drivers/net/sgiseeq.c2
-rw-r--r--drivers/net/sis190.c2
-rw-r--r--drivers/net/starfire.c4
-rw-r--r--drivers/net/sun3_82586.c2
-rw-r--r--drivers/net/sun3lance.c5
-rw-r--r--drivers/net/sunbmac.c2
-rw-r--r--drivers/net/sundance.c9
-rw-r--r--drivers/net/sunhme.c8
-rw-r--r--drivers/net/sunlance.c4
-rw-r--r--drivers/net/sunqe.c4
-rw-r--r--drivers/net/sunvnet.c1164
-rw-r--r--drivers/net/sunvnet.h70
-rw-r--r--drivers/net/tg3.c148
-rw-r--r--drivers/net/tg3.h9
-rw-r--r--drivers/net/tlan.c5
-rw-r--r--drivers/net/tulip/de4x5.c8
-rw-r--r--drivers/net/tulip/dmfe.c26
-rw-r--r--drivers/net/tulip/interrupt.c8
-rw-r--r--drivers/net/tulip/tulip_core.c15
-rw-r--r--drivers/net/tulip/winbond-840.c2
-rw-r--r--drivers/net/tulip/xircom_cb.c7
-rw-r--r--drivers/net/tulip/xircom_tulip_cb.c8
-rw-r--r--drivers/net/tun.c15
-rw-r--r--drivers/net/typhoon.c9
-rw-r--r--drivers/net/usb/catc.c2
-rw-r--r--drivers/net/usb/kaweth.c2
-rw-r--r--drivers/net/via-rhine.c17
-rw-r--r--drivers/net/via-velocity.c3
-rw-r--r--drivers/net/wan/Kconfig2
-rw-r--r--drivers/net/wan/pc300_drv.c4
-rw-r--r--drivers/net/wan/pc300too.c2
-rw-r--r--drivers/net/wan/pci200syn.c2
-rw-r--r--drivers/net/wireless/bcm43xx/bcm43xx_main.c6
-rw-r--r--drivers/net/wireless/hostap/hostap_pci.c2
-rw-r--r--drivers/net/wireless/hostap/hostap_plx.c3
-rw-r--r--drivers/net/wireless/prism54/islpci_hotplug.c4
-rw-r--r--drivers/net/wireless/wl3501_cs.c2
-rw-r--r--drivers/net/wireless/zd1211rw/zd_usb.c2
-rw-r--r--drivers/net/yellowfin.c2
95 files changed, 5177 insertions, 2014 deletions
diff --git a/drivers/net/3c523.c b/drivers/net/3c523.c
index da1a22c13865..ab18343e58ef 100644
--- a/drivers/net/3c523.c
+++ b/drivers/net/3c523.c
@@ -990,7 +990,7 @@ static void elmc_rcv_int(struct net_device *dev)
990 if (skb != NULL) { 990 if (skb != NULL) {
991 skb_reserve(skb, 2); /* 16 byte alignment */ 991 skb_reserve(skb, 2); /* 16 byte alignment */
992 skb_put(skb,totlen); 992 skb_put(skb,totlen);
993 eth_copy_and_sum(skb, (char *) p->base+(unsigned long) rbd->buffer,totlen,0); 993 skb_copy_to_linear_data(skb, (char *) p->base+(unsigned long) rbd->buffer,totlen);
994 skb->protocol = eth_type_trans(skb, dev); 994 skb->protocol = eth_type_trans(skb, dev);
995 netif_rx(skb); 995 netif_rx(skb);
996 dev->last_rx = jiffies; 996 dev->last_rx = jiffies;
diff --git a/drivers/net/7990.c b/drivers/net/7990.c
index 0877fc372f4b..e89ace109a5d 100644
--- a/drivers/net/7990.c
+++ b/drivers/net/7990.c
@@ -333,9 +333,9 @@ static int lance_rx (struct net_device *dev)
333 333
334 skb_reserve (skb, 2); /* 16 byte align */ 334 skb_reserve (skb, 2); /* 16 byte align */
335 skb_put (skb, len); /* make room */ 335 skb_put (skb, len); /* make room */
336 eth_copy_and_sum(skb, 336 skb_copy_to_linear_data(skb,
337 (unsigned char *)&(ib->rx_buf [lp->rx_new][0]), 337 (unsigned char *)&(ib->rx_buf [lp->rx_new][0]),
338 len, 0); 338 len);
339 skb->protocol = eth_type_trans (skb, dev); 339 skb->protocol = eth_type_trans (skb, dev);
340 netif_rx (skb); 340 netif_rx (skb);
341 dev->last_rx = jiffies; 341 dev->last_rx = jiffies;
diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c
index f5223eca2f7b..e970e64bf966 100644
--- a/drivers/net/8139cp.c
+++ b/drivers/net/8139cp.c
@@ -1822,7 +1822,6 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
1822 void __iomem *regs; 1822 void __iomem *regs;
1823 resource_size_t pciaddr; 1823 resource_size_t pciaddr;
1824 unsigned int addr_len, i, pci_using_dac; 1824 unsigned int addr_len, i, pci_using_dac;
1825 u8 pci_rev;
1826 1825
1827#ifndef MODULE 1826#ifndef MODULE
1828 static int version_printed; 1827 static int version_printed;
@@ -1830,13 +1829,11 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
1830 printk("%s", version); 1829 printk("%s", version);
1831#endif 1830#endif
1832 1831
1833 pci_read_config_byte(pdev, PCI_REVISION_ID, &pci_rev);
1834
1835 if (pdev->vendor == PCI_VENDOR_ID_REALTEK && 1832 if (pdev->vendor == PCI_VENDOR_ID_REALTEK &&
1836 pdev->device == PCI_DEVICE_ID_REALTEK_8139 && pci_rev < 0x20) { 1833 pdev->device == PCI_DEVICE_ID_REALTEK_8139 && pdev->revision < 0x20) {
1837 dev_err(&pdev->dev, 1834 dev_err(&pdev->dev,
1838 "This (id %04x:%04x rev %02x) is not an 8139C+ compatible chip\n", 1835 "This (id %04x:%04x rev %02x) is not an 8139C+ compatible chip\n",
1839 pdev->vendor, pdev->device, pci_rev); 1836 pdev->vendor, pdev->device, pdev->revision);
1840 dev_err(&pdev->dev, "Try the \"8139too\" driver instead.\n"); 1837 dev_err(&pdev->dev, "Try the \"8139too\" driver instead.\n");
1841 return -ENODEV; 1838 return -ENODEV;
1842 } 1839 }
diff --git a/drivers/net/8139too.c b/drivers/net/8139too.c
index a844b1fe2dc4..327eaa7b4999 100644
--- a/drivers/net/8139too.c
+++ b/drivers/net/8139too.c
@@ -931,7 +931,6 @@ static int __devinit rtl8139_init_one (struct pci_dev *pdev,
931 int i, addr_len, option; 931 int i, addr_len, option;
932 void __iomem *ioaddr; 932 void __iomem *ioaddr;
933 static int board_idx = -1; 933 static int board_idx = -1;
934 u8 pci_rev;
935 934
936 assert (pdev != NULL); 935 assert (pdev != NULL);
937 assert (ent != NULL); 936 assert (ent != NULL);
@@ -949,13 +948,11 @@ static int __devinit rtl8139_init_one (struct pci_dev *pdev,
949 } 948 }
950#endif 949#endif
951 950
952 pci_read_config_byte(pdev, PCI_REVISION_ID, &pci_rev);
953
954 if (pdev->vendor == PCI_VENDOR_ID_REALTEK && 951 if (pdev->vendor == PCI_VENDOR_ID_REALTEK &&
955 pdev->device == PCI_DEVICE_ID_REALTEK_8139 && pci_rev >= 0x20) { 952 pdev->device == PCI_DEVICE_ID_REALTEK_8139 && pdev->revision >= 0x20) {
956 dev_info(&pdev->dev, 953 dev_info(&pdev->dev,
957 "This (id %04x:%04x rev %02x) is an enhanced 8139C+ chip\n", 954 "This (id %04x:%04x rev %02x) is an enhanced 8139C+ chip\n",
958 pdev->vendor, pdev->device, pci_rev); 955 pdev->vendor, pdev->device, pdev->revision);
959 dev_info(&pdev->dev, 956 dev_info(&pdev->dev,
960 "Use the \"8139cp\" driver for improved performance and stability.\n"); 957 "Use the \"8139cp\" driver for improved performance and stability.\n");
961 } 958 }
@@ -2017,7 +2014,7 @@ no_early_rx:
2017#if RX_BUF_IDX == 3 2014#if RX_BUF_IDX == 3
2018 wrap_copy(skb, rx_ring, ring_offset+4, pkt_size); 2015 wrap_copy(skb, rx_ring, ring_offset+4, pkt_size);
2019#else 2016#else
2020 eth_copy_and_sum (skb, &rx_ring[ring_offset + 4], pkt_size, 0); 2017 skb_copy_to_linear_data (skb, &rx_ring[ring_offset + 4], pkt_size);
2021#endif 2018#endif
2022 skb_put (skb, pkt_size); 2019 skb_put (skb, pkt_size);
2023 2020
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 4b9b7fe41ee0..43d03178064d 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -25,6 +25,14 @@ menuconfig NETDEVICES
25# that for each of the symbols. 25# that for each of the symbols.
26if NETDEVICES 26if NETDEVICES
27 27
28config NETDEVICES_MULTIQUEUE
29 bool "Netdevice multiple hardware queue support"
30 ---help---
31 Say Y here if you want to allow the network stack to use multiple
32 hardware TX queues on an ethernet device.
33
34 Most people will say N here.
35
28config IFB 36config IFB
29 tristate "Intermediate Functional Block support" 37 tristate "Intermediate Functional Block support"
30 depends on NET_CLS_ACT 38 depends on NET_CLS_ACT
@@ -74,6 +82,16 @@ config BONDING
74 To compile this driver as a module, choose M here: the module 82 To compile this driver as a module, choose M here: the module
75 will be called bonding. 83 will be called bonding.
76 84
85config MACVLAN
86 tristate "MAC-VLAN support (EXPERIMENTAL)"
87 depends on EXPERIMENTAL
88 ---help---
89 This allows one to create virtual interfaces that map packets to
90 or from specific MAC addresses to a particular interface.
91
92 To compile this driver as a module, choose M here: the module
93 will be called macvlan.
94
77config EQUALIZER 95config EQUALIZER
78 tristate "EQL (serial line load balancing) support" 96 tristate "EQL (serial line load balancing) support"
79 ---help--- 97 ---help---
@@ -387,22 +405,6 @@ config ATARILANCE
387 on the AMD Lance chipset: RieblCard (with or without battery), or 405 on the AMD Lance chipset: RieblCard (with or without battery), or
388 PAMCard VME (also the version by Rhotron, with different addresses). 406 PAMCard VME (also the version by Rhotron, with different addresses).
389 407
390config ATARI_BIONET
391 tristate "BioNet-100 support"
392 depends on ATARI && ATARI_ACSI && BROKEN
393 help
394 Say Y to include support for BioData's BioNet-100 Ethernet adapter
395 for the ACSI port. The driver works (has to work...) with a polled
396 I/O scheme, so it's rather slow :-(
397
398config ATARI_PAMSNET
399 tristate "PAMsNet support"
400 depends on ATARI && ATARI_ACSI && BROKEN
401 help
402 Say Y to include support for the PAMsNet Ethernet adapter for the
403 ACSI port ("ACSI node"). The driver works (has to work...) with a
404 polled I/O scheme, so it's rather slow :-(
405
406config SUN3LANCE 408config SUN3LANCE
407 tristate "Sun3/Sun3x on-board LANCE support" 409 tristate "Sun3/Sun3x on-board LANCE support"
408 depends on SUN3 || SUN3X 410 depends on SUN3 || SUN3X
@@ -586,6 +588,12 @@ config CASSINI
586 Support for the Sun Cassini chip, aka Sun GigaSwift Ethernet. See also 588 Support for the Sun Cassini chip, aka Sun GigaSwift Ethernet. See also
587 <http://www.sun.com/products-n-solutions/hardware/docs/pdf/817-4341-10.pdf> 589 <http://www.sun.com/products-n-solutions/hardware/docs/pdf/817-4341-10.pdf>
588 590
591config SUNVNET
592 tristate "Sun Virtual Network support"
593 depends on SUN_LDOMS
594 help
595 Support for virtual network devices under Sun Logical Domains.
596
589config NET_VENDOR_3COM 597config NET_VENDOR_3COM
590 bool "3COM cards" 598 bool "3COM cards"
591 depends on ISA || EISA || MCA || PCI 599 depends on ISA || EISA || MCA || PCI
@@ -877,7 +885,7 @@ config NET_NETX
877 885
878config DM9000 886config DM9000
879 tristate "DM9000 support" 887 tristate "DM9000 support"
880 depends on ARM || MIPS 888 depends on ARM || BLACKFIN || MIPS
881 select CRC32 889 select CRC32
882 select MII 890 select MII
883 ---help--- 891 ---help---
@@ -2784,6 +2792,19 @@ config PPPOATM
2784 which can lead to bad results if the ATM peer loses state and 2792 which can lead to bad results if the ATM peer loses state and
2785 changes its encapsulation unilaterally. 2793 changes its encapsulation unilaterally.
2786 2794
2795config PPPOL2TP
2796 tristate "PPP over L2TP (EXPERIMENTAL)"
2797 depends on EXPERIMENTAL && PPP
2798 help
2799 Support for PPP-over-L2TP socket family. L2TP is a protocol
2800 used by ISPs and enterprises to tunnel PPP traffic over UDP
2801 tunnels. L2TP is replacing PPTP for VPN uses.
2802
2803 This kernel component handles only L2TP data packets: a
2804 userland daemon handles L2TP the control protocol (tunnel
2805 and session setup). One such daemon is OpenL2TP
2806 (http://openl2tp.sourceforge.net/).
2807
2787config SLIP 2808config SLIP
2788 tristate "SLIP (serial line) support" 2809 tristate "SLIP (serial line) support"
2789 ---help--- 2810 ---help---
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 1bbcbedad04a..eb4167622a6a 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -34,6 +34,7 @@ obj-$(CONFIG_SUNBMAC) += sunbmac.o
34obj-$(CONFIG_MYRI_SBUS) += myri_sbus.o 34obj-$(CONFIG_MYRI_SBUS) += myri_sbus.o
35obj-$(CONFIG_SUNGEM) += sungem.o sungem_phy.o 35obj-$(CONFIG_SUNGEM) += sungem.o sungem_phy.o
36obj-$(CONFIG_CASSINI) += cassini.o 36obj-$(CONFIG_CASSINI) += cassini.o
37obj-$(CONFIG_SUNVNET) += sunvnet.o
37 38
38obj-$(CONFIG_MACE) += mace.o 39obj-$(CONFIG_MACE) += mace.o
39obj-$(CONFIG_BMAC) += bmac.o 40obj-$(CONFIG_BMAC) += bmac.o
@@ -121,12 +122,14 @@ obj-$(CONFIG_PPP_DEFLATE) += ppp_deflate.o
121obj-$(CONFIG_PPP_BSDCOMP) += bsd_comp.o 122obj-$(CONFIG_PPP_BSDCOMP) += bsd_comp.o
122obj-$(CONFIG_PPP_MPPE) += ppp_mppe.o 123obj-$(CONFIG_PPP_MPPE) += ppp_mppe.o
123obj-$(CONFIG_PPPOE) += pppox.o pppoe.o 124obj-$(CONFIG_PPPOE) += pppox.o pppoe.o
125obj-$(CONFIG_PPPOL2TP) += pppox.o pppol2tp.o
124 126
125obj-$(CONFIG_SLIP) += slip.o 127obj-$(CONFIG_SLIP) += slip.o
126obj-$(CONFIG_SLHC) += slhc.o 128obj-$(CONFIG_SLHC) += slhc.o
127 129
128obj-$(CONFIG_DUMMY) += dummy.o 130obj-$(CONFIG_DUMMY) += dummy.o
129obj-$(CONFIG_IFB) += ifb.o 131obj-$(CONFIG_IFB) += ifb.o
132obj-$(CONFIG_MACVLAN) += macvlan.o
130obj-$(CONFIG_DE600) += de600.o 133obj-$(CONFIG_DE600) += de600.o
131obj-$(CONFIG_DE620) += de620.o 134obj-$(CONFIG_DE620) += de620.o
132obj-$(CONFIG_LANCE) += lance.o 135obj-$(CONFIG_LANCE) += lance.o
@@ -178,8 +181,6 @@ obj-$(CONFIG_MIPS_SIM_NET) += mipsnet.o
178obj-$(CONFIG_SGI_IOC3_ETH) += ioc3-eth.o 181obj-$(CONFIG_SGI_IOC3_ETH) += ioc3-eth.o
179obj-$(CONFIG_DECLANCE) += declance.o 182obj-$(CONFIG_DECLANCE) += declance.o
180obj-$(CONFIG_ATARILANCE) += atarilance.o 183obj-$(CONFIG_ATARILANCE) += atarilance.o
181obj-$(CONFIG_ATARI_BIONET) += atari_bionet.o
182obj-$(CONFIG_ATARI_PAMSNET) += atari_pamsnet.o
183obj-$(CONFIG_A2065) += a2065.o 184obj-$(CONFIG_A2065) += a2065.o
184obj-$(CONFIG_HYDRA) += hydra.o 185obj-$(CONFIG_HYDRA) += hydra.o
185obj-$(CONFIG_ARIADNE) += ariadne.o 186obj-$(CONFIG_ARIADNE) += ariadne.o
diff --git a/drivers/net/Space.c b/drivers/net/Space.c
index 1c3e293fbaf7..3b79c6cf21a3 100644
--- a/drivers/net/Space.c
+++ b/drivers/net/Space.c
@@ -75,8 +75,6 @@ extern struct net_device *atarilance_probe(int unit);
75extern struct net_device *sun3lance_probe(int unit); 75extern struct net_device *sun3lance_probe(int unit);
76extern struct net_device *sun3_82586_probe(int unit); 76extern struct net_device *sun3_82586_probe(int unit);
77extern struct net_device *apne_probe(int unit); 77extern struct net_device *apne_probe(int unit);
78extern struct net_device *bionet_probe(int unit);
79extern struct net_device *pamsnet_probe(int unit);
80extern struct net_device *cs89x0_probe(int unit); 78extern struct net_device *cs89x0_probe(int unit);
81extern struct net_device *hplance_probe(int unit); 79extern struct net_device *hplance_probe(int unit);
82extern struct net_device *bagetlance_probe(int unit); 80extern struct net_device *bagetlance_probe(int unit);
@@ -264,12 +262,6 @@ static struct devprobe2 m68k_probes[] __initdata = {
264#ifdef CONFIG_APNE /* A1200 PCMCIA NE2000 */ 262#ifdef CONFIG_APNE /* A1200 PCMCIA NE2000 */
265 {apne_probe, 0}, 263 {apne_probe, 0},
266#endif 264#endif
267#ifdef CONFIG_ATARI_BIONET /* Atari Bionet Ethernet board */
268 {bionet_probe, 0},
269#endif
270#ifdef CONFIG_ATARI_PAMSNET /* Atari PAMsNet Ethernet board */
271 {pamsnet_probe, 0},
272#endif
273#ifdef CONFIG_MVME147_NET /* MVME147 internal Ethernet */ 265#ifdef CONFIG_MVME147_NET /* MVME147 internal Ethernet */
274 {mvme147lance_probe, 0}, 266 {mvme147lance_probe, 0},
275#endif 267#endif
diff --git a/drivers/net/a2065.c b/drivers/net/a2065.c
index 81d5a374042a..a45de6975bfe 100644
--- a/drivers/net/a2065.c
+++ b/drivers/net/a2065.c
@@ -322,9 +322,9 @@ static int lance_rx (struct net_device *dev)
322 322
323 skb_reserve (skb, 2); /* 16 byte align */ 323 skb_reserve (skb, 2); /* 16 byte align */
324 skb_put (skb, len); /* make room */ 324 skb_put (skb, len); /* make room */
325 eth_copy_and_sum(skb, 325 skb_copy_to_linear_data(skb,
326 (unsigned char *)&(ib->rx_buf [lp->rx_new][0]), 326 (unsigned char *)&(ib->rx_buf [lp->rx_new][0]),
327 len, 0); 327 len);
328 skb->protocol = eth_type_trans (skb, dev); 328 skb->protocol = eth_type_trans (skb, dev);
329 netif_rx (skb); 329 netif_rx (skb);
330 dev->last_rx = jiffies; 330 dev->last_rx = jiffies;
diff --git a/drivers/net/ariadne.c b/drivers/net/ariadne.c
index a241ae7855a3..bc5a38a6705f 100644
--- a/drivers/net/ariadne.c
+++ b/drivers/net/ariadne.c
@@ -746,7 +746,7 @@ static int ariadne_rx(struct net_device *dev)
746 746
747 skb_reserve(skb,2); /* 16 byte align */ 747 skb_reserve(skb,2); /* 16 byte align */
748 skb_put(skb,pkt_len); /* Make room */ 748 skb_put(skb,pkt_len); /* Make room */
749 eth_copy_and_sum(skb, (char *)priv->rx_buff[entry], pkt_len,0); 749 skb_copy_to_linear_data(skb, (char *)priv->rx_buff[entry], pkt_len);
750 skb->protocol=eth_type_trans(skb,dev); 750 skb->protocol=eth_type_trans(skb,dev);
751#if 0 751#if 0
752 printk(KERN_DEBUG "RX pkt type 0x%04x from ", 752 printk(KERN_DEBUG "RX pkt type 0x%04x from ",
diff --git a/drivers/net/arm/ep93xx_eth.c b/drivers/net/arm/ep93xx_eth.c
index 2438c5bff237..f6ece1d43f6e 100644
--- a/drivers/net/arm/ep93xx_eth.c
+++ b/drivers/net/arm/ep93xx_eth.c
@@ -258,7 +258,7 @@ static int ep93xx_rx(struct net_device *dev, int *budget)
258 skb_reserve(skb, 2); 258 skb_reserve(skb, 2);
259 dma_sync_single(NULL, ep->descs->rdesc[entry].buf_addr, 259 dma_sync_single(NULL, ep->descs->rdesc[entry].buf_addr,
260 length, DMA_FROM_DEVICE); 260 length, DMA_FROM_DEVICE);
261 eth_copy_and_sum(skb, ep->rx_buf[entry], length, 0); 261 skb_copy_to_linear_data(skb, ep->rx_buf[entry], length);
262 skb_put(skb, length); 262 skb_put(skb, length);
263 skb->protocol = eth_type_trans(skb, dev); 263 skb->protocol = eth_type_trans(skb, dev);
264 264
diff --git a/drivers/net/atari_bionet.c b/drivers/net/atari_bionet.c
deleted file mode 100644
index 3d87bd2b4194..000000000000
--- a/drivers/net/atari_bionet.c
+++ /dev/null
@@ -1,675 +0,0 @@
1/* bionet.c BioNet-100 device driver for linux68k.
2 *
3 * Version: @(#)bionet.c 1.0 02/06/96
4 *
5 * Author: Hartmut Laue <laue@ifk-mp.uni-kiel.de>
6 * and Torsten Narjes <narjes@ifk-mp.uni-kiel.de>
7 *
8 * Little adaptions for integration into pl7 by Roman Hodek
9 *
10 * Some changes in bionet_poll_rx by Karl-Heinz Lohner
11 *
12 What is it ?
13 ------------
14 This driver controls the BIONET-100 LAN-Adapter which connects
15 an ATARI ST/TT via the ACSI-port to an Ethernet-based network.
16
17 This version can be compiled as a loadable module (See the
18 compile command at the bottom of this file).
19 At load time, you can optionally set the debugging level and the
20 fastest response time on the command line of 'insmod'.
21
22 'bionet_debug'
23 controls the amount of diagnostic messages:
24 0 : no messages
25 >0 : see code for meaning of printed messages
26
27 'bionet_min_poll_time' (always >=1)
28 gives the time (in jiffies) between polls. Low values
29 increase the system load (beware!)
30
31 When loaded, a net device with the name 'bio0' becomes available,
32 which can be controlled with the usual 'ifconfig' command.
33
34 It is possible to compile this driver into the kernel like other
35 (net) drivers. For this purpose, some source files (e.g. config-files
36 makefiles, Space.c) must be changed accordingly. (You may refer to
37 other drivers how to do it.) In this case, the device will be detected
38 at boot time and (probably) appear as 'eth0'.
39
40 This code is based on several sources:
41 - The driver code for a parallel port ethernet adapter by
42 Donald Becker (see file 'atp.c' from the PC linux distribution)
43 - The ACSI code by Roman Hodek for the ATARI-ACSI harddisk support
44 and DMA handling.
45 - Very limited information about moving packets in and out of the
46 BIONET-adapter from the TCP package for TOS by BioData GmbH.
47
48 Theory of Operation
49 -------------------
50 Because the ATARI DMA port is usually shared between several
51 devices (eg. harddisk, floppy) we cannot block the ACSI bus
52 while waiting for interrupts. Therefore we use a polling mechanism
53 to fetch packets from the adapter. For the same reason, we send
54 packets without checking that the previous packet has been sent to
55 the LAN. We rely on the higher levels of the networking code to detect
56 missing packets and resend them.
57
58 Before we access the ATARI DMA controller, we check if another
59 process is using the DMA. If not, we lock the DMA, perform one or
60 more packet transfers and unlock the DMA before returning.
61 We do not use 'stdma_lock' unconditionally because it is unclear
62 if the networking code can be set to sleep, which will happen if
63 another (possibly slow) device is using the DMA controller.
64
65 The polling is done via timer interrupts which periodically
66 'simulate' an interrupt from the Ethernet adapter. The time (in jiffies)
67 between polls varies depending on an estimate of the net activity.
68 The allowed range is given by the variable 'bionet_min_poll_time'
69 for the lower (fastest) limit and the constant 'MAX_POLL_TIME'
70 for the higher (slowest) limit.
71
72 Whenever a packet arrives, we switch to fastest response by setting
73 the polling time to its lowest limit. If the following poll fails,
74 because no packets have arrived, we increase the time for the next
75 poll. When the net activity is low, the polling time effectively
76 stays at its maximum value, resulting in the lowest load for the
77 machine.
78 */
79
80#define MAX_POLL_TIME 10
81
82static char version[] =
83 "bionet.c:v1.0 06-feb-96 (c) Hartmut Laue.\n";
84
85#include <linux/module.h>
86
87#include <linux/errno.h>
88#include <linux/kernel.h>
89#include <linux/jiffies.h>
90#include <linux/types.h>
91#include <linux/fcntl.h>
92#include <linux/interrupt.h>
93#include <linux/ioport.h>
94#include <linux/in.h>
95#include <linux/slab.h>
96#include <linux/string.h>
97#include <linux/delay.h>
98#include <linux/timer.h>
99#include <linux/init.h>
100#include <linux/bitops.h>
101
102#include <linux/netdevice.h>
103#include <linux/etherdevice.h>
104#include <linux/skbuff.h>
105
106#include <asm/setup.h>
107#include <asm/pgtable.h>
108#include <asm/system.h>
109#include <asm/io.h>
110#include <asm/dma.h>
111#include <asm/atarihw.h>
112#include <asm/atariints.h>
113#include <asm/atari_acsi.h>
114#include <asm/atari_stdma.h>
115
116
117/* use 0 for production, 1 for verification, >2 for debug
118 */
119#ifndef NET_DEBUG
120#define NET_DEBUG 0
121#endif
122/*
123 * Global variable 'bionet_debug'. Can be set at load time by 'insmod'
124 */
125unsigned int bionet_debug = NET_DEBUG;
126module_param(bionet_debug, int, 0);
127MODULE_PARM_DESC(bionet_debug, "bionet debug level (0-2)");
128MODULE_LICENSE("GPL");
129
130static unsigned int bionet_min_poll_time = 2;
131
132
133/* Information that need to be kept for each board.
134 */
135struct net_local {
136 struct net_device_stats stats;
137 long open_time; /* for debugging */
138 int poll_time; /* polling time varies with net load */
139};
140
141static struct nic_pkt_s { /* packet format */
142 unsigned char status;
143 unsigned char dummy;
144 unsigned char l_lo, l_hi;
145 unsigned char buffer[3000];
146} *nic_packet;
147unsigned char *phys_nic_packet;
148
149/* Index to functions, as function prototypes.
150 */
151static int bionet_open(struct net_device *dev);
152static int bionet_send_packet(struct sk_buff *skb, struct net_device *dev);
153static void bionet_poll_rx(struct net_device *);
154static int bionet_close(struct net_device *dev);
155static struct net_device_stats *net_get_stats(struct net_device *dev);
156static void bionet_tick(unsigned long);
157
158static DEFINE_TIMER(bionet_timer, bionet_tick, 0, 0);
159
160#define STRAM_ADDR(a) (((a) & 0xff000000) == 0)
161
162/* The following routines access the ethernet board connected to the
163 * ACSI port via the st_dma chip.
164 */
165#define NODE_ADR 0x60
166
167#define C_READ 8
168#define C_WRITE 0x0a
169#define C_GETEA 0x0f
170#define C_SETCR 0x0e
171
172static int
173sendcmd(unsigned int a0, unsigned int mod, unsigned int cmd) {
174 unsigned int c;
175
176 dma_wd.dma_mode_status = (mod | ((a0) ? 2 : 0) | 0x88);
177 dma_wd.fdc_acces_seccount = cmd;
178 dma_wd.dma_mode_status = (mod | 0x8a);
179
180 if( !acsi_wait_for_IRQ(HZ/2) ) /* wait for cmd ack */
181 return -1; /* timeout */
182
183 c = dma_wd.fdc_acces_seccount;
184 return (c & 0xff);
185}
186
187
188static void
189set_status(int cr) {
190 sendcmd(0,0x100,NODE_ADR | C_SETCR); /* CMD: SET CR */
191 sendcmd(1,0x100,cr);
192
193 dma_wd.dma_mode_status = 0x80;
194}
195
196static int
197get_status(unsigned char *adr) {
198 int i,c;
199
200 DISABLE_IRQ();
201 c = sendcmd(0,0x00,NODE_ADR | C_GETEA); /* CMD: GET ETH ADR*/
202 if( c < 0 ) goto gsend;
203
204 /* now read status bytes */
205
206 for (i=0; i<6; i++) {
207 dma_wd.fdc_acces_seccount = 0; /* request next byte */
208
209 if( !acsi_wait_for_IRQ(HZ/2) ) { /* wait for cmd ack */
210 c = -1;
211 goto gsend; /* timeout */
212 }
213 c = dma_wd.fdc_acces_seccount;
214 *adr++ = (unsigned char)c;
215 }
216 c = 1;
217gsend:
218 dma_wd.dma_mode_status = 0x80;
219 return c;
220}
221
222static irqreturn_t
223bionet_intr(int irq, void *data) {
224 return IRQ_HANDLED;
225}
226
227
228static int
229get_frame(unsigned long paddr, int odd) {
230 int c;
231 unsigned long flags;
232
233 DISABLE_IRQ();
234 local_irq_save(flags);
235
236 dma_wd.dma_mode_status = 0x9a;
237 dma_wd.dma_mode_status = 0x19a;
238 dma_wd.dma_mode_status = 0x9a;
239 dma_wd.fdc_acces_seccount = 0x04; /* sector count (was 5) */
240 dma_wd.dma_lo = (unsigned char)paddr;
241 paddr >>= 8;
242 dma_wd.dma_md = (unsigned char)paddr;
243 paddr >>= 8;
244 dma_wd.dma_hi = (unsigned char)paddr;
245 local_irq_restore(flags);
246
247 c = sendcmd(0,0x00,NODE_ADR | C_READ); /* CMD: READ */
248 if( c < 128 ) goto rend;
249
250 /* now read block */
251
252 c = sendcmd(1,0x00,odd); /* odd flag for address shift */
253 dma_wd.dma_mode_status = 0x0a;
254
255 if( !acsi_wait_for_IRQ(100) ) { /* wait for DMA to complete */
256 c = -1;
257 goto rend;
258 }
259 dma_wd.dma_mode_status = 0x8a;
260 dma_wd.dma_mode_status = 0x18a;
261 dma_wd.dma_mode_status = 0x8a;
262 c = dma_wd.fdc_acces_seccount;
263
264 dma_wd.dma_mode_status = 0x88;
265 c = dma_wd.fdc_acces_seccount;
266 c = 1;
267
268rend:
269 dma_wd.dma_mode_status = 0x80;
270 udelay(40);
271 acsi_wait_for_noIRQ(20);
272 return c;
273}
274
275
276static int
277hardware_send_packet(unsigned long paddr, int cnt) {
278 unsigned int c;
279 unsigned long flags;
280
281 DISABLE_IRQ();
282 local_irq_save(flags);
283
284 dma_wd.dma_mode_status = 0x19a;
285 dma_wd.dma_mode_status = 0x9a;
286 dma_wd.dma_mode_status = 0x19a;
287 dma_wd.dma_lo = (unsigned char)paddr;
288 paddr >>= 8;
289 dma_wd.dma_md = (unsigned char)paddr;
290 paddr >>= 8;
291 dma_wd.dma_hi = (unsigned char)paddr;
292
293 dma_wd.fdc_acces_seccount = 0x4; /* sector count */
294 local_irq_restore(flags);
295
296 c = sendcmd(0,0x100,NODE_ADR | C_WRITE); /* CMD: WRITE */
297 c = sendcmd(1,0x100,cnt&0xff);
298 c = sendcmd(1,0x100,cnt>>8);
299
300 /* now write block */
301
302 dma_wd.dma_mode_status = 0x10a; /* DMA enable */
303 if( !acsi_wait_for_IRQ(100) ) /* wait for DMA to complete */
304 goto end;
305
306 dma_wd.dma_mode_status = 0x19a; /* DMA disable ! */
307 c = dma_wd.fdc_acces_seccount;
308
309end:
310 c = sendcmd(1,0x100,0);
311 c = sendcmd(1,0x100,0);
312
313 dma_wd.dma_mode_status = 0x180;
314 udelay(40);
315 acsi_wait_for_noIRQ(20);
316 return( c & 0x02);
317}
318
319
320/* Check for a network adaptor of this type, and return '0' if one exists.
321 */
322struct net_device * __init bionet_probe(int unit)
323{
324 struct net_device *dev;
325 unsigned char station_addr[6];
326 static unsigned version_printed;
327 static int no_more_found; /* avoid "Probing for..." printed 4 times */
328 int i;
329 int err;
330
331 if (!MACH_IS_ATARI || no_more_found)
332 return ERR_PTR(-ENODEV);
333
334 dev = alloc_etherdev(sizeof(struct net_local));
335 if (!dev)
336 return ERR_PTR(-ENOMEM);
337 if (unit >= 0) {
338 sprintf(dev->name, "eth%d", unit);
339 netdev_boot_setup_check(dev);
340 }
341 SET_MODULE_OWNER(dev);
342
343 printk("Probing for BioNet 100 Adapter...\n");
344
345 stdma_lock(bionet_intr, NULL);
346 i = get_status(station_addr); /* Read the station address PROM. */
347 ENABLE_IRQ();
348 stdma_release();
349
350 /* Check the first three octets of the S.A. for the manufactor's code.
351 */
352
353 if( i < 0
354 || station_addr[0] != 'B'
355 || station_addr[1] != 'I'
356 || station_addr[2] != 'O' ) {
357 no_more_found = 1;
358 printk( "No BioNet 100 found.\n" );
359 free_netdev(dev);
360 return ERR_PTR(-ENODEV);
361 }
362
363 if (bionet_debug > 0 && version_printed++ == 0)
364 printk(version);
365
366 printk("%s: %s found, eth-addr: %02x-%02x-%02x:%02x-%02x-%02x.\n",
367 dev->name, "BioNet 100",
368 station_addr[0], station_addr[1], station_addr[2],
369 station_addr[3], station_addr[4], station_addr[5]);
370
371 /* Initialize the device structure. */
372
373 nic_packet = (struct nic_pkt_s *)acsi_buffer;
374 phys_nic_packet = (unsigned char *)phys_acsi_buffer;
375 if (bionet_debug > 0) {
376 printk("nic_packet at 0x%p, phys at 0x%p\n",
377 nic_packet, phys_nic_packet );
378 }
379
380 dev->open = bionet_open;
381 dev->stop = bionet_close;
382 dev->hard_start_xmit = bionet_send_packet;
383 dev->get_stats = net_get_stats;
384
385 /* Fill in the fields of the device structure with ethernet-generic
386 * values. This should be in a common file instead of per-driver.
387 */
388
389 for (i = 0; i < ETH_ALEN; i++) {
390#if 0
391 dev->broadcast[i] = 0xff;
392#endif
393 dev->dev_addr[i] = station_addr[i];
394 }
395 err = register_netdev(dev);
396 if (!err)
397 return dev;
398 free_netdev(dev);
399 return ERR_PTR(err);
400}
401
402/* Open/initialize the board. This is called (in the current kernel)
403 sometime after booting when the 'ifconfig' program is run.
404
405 This routine should set everything up anew at each open, even
406 registers that "should" only need to be set once at boot, so that
407 there is non-reboot way to recover if something goes wrong.
408 */
/* Open/initialize the board: reset the adapter under the ST-DMA lock and
 * start the polling timer that emulates receive interrupts.
 *
 * Returns 0 unconditionally (this historical API has no failure path here).
 */
static int
bionet_open(struct net_device *dev) {
	struct net_local *lp = netdev_priv(dev);

	if (bionet_debug > 0)
		printk("bionet_open\n");
	/* Serialize against other ACSI users (floppy, harddisk). */
	stdma_lock(bionet_intr, NULL);

	/* Reset the hardware here.
	 */
	set_status(4);
	lp->open_time = 0;	/*jiffies*/
	lp->poll_time = MAX_POLL_TIME;	/* start at the slowest polling rate */

	dev->tbusy = 0;
	dev->interrupt = 0;
	dev->start = 1;

	stdma_release();
	/* Arm the receive-polling timer; bionet_tick() re-arms it on each run. */
	bionet_timer.data = (long)dev;
	bionet_timer.expires = jiffies + lp->poll_time;
	add_timer(&bionet_timer);
	return 0;
}
433
/* Transmit one packet.  If the ST-DMA is busy the packet is dropped and
 * counted as a tx_error (we must not sleep here); higher layers are relied
 * upon to retransmit.  The skb is always consumed.  Returns 0.
 */
static int
bionet_send_packet(struct sk_buff *skb, struct net_device *dev) {
	struct net_local *lp = netdev_priv(dev);
	unsigned long flags;

	/* Block a timer-based transmit from overlapping. This could better be
	 * done with atomic_swap(1, dev->tbusy), but set_bit() works as well.
	 */
	local_irq_save(flags);

	if (stdma_islocked()) {
		local_irq_restore(flags);
		lp->stats.tx_errors++;
	}
	else {
		/* Pad short frames up to the Ethernet minimum. */
		int length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
		unsigned long buf = virt_to_phys(skb->data);
		int stat;

		stdma_lock(bionet_intr, NULL);
		local_irq_restore(flags);
		/* The DMA chip can only reach ST RAM; bounce the data through
		 * the statically allocated ACSI buffer otherwise. */
		if( !STRAM_ADDR(buf+length-1) ) {
			skb_copy_from_linear_data(skb, nic_packet->buffer,
						  length);
			buf = (unsigned long)&((struct nic_pkt_s *)phys_nic_packet)->buffer;
		}

		if (bionet_debug >1) {
			u_char *data = nic_packet->buffer, *p;
			int i;

			/* Ethertype lives at byte offset 12 (= u_short index 6). */
			printk( "%s: TX pkt type 0x%4x from ", dev->name,
				  ((u_short *)data)[6]);

			for( p = &data[6], i = 0; i < 6; i++ )
				printk("%02x%s", *p++,i != 5 ? ":" : "" );
			printk(" to ");

			/* Note: `"" "\n"` concatenates to "\n", so the last
			 * octet is followed by a newline. */
			for( p = data, i = 0; i < 6; i++ )
				printk("%02x%s", *p++,i != 5 ? ":" : "" "\n" );

			printk( "%s: ", dev->name );
			printk(" data %02x%02x %02x%02x%02x%02x %02x%02x%02x%02x %02x%02x%02x%02x %02x%02x%02x%02x"
			       " %02x%02x%02x%02x len %d\n",
				  data[12], data[13], data[14], data[15], data[16], data[17], data[18], data[19],
				  data[20], data[21], data[22], data[23], data[24], data[25], data[26], data[27],
				  data[28], data[29], data[30], data[31], data[32], data[33],
				  length );
		}
		/* Flush CPU caches to memory before the device DMAs from it. */
		dma_cache_maintenance(buf, length, 1);

		stat = hardware_send_packet(buf, length);
		ENABLE_IRQ();
		stdma_release();

		dev->trans_start = jiffies;
		dev->tbusy	 = 0;
		lp->stats.tx_packets++;
		lp->stats.tx_bytes+=length;
	}
	dev_kfree_skb(skb);

	return 0;
}
498
499/* We have a good packet(s), get it/them out of the buffers.
500 */
/* We have a good packet(s), get it/them out of the buffers.
 * Called from the poll timer: drains up to 10 frames from the adapter while
 * holding the ST-DMA lock, hands them to the network stack, and adapts the
 * poll interval (fast while traffic flows, slower when idle).
 */
static void
bionet_poll_rx(struct net_device *dev) {
	struct net_local *lp = netdev_priv(dev);
	int boguscount = 10;	/* upper bound on frames drained per poll */
	int pkt_len, status;
	unsigned long flags;

	local_irq_save(flags);
	/* ++roman: Take care at locking the ST-DMA... This must be done with ints
	 * off, since otherwise an int could slip in between the question and the
	 * locking itself, and then we'd go to sleep... And locking itself is
	 * necessary to keep the floppy_change timer from working with ST-DMA
	 * registers. */
	if (stdma_islocked()) {
		local_irq_restore(flags);
		return;
	}
	stdma_lock(bionet_intr, NULL);
	DISABLE_IRQ();
	local_irq_restore(flags);

	/* Idle: back the polling rate off one notch, up to the maximum. */
	if( lp->poll_time < MAX_POLL_TIME ) lp->poll_time++;

	while(boguscount--) {
		/* status == 0 means no (more) frames waiting. */
		status = get_frame((unsigned long)phys_nic_packet, 0);

		if( status == 0 ) break;

		/* Good packet... */

		/* Invalidate CPU caches over the DMA buffer before reading it. */
		dma_cache_maintenance((unsigned long)phys_nic_packet, 1520, 0);

		/* Frame length is stored big-endian in the packet header. */
		pkt_len = (nic_packet->l_hi << 8) | nic_packet->l_lo;

		lp->poll_time = bionet_min_poll_time;    /* fast poll */
		if( pkt_len >= 60 && pkt_len <= 1520 ) {
			/*	^^^^ was 1514  KHL */
			/* Malloc up new buffer.
			 */
			struct sk_buff *skb = dev_alloc_skb( pkt_len + 2 );
			if (skb == NULL) {
				printk("%s: Memory squeeze, dropping packet.\n",
					dev->name);
				lp->stats.rx_dropped++;
				break;
			}

			skb_reserve( skb, 2 );		/* 16 Byte align  */
			skb_put( skb, pkt_len );	/* make room */

			/* 'skb->data' points to the start of sk_buff data area.
			 */
			skb_copy_to_linear_data(skb, nic_packet->buffer,
						pkt_len);
			skb->protocol = eth_type_trans( skb, dev );
			netif_rx(skb);
			dev->last_rx = jiffies;
			lp->stats.rx_packets++;
			lp->stats.rx_bytes+=pkt_len;

			/* If any worth-while packets have been received, dev_rint()
			   has done a mark_bh(INET_BH) for us and will work on them
			   when we get to the bottom-half routine.
			 */

			if (bionet_debug >1) {
				u_char *data = nic_packet->buffer, *p;
				int i;

				printk( "%s: RX pkt type 0x%4x from ", dev->name,
					  ((u_short *)data)[6]);


				for( p = &data[6], i = 0; i < 6; i++ )
					printk("%02x%s", *p++,i != 5 ? ":" : "" );
				printk(" to ");
				for( p = data, i = 0; i < 6; i++ )
					printk("%02x%s", *p++,i != 5 ? ":" : "" "\n" );

				printk( "%s: ", dev->name );
				printk(" data %02x%02x %02x%02x%02x%02x %02x%02x%02x%02x %02x%02x%02x%02x %02x%02x%02x%02x"
				       " %02x%02x%02x%02x len %d\n",
					  data[12], data[13], data[14], data[15], data[16], data[17], data[18], data[19],
					  data[20], data[21], data[22], data[23], data[24], data[25], data[26], data[27],
					  data[28], data[29], data[30], data[31], data[32], data[33],
					  pkt_len );
			}
		}
		else {
			printk(" Packet has wrong length: %04d bytes\n", pkt_len);
			lp->stats.rx_errors++;
		}
	}
	stdma_release();
	ENABLE_IRQ();
	return;
}
598
599/* bionet_tick: called by bionet_timer. Reads packets from the adapter,
600 * passes them to the higher layers and restarts the timer.
601 */
602static void
603bionet_tick(unsigned long data) {
604 struct net_device *dev = (struct net_device *)data;
605 struct net_local *lp = netdev_priv(dev);
606
607 if( bionet_debug > 0 && (lp->open_time++ & 7) == 8 )
608 printk("bionet_tick: %ld\n", lp->open_time);
609
610 if( !stdma_islocked() ) bionet_poll_rx(dev);
611
612 bionet_timer.expires = jiffies + lp->poll_time;
613 add_timer(&bionet_timer);
614}
615
616/* The inverse routine to bionet_open().
617 */
/* The inverse routine to bionet_open(): stop the poll timer, put the
 * adapter into its quiescent state and mark the interface down.
 * Returns 0 unconditionally.
 */
static int
bionet_close(struct net_device *dev) {
	struct net_local *lp = netdev_priv(dev);

	if (bionet_debug > 0)
		printk("bionet_close, open_time=%ld\n", lp->open_time);
	/* Kill the poll timer first so it cannot race with the shutdown. */
	del_timer(&bionet_timer);
	stdma_lock(bionet_intr, NULL);

	/* status 0 = adapter off (4 = reset/on in bionet_open()). */
	set_status(0);
	lp->open_time = 0;

	dev->tbusy = 1;
	dev->start = 0;

	stdma_release();
	return 0;
}
636
637/* Get the current statistics.
638 This may be called with the card open or closed.
639 */
640static struct net_device_stats *net_get_stats(struct net_device *dev)
641{
642 struct net_local *lp = netdev_priv(dev);
643 return &lp->stats;
644}
645
646
647#ifdef MODULE
648
649static struct net_device *bio_dev;
650
651int init_module(void)
652{
653 bio_dev = bionet_probe(-1);
654 if (IS_ERR(bio_dev))
655 return PTR_ERR(bio_dev);
656 return 0;
657}
658
659void cleanup_module(void)
660{
661 unregister_netdev(bio_dev);
662 free_netdev(bio_dev);
663}
664
665#endif /* MODULE */
666
667/* Local variables:
668 * compile-command: "gcc -D__KERNEL__ -I/usr/src/linux/include
669 -b m68k-linuxaout -Wall -Wstrict-prototypes -O2
670 -fomit-frame-pointer -pipe -DMODULE -I../../net/inet -c bionet.c"
671 * version-control: t
672 * kept-new-versions: 5
673 * tab-width: 8
674 * End:
675 */
diff --git a/drivers/net/atari_pamsnet.c b/drivers/net/atari_pamsnet.c
deleted file mode 100644
index f7356374a2e7..000000000000
--- a/drivers/net/atari_pamsnet.c
+++ /dev/null
@@ -1,878 +0,0 @@
1/* atari_pamsnet.c PAMsNet device driver for linux68k.
2 *
3 * Version: @(#)PAMsNet.c 0.2ß 03/31/96
4 *
5 * Author: Torsten Lang <Torsten.Lang@ap.physik.uni-giessen.de>
6 * <Torsten.Lang@jung.de>
7 *
8 * This driver is based on my driver PAMSDMA.c for MiNT-Net and
9 * on the driver bionet.c written by
10 * Hartmut Laue <laue@ifk-mp.uni-kiel.de>
11 * and Torsten Narjes <narjes@ifk-mp.uni-kiel.de>
12 *
13 * Little adaptions for integration into pl7 by Roman Hodek
14 *
15 What is it ?
16 ------------
17 This driver controls the PAMsNet LAN-Adapter which connects
18 an ATARI ST/TT via the ACSI-port to an Ethernet-based network.
19
20 This version can be compiled as a loadable module (See the
21 compile command at the bottom of this file).
22 At load time, you can optionally set the debugging level and the
23 fastest response time on the command line of 'insmod'.
24
25 'pamsnet_debug'
26 controls the amount of diagnostic messages:
27 0 : no messages
28 >0 : see code for meaning of printed messages
29
30 'pamsnet_min_poll_time' (always >=1)
31 gives the time (in jiffies) between polls. Low values
32 increase the system load (beware!)
33
34 When loaded, a net device with the name 'eth?' becomes available,
35 which can be controlled with the usual 'ifconfig' command.
36
37 It is possible to compile this driver into the kernel like other
38 (net) drivers. For this purpose, some source files (e.g. config-files
39 makefiles, Space.c) must be changed accordingly. (You may refer to
40 other drivers how to do it.) In this case, the device will be detected
41 at boot time and (probably) appear as 'eth0'.
42
43 Theory of Operation
44 -------------------
45 Because the ATARI DMA port is usually shared between several
46 devices (eg. harddisk, floppy) we cannot block the ACSI bus
47 while waiting for interrupts. Therefore we use a polling mechanism
48 to fetch packets from the adapter. For the same reason, we send
49 packets without checking that the previous packet has been sent to
50 the LAN. We rely on the higher levels of the networking code to detect
51 missing packets and resend them.
52
53 Before we access the ATARI DMA controller, we check if another
54 process is using the DMA. If not, we lock the DMA, perform one or
55 more packet transfers and unlock the DMA before returning.
56 We do not use 'stdma_lock' unconditionally because it is unclear
57 if the networking code can be set to sleep, which will happen if
58 another (possibly slow) device is using the DMA controller.
59
60 The polling is done via timer interrupts which periodically
61 'simulate' an interrupt from the Ethernet adapter. The time (in jiffies)
62 between polls varies depending on an estimate of the net activity.
 63    The allowed range is given by the variable 'pamsnet_min_poll_time'
64 for the lower (fastest) limit and the constant 'MAX_POLL_TIME'
65 for the higher (slowest) limit.
66
67 Whenever a packet arrives, we switch to fastest response by setting
68 the polling time to its lowest limit. If the following poll fails,
69 because no packets have arrived, we increase the time for the next
70 poll. When the net activity is low, the polling time effectively
71 stays at its maximum value, resulting in the lowest load for the
72 machine.
73 */
74
75#define MAX_POLL_TIME 10
76
77static char *version =
78 "pamsnet.c:v0.2beta 30-mar-96 (c) Torsten Lang.\n";
79
80#include <linux/module.h>
81
82#include <linux/kernel.h>
83#include <linux/jiffies.h>
84#include <linux/types.h>
85#include <linux/fcntl.h>
86#include <linux/interrupt.h>
87#include <linux/ioport.h>
88#include <linux/in.h>
89#include <linux/slab.h>
90#include <linux/string.h>
91#include <linux/bitops.h>
92#include <asm/system.h>
93#include <asm/pgtable.h>
94#include <asm/io.h>
95#include <asm/dma.h>
96#include <linux/errno.h>
97#include <asm/atarihw.h>
98#include <asm/atariints.h>
99#include <asm/atari_stdma.h>
100#include <asm/atari_acsi.h>
101
102#include <linux/delay.h>
103#include <linux/timer.h>
104#include <linux/init.h>
105
106#include <linux/netdevice.h>
107#include <linux/etherdevice.h>
108#include <linux/skbuff.h>
109
110#undef READ
111#undef WRITE
112
113/* use 0 for production, 1 for verification, >2 for debug
114 */
115#ifndef NET_DEBUG
116#define NET_DEBUG 0
117#endif
118/*
119 * Global variable 'pamsnet_debug'. Can be set at load time by 'insmod'
120 */
121unsigned int pamsnet_debug = NET_DEBUG;
122module_param(pamsnet_debug, int, 0);
123MODULE_PARM_DESC(pamsnet_debug, "pamsnet debug enable (0-1)");
124MODULE_LICENSE("GPL");
125
126static unsigned int pamsnet_min_poll_time = 2;
127
128
129/* Information that need to be kept for each board.
130 */
131struct net_local {
132 struct net_device_stats stats;
133 long open_time; /* for debugging */
134 int poll_time; /* polling time varies with net load */
135};
136
137static struct nic_pkt_s { /* packet format */
138 unsigned char buffer[2048];
139} *nic_packet = 0;
140unsigned char *phys_nic_packet;
141
142typedef unsigned char HADDR[6]; /* 6-byte hardware address of lance */
143
144/* Index to functions, as function prototypes.
145 */
146static void start (int target);
147static int stop (int target);
148static int testpkt (int target);
149static int sendpkt (int target, unsigned char *buffer, int length);
150static int receivepkt (int target, unsigned char *buffer);
151static int inquiry (int target, unsigned char *buffer);
152static HADDR *read_hw_addr(int target, unsigned char *buffer);
153static void setup_dma (void *address, unsigned rw_flag, int num_blocks);
154static int send_first (int target, unsigned char byte);
155static int send_1_5 (int lun, unsigned char *command, int dma);
156static int get_status (void);
157static int calc_received (void *start_address);
158
159static int pamsnet_open(struct net_device *dev);
160static int pamsnet_send_packet(struct sk_buff *skb, struct net_device *dev);
161static void pamsnet_poll_rx(struct net_device *);
162static int pamsnet_close(struct net_device *dev);
163static struct net_device_stats *net_get_stats(struct net_device *dev);
164static void pamsnet_tick(unsigned long);
165
166static irqreturn_t pamsnet_intr(int irq, void *data);
167
168static DEFINE_TIMER(pamsnet_timer, pamsnet_tick, 0, 0);
169
170#define STRAM_ADDR(a) (((a) & 0xff000000) == 0)
171
172typedef struct
173{
174 unsigned char reserved1[0x38];
175 HADDR hwaddr;
176 unsigned char reserved2[0x1c2];
177} DMAHWADDR;
178
179/*
180 * Definitions of commands understood by the PAMs DMA adaptor.
181 *
182 * In general the DMA adaptor uses LUN 0, 5, 6 and 7 on one ID changeable
183 * by the PAM's Net software.
184 *
185 * LUN 0 works as a harddisk. You can boot the PAM's Net driver there.
186 * LUN 5 works as a harddisk and lets you access the RAM and some I/O HW
187 * area. In sector 0, bytes 0x38-0x3d you find the ethernet HW address
188 * of the adaptor.
189 * LUN 6 works as a harddisk and lets you access the firmware ROM.
190 * LUN 7 lets you send and receive packets.
191 *
192 * Some commands like the INQUIRY command work identical on all used LUNs.
193 *
194 * UNKNOWN1 seems to read some data.
195 * Command length is 6 bytes.
196 * UNKNOWN2 seems to read some data (command byte 1 must be !=0). The
197 * following bytes seem to be something like an allocation length.
198 * Command length is 6 bytes.
199 * READPKT reads a packet received by the DMA adaptor.
200 * Command length is 6 bytes.
201 * WRITEPKT sends a packet transferred by the following DMA phase. The length
202 * of the packet is transferred in command bytes 3 and 4.
203 * The adaptor automatically replaces the src hw address in an ethernet
204 * packet by its own hw address.
205 * Command length is 6 bytes.
206 * INQUIRY has the same function as the INQUIRY command supported by harddisks
207 * and other SCSI devices. It lets you detect which device you found
208 * at a given address.
209 * Command length is 6 bytes.
210 * START initializes the DMA adaptor. After this command it is able to send
211 * and receive packets. There is no status byte returned!
212 * Command length is 1 byte.
213 * NUMPKTS gives back the number of received packets waiting in the queue in
214 * the status byte.
215 * Command length is 1 byte.
216 * UNKNOWN3
217 * UNKNOWN4 Function of these three commands is unknown.
218 * UNKNOWN5 The command length of these three commands is 1 byte.
219 * DESELECT immediately deselects the DMA adaptor. May important with interrupt
220 * driven operation.
221 * Command length is 1 byte.
222 * STOP resets the DMA adaptor. After this command packets can no longer
223 * be received or transferred.
224 * Command length is 6 byte.
225 */
226
227enum {UNKNOWN1=3, READPKT=8, UNKNOWN2, WRITEPKT=10, INQUIRY=18, START,
228 NUMPKTS=22, UNKNOWN3, UNKNOWN4, UNKNOWN5, DESELECT, STOP};
229
230#define READSECTOR READPKT
231#define WRITESECTOR WRITEPKT
232
233u_char *inquire8="MV PAM's NET/GK";
234
235#define DMALOW dma_wd.dma_lo
236#define DMAMID dma_wd.dma_md
237#define DMAHIGH dma_wd.dma_hi
238#define DACCESS dma_wd.fdc_acces_seccount
239
240#define MFP_GPIP mfp.par_dt_reg
241
242/* Some useful functions */
243
244#define INT (!(MFP_GPIP & 0x20))
245#define DELAY ({MFP_GPIP; MFP_GPIP; MFP_GPIP;})
246#define WRITEMODE(value) \
247 ({ u_short dummy = value; \
248 __asm__ volatile("movew %0, 0xFFFF8606" : : "d"(dummy)); \
249 DELAY; \
250 })
251#define WRITEBOTH(value1, value2) \
252 ({ u_long dummy = (u_long)(value1)<<16 | (u_short)(value2); \
253 __asm__ volatile("movel %0, 0xFFFF8604" : : "d"(dummy)); \
254 DELAY; \
255 })
256
257/* Definitions for DMODE */
258
259#define READ 0x000
260#define WRITE 0x100
261
262#define DMA_FDC 0x080
263#define DMA_ACSI 0x000
264
265#define DMA_DISABLE 0x040
266
267#define SEC_COUNT 0x010
268#define DMA_WINDOW 0x000
269
270#define REG_ACSI 0x008
271#define REG_FDC 0x000
272
273#define A1 0x002
274
275/* Timeout constants */
276
277#define TIMEOUTCMD HZ/2 /* ca. 500ms */
278#define TIMEOUTDMA HZ /* ca. 1s */
279#define COMMAND_DELAY 500 /* ca. 0.5ms */
280
281unsigned rw;
282int lance_target = -1;
283int if_up = 0;
284
285/* The following routines access the ethernet board connected to the
286 * ACSI port via the st_dma chip.
287 */
288
289/* The following lowlevel routines work on physical addresses only and assume
290 * that eventually needed buffers are
291 * - completely located in ST RAM
 292 * - are contiguous in the physical address space
293 */
294
295/* Setup the DMA counter */
296
/* Program the ST DMA chip for a transfer of 'num_blocks' sectors to/from
 * the physical address 'address'.  The three WRITEMODE calls toggle the
 * read/write bit once while in sector-count mode — presumably this resets
 * the DMA chip's internal state/FIFO (TODO: confirm against Atari HW docs).
 * The write order of the registers below is significant.
 */
static void
setup_dma (void *address, unsigned rw_flag, int num_blocks)
{
	WRITEMODE((unsigned)	rw_flag		  | DMA_FDC | SEC_COUNT | REG_ACSI |
		  A1);
	WRITEMODE((unsigned)(rw_flag ^ WRITE) | DMA_FDC | SEC_COUNT | REG_ACSI |
		  A1);
	WRITEMODE((unsigned)	rw_flag		  | DMA_FDC | SEC_COUNT | REG_ACSI |
		  A1);
	/* Load the 24-bit DMA base address, low byte first. */
	DMALOW  = (unsigned char)((unsigned long)address & 0xFF);
	DMAMID  = (unsigned char)(((unsigned long)address >> 8) & 0xFF);
	DMAHIGH = (unsigned char)(((unsigned long)address >> 16) & 0xFF);
	/* Write the sector count and switch back to the data window. */
	WRITEBOTH((unsigned)num_blocks & 0xFF,
		  rw_flag | DMA_FDC | DMA_WINDOW | REG_ACSI | A1);
	rw = rw_flag;	/* remember the direction for send_1_5() */
}
313
314/* Send the first byte of an command block */
315
/* Select ACSI target 'target' and send the first byte of a command block.
 * The target id goes into the top 3 bits, the opcode into the low 5 bits.
 * Returns nonzero if the ACSI IRQ does not arrive within TIMEOUTCMD.
 */
static int
send_first (int target, unsigned char byte)
{
	rw = READ;
	/* Respect the mandatory gap after the previous command. */
	acsi_delay_end(COMMAND_DELAY);
	/*
	 * wake up ACSI
	 */
	WRITEMODE(DMA_FDC | DMA_WINDOW | REG_ACSI);
	/*
	 * write command byte
	 */
	WRITEBOTH((target << 5) | (byte & 0x1F), DMA_FDC |
		  DMA_WINDOW | REG_ACSI | A1);
	return (!acsi_wait_for_IRQ(TIMEOUTCMD));
}
332
333/* Send the rest of an command block */
334
/* Send bytes 1..5 of a six-byte command block.  The first byte sent here
 * carries the LUN in its top 3 bits.  If 'dma' is nonzero, the mode bits
 * for the final byte select the ACSI DMA data phase instead of the FDC
 * register path.  No IRQ wait is done after the last byte.
 * Returns nonzero on IRQ timeout, 0 on success.
 */
static int
send_1_5 (int lun, unsigned char *command, int dma)
{
	int i, j;

	for (i=0; i<5; i++) {
		WRITEBOTH((!i ? (((lun & 0x7) << 5) | (command[i] & 0x1F))
			      : command[i]),
			  rw | REG_ACSI | DMA_WINDOW |
			  ((i < 4) ? DMA_FDC
				   : (dma ? DMA_ACSI
					  : DMA_FDC)) | A1);
		if (i < 4 && (j = !acsi_wait_for_IRQ(TIMEOUTCMD)))
			return (j);
	}
	return (0);
}
352
353/* Read a status byte */
354
/* Read the ACSI status byte after a command completes and start the
 * inter-command delay.  Callers treat a nonzero status as failure.
 */
static int
get_status (void)
{
	WRITEMODE(DMA_FDC | DMA_WINDOW | REG_ACSI | A1);
	acsi_delay_start();
	return ((int)(DACCESS & 0xFF));
}
362
363/* Calculate the number of received bytes */
364
/* Number of bytes transferred by the last DMA: the chip's 24-bit address
 * counter (read from the three DMA registers) minus the start address.
 */
static int
calc_received (void *start_address)
{
	return (int)(
		(((unsigned long)DMAHIGH << 16) | ((unsigned)DMAMID << 8) | DMALOW)
		- (unsigned long)start_address);
}
372
373/* The following midlevel routines still work on physical addresses ... */
374
375/* start() starts the PAM's DMA adaptor */
376
/* start() starts the PAM's DMA adaptor.  START is a single-byte command
 * that returns no status byte, so there is nothing to check here.
 */
static void
start (int target)
{
	send_first(target, START);
}
382
383/* stop() stops the PAM's DMA adaptor and returns a value of zero in case of success */
384
385static int
386stop (int target)
387{
388 int ret = -1;
389 unsigned char cmd_buffer[5];
390
391 if (send_first(target, STOP))
392 goto bad;
393 cmd_buffer[0] = cmd_buffer[1] = cmd_buffer[2] =
394 cmd_buffer[3] = cmd_buffer[4] = 0;
395 if (send_1_5(7, cmd_buffer, 0) ||
396 !acsi_wait_for_IRQ(TIMEOUTDMA) ||
397 get_status())
398 goto bad;
399 ret = 0;
400bad:
401 return (ret);
402}
403
404/* testpkt() returns the number of received packets waiting in the queue */
405
406static int
407testpkt(int target)
408{
409 int ret = -1;
410
411 if (send_first(target, NUMPKTS))
412 goto bad;
413 ret = get_status();
414bad:
415 return (ret);
416}
417
418/* inquiry() returns 0 when PAM's DMA found, -1 when timeout, -2 otherwise */
419/* Please note: The buffer is for internal use only but must be defined! */
420
/* Issue a SCSI-style INQUIRY to 'target' and check the returned vendor/
 * product string against "MV PAM's NET/GK".  'buffer' is a physical
 * address used as DMA target; it must be valid but its contents are
 * scratch.  Returns 0 when the adaptor is found, -1 otherwise.
 */
static int
inquiry (int target, unsigned char *buffer)
{
	int ret = -1;
	/* CPU-visible view of the physical DMA buffer. */
	unsigned char *vbuffer = phys_to_virt((unsigned long)buffer);
	unsigned char cmd_buffer[5];

	if (send_first(target, INQUIRY))
		goto bad;
	setup_dma(buffer, READ, 1);
	vbuffer[8] = vbuffer[27] = 0;	/* Avoid confusion with previous read data */
	cmd_buffer[0] = cmd_buffer[1] = cmd_buffer[2] = cmd_buffer[4] = 0;
	cmd_buffer[3] = 48;	/* allocation length for the INQUIRY data */
	if (send_1_5(5, cmd_buffer, 1) ||
	    !acsi_wait_for_IRQ(TIMEOUTDMA) ||
	    get_status() ||
	    (calc_received(buffer) < 32))
		goto bad;
	/* Invalidate caches over the id string before comparing it. */
	dma_cache_maintenance((unsigned long)(buffer+8), 20, 0);
	/* Bytes 8..27 of the INQUIRY data hold the vendor/product id. */
	if (memcmp(inquire8, vbuffer+8, 20))
		goto bad;
	ret = 0;
bad:
	if (!!NET_DEBUG) {
		vbuffer[8+20]=0;
		printk("inquiry of target %d: %s\n", target, vbuffer+8);
	}
	return (ret);
}
450
451/*
452 * read_hw_addr() reads the sector containing the hwaddr and returns
453 * a pointer to it (virtual address!) or 0 in case of an error
454 */
455
/*
 * read_hw_addr() reads the sector containing the hwaddr and returns
 * a pointer to it (virtual address!) or 0 in case of an error.
 * The hardware address lives at byte offset 0x38 of sector 0 on LUN 5
 * (see the DMAHWADDR layout above).  'buffer' is a physical scratch
 * buffer used as DMA target.
 */
static HADDR
*read_hw_addr(int target, unsigned char *buffer)
{
	HADDR *ret = 0;
	unsigned char cmd_buffer[5];

	if (send_first(target, READSECTOR))
		goto bad;
	setup_dma(buffer, READ, 1);
	cmd_buffer[0] = cmd_buffer[1] = cmd_buffer[2] = cmd_buffer[4] = 0;
	cmd_buffer[3] = 1;	/* read one sector */
	if (send_1_5(5, cmd_buffer, 1) ||
	    !acsi_wait_for_IRQ(TIMEOUTDMA) ||
	    get_status())
		goto bad;
	ret = phys_to_virt((unsigned long)&(((DMAHWADDR *)buffer)->hwaddr));
	/* Invalidate caches so the CPU sees the freshly DMA'd sector. */
	dma_cache_maintenance((unsigned long)buffer, 512, 0);
bad:
	return (ret);
}
476
/* Dummy interrupt handler.  The driver polls the adapter from a timer;
 * this exists only because stdma_lock() is given a handler to register.
 */
static irqreturn_t
pamsnet_intr(int irq, void *data)
{
	return IRQ_HANDLED;
}
482
483/* receivepkt() loads a packet to a given buffer and returns its length */
484
485static int
486receivepkt (int target, unsigned char *buffer)
487{
488 int ret = -1;
489 unsigned char cmd_buffer[5];
490
491 if (send_first(target, READPKT))
492 goto bad;
493 setup_dma(buffer, READ, 3);
494 cmd_buffer[0] = cmd_buffer[1] = cmd_buffer[2] = cmd_buffer[4] = 0;
495 cmd_buffer[3] = 3;
496 if (send_1_5(7, cmd_buffer, 1) ||
497 !acsi_wait_for_IRQ(TIMEOUTDMA) ||
498 get_status())
499 goto bad;
500 ret = calc_received(buffer);
501bad:
502 return (ret);
503}
504
505/* sendpkt() sends a packet and returns a value of zero when the packet was sent
506 successfully */
507
508static int
509sendpkt (int target, unsigned char *buffer, int length)
510{
511 int ret = -1;
512 unsigned char cmd_buffer[5];
513
514 if (send_first(target, WRITEPKT))
515 goto bad;
516 setup_dma(buffer, WRITE, 3);
517 cmd_buffer[0] = cmd_buffer[1] = cmd_buffer[4] = 0;
518 cmd_buffer[2] = length >> 8;
519 cmd_buffer[3] = length & 0xFF;
520 if (send_1_5(7, cmd_buffer, 1) ||
521 !acsi_wait_for_IRQ(TIMEOUTDMA) ||
522 get_status())
523 goto bad;
524 ret = 0;
525bad:
526 return (ret);
527}
528
529/* The following higher level routines work on virtual addresses and convert them to
530 * physical addresses when passed to the lowlevel routines. It's up to the higher level
 531 * routines to copy data from Alternate RAM to ST RAM if necessary!
532 */
533
534/* Check for a network adaptor of this type, and return '0' if one exists.
535 */
536
/* Probe all 8 ACSI target ids for a PAM's Net/GK adapter, read its
 * hardware address, and register a net_device for it.  Only a single
 * adapter is supported; subsequent calls return -ENODEV immediately.
 * Returns the registered device or an ERR_PTR().
 */
struct net_device * __init pamsnet_probe (int unit)
{
	struct net_device *dev;
	int i;
	HADDR *hwaddr;
	int err;

	unsigned char station_addr[6];
	static unsigned version_printed;
	/* avoid "Probing for..." printed 4 times - the driver is supporting only one adapter now! */
	static int no_more_found;

	if (no_more_found)
		return ERR_PTR(-ENODEV);
	no_more_found = 1;

	dev = alloc_etherdev(sizeof(struct net_local));
	if (!dev)
		return ERR_PTR(-ENOMEM);
	if (unit >= 0) {
		sprintf(dev->name, "eth%d", unit);
		netdev_boot_setup_check(dev);
	}
	SET_MODULE_OWNER(dev);

	printk("Probing for PAM's Net/GK Adapter...\n");

	/* Allocate the DMA buffer here since we need it for probing! */

	nic_packet = (struct nic_pkt_s *)acsi_buffer;
	phys_nic_packet = (unsigned char *)phys_acsi_buffer;
	if (pamsnet_debug > 0) {
		printk("nic_packet at 0x%p, phys at 0x%p\n",
			nic_packet, phys_nic_packet );
	}

	stdma_lock(pamsnet_intr, NULL);
	DISABLE_IRQ();

	for (i=0; i<8; i++) {
		/* Do two inquiries to cover cases with strange equipment on previous ID */
		/* blocking the ACSI bus (like the SLMC804 laser printer controller... */
		inquiry(i, phys_nic_packet);
		if (!inquiry(i, phys_nic_packet)) {
			lance_target = i;
			break;
		}
	}

	if (!!NET_DEBUG)
		printk("ID: %d\n",i);

	if (lance_target >= 0) {
		if (!(hwaddr = read_hw_addr(lance_target, phys_nic_packet)))
			lance_target = -1;
		else
			memcpy (station_addr, hwaddr, ETH_ALEN);
	}

	ENABLE_IRQ();
	stdma_release();

	if (lance_target < 0) {
		printk("No PAM's Net/GK found.\n");
		free_netdev(dev);
		return ERR_PTR(-ENODEV);
	}

	if (pamsnet_debug > 0 && version_printed++ == 0)
		printk(version);

	printk("%s: %s found on target %01d, eth-addr: %02x:%02x:%02x:%02x:%02x:%02x.\n",
		dev->name, "PAM's Net/GK", lance_target,
		station_addr[0], station_addr[1], station_addr[2],
		station_addr[3], station_addr[4], station_addr[5]);

	/* Initialize the device structure. */
	dev->open		= pamsnet_open;
	dev->stop		= pamsnet_close;
	dev->hard_start_xmit	= pamsnet_send_packet;
	dev->get_stats		= net_get_stats;

	/* Fill in the fields of the device structure with ethernet-generic
	 * values. This should be in a common file instead of per-driver.
	 */

	for (i = 0; i < ETH_ALEN; i++) {
#if 0
		dev->broadcast[i] = 0xff;
#endif
		dev->dev_addr[i] = station_addr[i];
	}
	err = register_netdev(dev);
	if (!err)
		return dev;

	free_netdev(dev);
	return ERR_PTR(err);
}
636
637/* Open/initialize the board. This is called (in the current kernel)
638 sometime after booting when the 'ifconfig' program is run.
639
640 This routine should set everything up anew at each open, even
641 registers that "should" only need to be set once at boot, so that
642 there is non-reboot way to recover if something goes wrong.
643 */
/* Open/initialize the board: start the adaptor (once) under the ST-DMA
 * lock and arm the polling timer that emulates receive interrupts.
 * Returns 0 unconditionally (this historical API has no failure path).
 */
static int
pamsnet_open(struct net_device *dev)
{
	struct net_local *lp = netdev_priv(dev);

	if (pamsnet_debug > 0)
		printk("pamsnet_open\n");
	stdma_lock(pamsnet_intr, NULL);
	DISABLE_IRQ();

	/* Reset the hardware here.
	 */
	if (!if_up)
		start(lance_target);	/* only issue START on the first open */
	if_up = 1;
	lp->open_time = 0;	/*jiffies*/
	lp->poll_time = MAX_POLL_TIME;	/* start at the slowest polling rate */

	dev->tbusy = 0;
	dev->interrupt = 0;
	dev->start = 1;

	ENABLE_IRQ();
	stdma_release();
	/* Arm the receive-polling timer; pamsnet_tick() re-arms it. */
	pamsnet_timer.data = (long)dev;
	pamsnet_timer.expires = jiffies + lp->poll_time;
	add_timer(&pamsnet_timer);
	return 0;
}
673
/* Transmit one packet.  If the ST-DMA is busy the packet is dropped and
 * counted as a tx_error (we must not sleep here); higher layers are relied
 * upon to retransmit.  The skb is always consumed.  Returns 0.
 */
static int
pamsnet_send_packet(struct sk_buff *skb, struct net_device *dev)
{
	struct net_local *lp = netdev_priv(dev);
	unsigned long flags;

	/* Block a timer-based transmit from overlapping. This could better be
	 * done with atomic_swap(1, dev->tbusy), but set_bit() works as well.
	 */
	local_irq_save(flags);

	if (stdma_islocked()) {
		local_irq_restore(flags);
		lp->stats.tx_errors++;
	}
	else {
		/* Pad short frames up to the Ethernet minimum. */
		int length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
		unsigned long buf = virt_to_phys(skb->data);
		int stat;

		stdma_lock(pamsnet_intr, NULL);
		DISABLE_IRQ();

		local_irq_restore(flags);
		/* The DMA chip can only reach ST RAM; bounce the data through
		 * the statically allocated ACSI buffer otherwise. */
		if( !STRAM_ADDR(buf+length-1) ) {
			skb_copy_from_linear_data(skb, nic_packet->buffer,
						  length);
			buf = (unsigned long)phys_nic_packet;
		}

		/* Flush CPU caches to memory before the device DMAs from it. */
		dma_cache_maintenance(buf, length, 1);

		stat = sendpkt(lance_target, (unsigned char *)buf, length);
		ENABLE_IRQ();
		stdma_release();

		dev->trans_start = jiffies;
		dev->tbusy	 = 0;
		lp->stats.tx_packets++;
		lp->stats.tx_bytes+=length;
	}
	dev_kfree_skb(skb);

	return 0;
}
719
720/* We have a good packet(s), get it/them out of the buffers.
721 */
722static void
723pamsnet_poll_rx(struct net_device *dev)
724{
725 struct net_local *lp = netdev_priv(dev);
726 int boguscount;
727 int pkt_len;
728 struct sk_buff *skb;
729 unsigned long flags;
730
731 local_irq_save(flags);
732 /* ++roman: Take care at locking the ST-DMA... This must be done with ints
733 * off, since otherwise an int could slip in between the question and the
734 * locking itself, and then we'd go to sleep... And locking itself is
735 * necessary to keep the floppy_change timer from working with ST-DMA
736 * registers. */
737 if (stdma_islocked()) {
738 local_irq_restore(flags);
739 return;
740 }
741 stdma_lock(pamsnet_intr, NULL);
742 DISABLE_IRQ();
743 local_irq_restore(flags);
744
745 boguscount = testpkt(lance_target);
746 if( lp->poll_time < MAX_POLL_TIME ) lp->poll_time++;
747
748 while(boguscount--) {
749 pkt_len = receivepkt(lance_target, phys_nic_packet);
750
751 if( pkt_len < 60 ) break;
752
753 /* Good packet... */
754
755 dma_cache_maintenance((unsigned long)phys_nic_packet, pkt_len, 0);
756
757 lp->poll_time = pamsnet_min_poll_time; /* fast poll */
758 if( pkt_len >= 60 && pkt_len <= 2048 ) {
759 if (pkt_len > 1514)
760 pkt_len = 1514;
761
762 /* Malloc up new buffer.
763 */
764 skb = alloc_skb(pkt_len, GFP_ATOMIC);
765 if (skb == NULL) {
766 printk("%s: Memory squeeze, dropping packet.\n",
767 dev->name);
768 lp->stats.rx_dropped++;
769 break;
770 }
771 skb->len = pkt_len;
772 skb->dev = dev;
773
774 /* 'skb->data' points to the start of sk_buff data area.
775 */
776 skb_copy_to_linear_data(skb, nic_packet->buffer,
777 pkt_len);
778 netif_rx(skb);
779 dev->last_rx = jiffies;
780 lp->stats.rx_packets++;
781 lp->stats.rx_bytes+=pkt_len;
782 }
783 }
784
785 /* If any worth-while packets have been received, dev_rint()
786 has done a mark_bh(INET_BH) for us and will work on them
787 when we get to the bottom-half routine.
788 */
789
790 ENABLE_IRQ();
791 stdma_release();
792 return;
793}
794
795/* pamsnet_tick: called by pamsnet_timer. Reads packets from the adapter,
796 * passes them to the higher layers and restarts the timer.
797 */
798static void
799pamsnet_tick(unsigned long data)
800{
801 struct net_device *dev = (struct net_device *)data;
802 struct net_local *lp = netdev_priv(dev);
803
804 if( pamsnet_debug > 0 && (lp->open_time++ & 7) == 8 )
805 printk("pamsnet_tick: %ld\n", lp->open_time);
806
807 pamsnet_poll_rx(dev);
808
809 pamsnet_timer.expires = jiffies + lp->poll_time;
810 add_timer(&pamsnet_timer);
811}
812
813/* The inverse routine to pamsnet_open().
814 */
/* The inverse routine to pamsnet_open(): stop the poll timer, issue the
 * STOP command to the adaptor (if it was started) and mark the interface
 * down.  Returns 0 unconditionally.
 */
static int
pamsnet_close(struct net_device *dev)
{
	struct net_local *lp = netdev_priv(dev);

	if (pamsnet_debug > 0)
		printk("pamsnet_close, open_time=%ld\n", lp->open_time);
	/* Kill the poll timer first so it cannot race with the shutdown. */
	del_timer(&pamsnet_timer);
	stdma_lock(pamsnet_intr, NULL);
	DISABLE_IRQ();

	if (if_up)
		stop(lance_target);
	if_up = 0;

	lp->open_time = 0;

	dev->tbusy = 1;
	dev->start = 0;

	ENABLE_IRQ();
	stdma_release();
	return 0;
}
839
840/* Get the current statistics.
841 This may be called with the card open or closed.
842 */
843static struct net_device_stats *net_get_stats(struct net_device *dev)
844{
845 struct net_local *lp = netdev_priv(dev);
846 return &lp->stats;
847}
848
849
850#ifdef MODULE
851
852static struct net_device *pam_dev;
853
854int init_module(void)
855{
856 pam_dev = pamsnet_probe(-1);
857 if (IS_ERR(pam_dev))
858 return PTR_ERR(pam_dev);
859 return 0;
860}
861
862void cleanup_module(void)
863{
864 unregister_netdev(pam_dev);
865 free_netdev(pam_dev);
866}
867
868#endif /* MODULE */
869
870/* Local variables:
871 * compile-command: "gcc -D__KERNEL__ -I/usr/src/linux/include
872 -b m68k-linuxaout -Wall -Wstrict-prototypes -O2
873 -fomit-frame-pointer -pipe -DMODULE -I../../net/inet -c atari_pamsnet.c"
874 * version-control: t
875 * kept-new-versions: 5
876 * tab-width: 8
877 * End:
878 */
diff --git a/drivers/net/atl1/atl1.h b/drivers/net/atl1/atl1.h
index 991c8b93d386..ff4765f6c3de 100644
--- a/drivers/net/atl1/atl1.h
+++ b/drivers/net/atl1/atl1.h
@@ -230,7 +230,6 @@ struct atl1_hw {
230 u32 min_frame_size; 230 u32 min_frame_size;
231 231
232 u16 dev_rev; 232 u16 dev_rev;
233 u8 revision_id;
234 233
235 /* spi flash */ 234 /* spi flash */
236 u8 flash_vendor; 235 u8 flash_vendor;
diff --git a/drivers/net/atl1/atl1_main.c b/drivers/net/atl1/atl1_main.c
index f7ac4758d51c..4a18b881ae9a 100644
--- a/drivers/net/atl1/atl1_main.c
+++ b/drivers/net/atl1/atl1_main.c
@@ -118,10 +118,6 @@ static int __devinit atl1_sw_init(struct atl1_adapter *adapter)
118{ 118{
119 struct atl1_hw *hw = &adapter->hw; 119 struct atl1_hw *hw = &adapter->hw;
120 struct net_device *netdev = adapter->netdev; 120 struct net_device *netdev = adapter->netdev;
121 struct pci_dev *pdev = adapter->pdev;
122
123 /* PCI config space info */
124 pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
125 121
126 hw->max_frame_size = netdev->mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE; 122 hw->max_frame_size = netdev->mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
127 hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE; 123 hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE;
diff --git a/drivers/net/au1000_eth.c b/drivers/net/au1000_eth.c
index c27cfcef45fa..e86b3691765b 100644
--- a/drivers/net/au1000_eth.c
+++ b/drivers/net/au1000_eth.c
@@ -1205,8 +1205,8 @@ static int au1000_rx(struct net_device *dev)
1205 continue; 1205 continue;
1206 } 1206 }
1207 skb_reserve(skb, 2); /* 16 byte IP header align */ 1207 skb_reserve(skb, 2); /* 16 byte IP header align */
1208 eth_copy_and_sum(skb, 1208 skb_copy_to_linear_data(skb,
1209 (unsigned char *)pDB->vaddr, frmlen, 0); 1209 (unsigned char *)pDB->vaddr, frmlen);
1210 skb_put(skb, frmlen); 1210 skb_put(skb, frmlen);
1211 skb->protocol = eth_type_trans(skb, dev); 1211 skb->protocol = eth_type_trans(skb, dev);
1212 netif_rx(skb); /* pass the packet to upper layers */ 1212 netif_rx(skb); /* pass the packet to upper layers */
diff --git a/drivers/net/ax88796.c b/drivers/net/ax88796.c
index d19874bf0706..1d882360b34d 100644
--- a/drivers/net/ax88796.c
+++ b/drivers/net/ax88796.c
@@ -459,7 +459,7 @@ static int ax_open(struct net_device *dev)
459 struct ei_device *ei_local = netdev_priv(dev); 459 struct ei_device *ei_local = netdev_priv(dev);
460 int ret; 460 int ret;
461 461
462 dev_dbg(ax->dev, "%s: open\n", dev->name); 462 dev_dbg(&ax->dev->dev, "%s: open\n", dev->name);
463 463
464 ret = request_irq(dev->irq, ax_ei_interrupt, 0, dev->name, dev); 464 ret = request_irq(dev->irq, ax_ei_interrupt, 0, dev->name, dev);
465 if (ret) 465 if (ret)
@@ -492,7 +492,7 @@ static int ax_close(struct net_device *dev)
492 struct ax_device *ax = to_ax_dev(dev); 492 struct ax_device *ax = to_ax_dev(dev);
493 struct ei_device *ei_local = netdev_priv(dev); 493 struct ei_device *ei_local = netdev_priv(dev);
494 494
495 dev_dbg(ax->dev, "%s: close\n", dev->name); 495 dev_dbg(&ax->dev->dev, "%s: close\n", dev->name);
496 496
497 /* turn the phy off */ 497 /* turn the phy off */
498 498
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index ce3ed67a878e..d23861c8658c 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -54,8 +54,8 @@
54 54
55#define DRV_MODULE_NAME "bnx2" 55#define DRV_MODULE_NAME "bnx2"
56#define PFX DRV_MODULE_NAME ": " 56#define PFX DRV_MODULE_NAME ": "
57#define DRV_MODULE_VERSION "1.5.11" 57#define DRV_MODULE_VERSION "1.6.2"
58#define DRV_MODULE_RELDATE "June 4, 2007" 58#define DRV_MODULE_RELDATE "July 6, 2007"
59 59
60#define RUN_AT(x) (jiffies + (x)) 60#define RUN_AT(x) (jiffies + (x))
61 61
@@ -550,6 +550,9 @@ bnx2_report_fw_link(struct bnx2 *bp)
550{ 550{
551 u32 fw_link_status = 0; 551 u32 fw_link_status = 0;
552 552
553 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
554 return;
555
553 if (bp->link_up) { 556 if (bp->link_up) {
554 u32 bmsr; 557 u32 bmsr;
555 558
@@ -601,12 +604,21 @@ bnx2_report_fw_link(struct bnx2 *bp)
601 REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status); 604 REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
602} 605}
603 606
607static char *
608bnx2_xceiver_str(struct bnx2 *bp)
609{
610 return ((bp->phy_port == PORT_FIBRE) ? "SerDes" :
611 ((bp->phy_flags & PHY_SERDES_FLAG) ? "Remote Copper" :
612 "Copper"));
613}
614
604static void 615static void
605bnx2_report_link(struct bnx2 *bp) 616bnx2_report_link(struct bnx2 *bp)
606{ 617{
607 if (bp->link_up) { 618 if (bp->link_up) {
608 netif_carrier_on(bp->dev); 619 netif_carrier_on(bp->dev);
609 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name); 620 printk(KERN_INFO PFX "%s NIC %s Link is Up, ", bp->dev->name,
621 bnx2_xceiver_str(bp));
610 622
611 printk("%d Mbps ", bp->line_speed); 623 printk("%d Mbps ", bp->line_speed);
612 624
@@ -630,7 +642,8 @@ bnx2_report_link(struct bnx2 *bp)
630 } 642 }
631 else { 643 else {
632 netif_carrier_off(bp->dev); 644 netif_carrier_off(bp->dev);
633 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name); 645 printk(KERN_ERR PFX "%s NIC %s Link is Down\n", bp->dev->name,
646 bnx2_xceiver_str(bp));
634 } 647 }
635 648
636 bnx2_report_fw_link(bp); 649 bnx2_report_fw_link(bp);
@@ -1100,6 +1113,9 @@ bnx2_set_link(struct bnx2 *bp)
1100 return 0; 1113 return 0;
1101 } 1114 }
1102 1115
1116 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
1117 return 0;
1118
1103 link_up = bp->link_up; 1119 link_up = bp->link_up;
1104 1120
1105 bnx2_enable_bmsr1(bp); 1121 bnx2_enable_bmsr1(bp);
@@ -1210,12 +1226,74 @@ bnx2_phy_get_pause_adv(struct bnx2 *bp)
1210 return adv; 1226 return adv;
1211} 1227}
1212 1228
1229static int bnx2_fw_sync(struct bnx2 *, u32, int);
1230
1213static int 1231static int
1214bnx2_setup_serdes_phy(struct bnx2 *bp) 1232bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
1233{
1234 u32 speed_arg = 0, pause_adv;
1235
1236 pause_adv = bnx2_phy_get_pause_adv(bp);
1237
1238 if (bp->autoneg & AUTONEG_SPEED) {
1239 speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
1240 if (bp->advertising & ADVERTISED_10baseT_Half)
1241 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1242 if (bp->advertising & ADVERTISED_10baseT_Full)
1243 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1244 if (bp->advertising & ADVERTISED_100baseT_Half)
1245 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1246 if (bp->advertising & ADVERTISED_100baseT_Full)
1247 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1248 if (bp->advertising & ADVERTISED_1000baseT_Full)
1249 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1250 if (bp->advertising & ADVERTISED_2500baseX_Full)
1251 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1252 } else {
1253 if (bp->req_line_speed == SPEED_2500)
1254 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1255 else if (bp->req_line_speed == SPEED_1000)
1256 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1257 else if (bp->req_line_speed == SPEED_100) {
1258 if (bp->req_duplex == DUPLEX_FULL)
1259 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1260 else
1261 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1262 } else if (bp->req_line_speed == SPEED_10) {
1263 if (bp->req_duplex == DUPLEX_FULL)
1264 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1265 else
1266 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1267 }
1268 }
1269
1270 if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
1271 speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
1272 if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_1000XPSE_ASYM))
1273 speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;
1274
1275 if (port == PORT_TP)
1276 speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
1277 BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;
1278
1279 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB_ARG0, speed_arg);
1280
1281 spin_unlock_bh(&bp->phy_lock);
1282 bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 0);
1283 spin_lock_bh(&bp->phy_lock);
1284
1285 return 0;
1286}
1287
1288static int
1289bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
1215{ 1290{
1216 u32 adv, bmcr; 1291 u32 adv, bmcr;
1217 u32 new_adv = 0; 1292 u32 new_adv = 0;
1218 1293
1294 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
1295 return (bnx2_setup_remote_phy(bp, port));
1296
1219 if (!(bp->autoneg & AUTONEG_SPEED)) { 1297 if (!(bp->autoneg & AUTONEG_SPEED)) {
1220 u32 new_bmcr; 1298 u32 new_bmcr;
1221 int force_link_down = 0; 1299 int force_link_down = 0;
@@ -1323,7 +1401,9 @@ bnx2_setup_serdes_phy(struct bnx2 *bp)
1323} 1401}
1324 1402
1325#define ETHTOOL_ALL_FIBRE_SPEED \ 1403#define ETHTOOL_ALL_FIBRE_SPEED \
1326 (ADVERTISED_1000baseT_Full) 1404 (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) ? \
1405 (ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
1406 (ADVERTISED_1000baseT_Full)
1327 1407
1328#define ETHTOOL_ALL_COPPER_SPEED \ 1408#define ETHTOOL_ALL_COPPER_SPEED \
1329 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \ 1409 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
@@ -1335,6 +1415,188 @@ bnx2_setup_serdes_phy(struct bnx2 *bp)
1335 1415
1336#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL) 1416#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1337 1417
1418static void
1419bnx2_set_default_remote_link(struct bnx2 *bp)
1420{
1421 u32 link;
1422
1423 if (bp->phy_port == PORT_TP)
1424 link = REG_RD_IND(bp, bp->shmem_base + BNX2_RPHY_COPPER_LINK);
1425 else
1426 link = REG_RD_IND(bp, bp->shmem_base + BNX2_RPHY_SERDES_LINK);
1427
1428 if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
1429 bp->req_line_speed = 0;
1430 bp->autoneg |= AUTONEG_SPEED;
1431 bp->advertising = ADVERTISED_Autoneg;
1432 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1433 bp->advertising |= ADVERTISED_10baseT_Half;
1434 if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
1435 bp->advertising |= ADVERTISED_10baseT_Full;
1436 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1437 bp->advertising |= ADVERTISED_100baseT_Half;
1438 if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
1439 bp->advertising |= ADVERTISED_100baseT_Full;
1440 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1441 bp->advertising |= ADVERTISED_1000baseT_Full;
1442 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1443 bp->advertising |= ADVERTISED_2500baseX_Full;
1444 } else {
1445 bp->autoneg = 0;
1446 bp->advertising = 0;
1447 bp->req_duplex = DUPLEX_FULL;
1448 if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
1449 bp->req_line_speed = SPEED_10;
1450 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1451 bp->req_duplex = DUPLEX_HALF;
1452 }
1453 if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
1454 bp->req_line_speed = SPEED_100;
1455 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1456 bp->req_duplex = DUPLEX_HALF;
1457 }
1458 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1459 bp->req_line_speed = SPEED_1000;
1460 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1461 bp->req_line_speed = SPEED_2500;
1462 }
1463}
1464
1465static void
1466bnx2_set_default_link(struct bnx2 *bp)
1467{
1468 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
1469 return bnx2_set_default_remote_link(bp);
1470
1471 bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1472 bp->req_line_speed = 0;
1473 if (bp->phy_flags & PHY_SERDES_FLAG) {
1474 u32 reg;
1475
1476 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1477
1478 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
1479 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1480 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1481 bp->autoneg = 0;
1482 bp->req_line_speed = bp->line_speed = SPEED_1000;
1483 bp->req_duplex = DUPLEX_FULL;
1484 }
1485 } else
1486 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1487}
1488
1489static void
1490bnx2_send_heart_beat(struct bnx2 *bp)
1491{
1492 u32 msg;
1493 u32 addr;
1494
1495 spin_lock(&bp->indirect_lock);
1496 msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
1497 addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
1498 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
1499 REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
1500 spin_unlock(&bp->indirect_lock);
1501}
1502
1503static void
1504bnx2_remote_phy_event(struct bnx2 *bp)
1505{
1506 u32 msg;
1507 u8 link_up = bp->link_up;
1508 u8 old_port;
1509
1510 msg = REG_RD_IND(bp, bp->shmem_base + BNX2_LINK_STATUS);
1511
1512 if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
1513 bnx2_send_heart_beat(bp);
1514
1515 msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;
1516
1517 if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
1518 bp->link_up = 0;
1519 else {
1520 u32 speed;
1521
1522 bp->link_up = 1;
1523 speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
1524 bp->duplex = DUPLEX_FULL;
1525 switch (speed) {
1526 case BNX2_LINK_STATUS_10HALF:
1527 bp->duplex = DUPLEX_HALF;
1528 case BNX2_LINK_STATUS_10FULL:
1529 bp->line_speed = SPEED_10;
1530 break;
1531 case BNX2_LINK_STATUS_100HALF:
1532 bp->duplex = DUPLEX_HALF;
1533 case BNX2_LINK_STATUS_100BASE_T4:
1534 case BNX2_LINK_STATUS_100FULL:
1535 bp->line_speed = SPEED_100;
1536 break;
1537 case BNX2_LINK_STATUS_1000HALF:
1538 bp->duplex = DUPLEX_HALF;
1539 case BNX2_LINK_STATUS_1000FULL:
1540 bp->line_speed = SPEED_1000;
1541 break;
1542 case BNX2_LINK_STATUS_2500HALF:
1543 bp->duplex = DUPLEX_HALF;
1544 case BNX2_LINK_STATUS_2500FULL:
1545 bp->line_speed = SPEED_2500;
1546 break;
1547 default:
1548 bp->line_speed = 0;
1549 break;
1550 }
1551
1552 spin_lock(&bp->phy_lock);
1553 bp->flow_ctrl = 0;
1554 if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
1555 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
1556 if (bp->duplex == DUPLEX_FULL)
1557 bp->flow_ctrl = bp->req_flow_ctrl;
1558 } else {
1559 if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
1560 bp->flow_ctrl |= FLOW_CTRL_TX;
1561 if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
1562 bp->flow_ctrl |= FLOW_CTRL_RX;
1563 }
1564
1565 old_port = bp->phy_port;
1566 if (msg & BNX2_LINK_STATUS_SERDES_LINK)
1567 bp->phy_port = PORT_FIBRE;
1568 else
1569 bp->phy_port = PORT_TP;
1570
1571 if (old_port != bp->phy_port)
1572 bnx2_set_default_link(bp);
1573
1574 spin_unlock(&bp->phy_lock);
1575 }
1576 if (bp->link_up != link_up)
1577 bnx2_report_link(bp);
1578
1579 bnx2_set_mac_link(bp);
1580}
1581
1582static int
1583bnx2_set_remote_link(struct bnx2 *bp)
1584{
1585 u32 evt_code;
1586
1587 evt_code = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_EVT_CODE_MB);
1588 switch (evt_code) {
1589 case BNX2_FW_EVT_CODE_LINK_EVENT:
1590 bnx2_remote_phy_event(bp);
1591 break;
1592 case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
1593 default:
1594 bnx2_send_heart_beat(bp);
1595 break;
1596 }
1597 return 0;
1598}
1599
1338static int 1600static int
1339bnx2_setup_copper_phy(struct bnx2 *bp) 1601bnx2_setup_copper_phy(struct bnx2 *bp)
1340{ 1602{
@@ -1433,13 +1695,13 @@ bnx2_setup_copper_phy(struct bnx2 *bp)
1433} 1695}
1434 1696
1435static int 1697static int
1436bnx2_setup_phy(struct bnx2 *bp) 1698bnx2_setup_phy(struct bnx2 *bp, u8 port)
1437{ 1699{
1438 if (bp->loopback == MAC_LOOPBACK) 1700 if (bp->loopback == MAC_LOOPBACK)
1439 return 0; 1701 return 0;
1440 1702
1441 if (bp->phy_flags & PHY_SERDES_FLAG) { 1703 if (bp->phy_flags & PHY_SERDES_FLAG) {
1442 return (bnx2_setup_serdes_phy(bp)); 1704 return (bnx2_setup_serdes_phy(bp, port));
1443 } 1705 }
1444 else { 1706 else {
1445 return (bnx2_setup_copper_phy(bp)); 1707 return (bnx2_setup_copper_phy(bp));
@@ -1659,6 +1921,9 @@ bnx2_init_phy(struct bnx2 *bp)
1659 1921
1660 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK); 1922 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
1661 1923
1924 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
1925 goto setup_phy;
1926
1662 bnx2_read_phy(bp, MII_PHYSID1, &val); 1927 bnx2_read_phy(bp, MII_PHYSID1, &val);
1663 bp->phy_id = val << 16; 1928 bp->phy_id = val << 16;
1664 bnx2_read_phy(bp, MII_PHYSID2, &val); 1929 bnx2_read_phy(bp, MII_PHYSID2, &val);
@@ -1676,7 +1941,9 @@ bnx2_init_phy(struct bnx2 *bp)
1676 rc = bnx2_init_copper_phy(bp); 1941 rc = bnx2_init_copper_phy(bp);
1677 } 1942 }
1678 1943
1679 bnx2_setup_phy(bp); 1944setup_phy:
1945 if (!rc)
1946 rc = bnx2_setup_phy(bp, bp->phy_port);
1680 1947
1681 return rc; 1948 return rc;
1682} 1949}
@@ -1984,6 +2251,9 @@ bnx2_phy_int(struct bnx2 *bp)
1984 bnx2_set_link(bp); 2251 bnx2_set_link(bp);
1985 spin_unlock(&bp->phy_lock); 2252 spin_unlock(&bp->phy_lock);
1986 } 2253 }
2254 if (bnx2_phy_event_is_set(bp, STATUS_ATTN_BITS_TIMER_ABORT))
2255 bnx2_set_remote_link(bp);
2256
1987} 2257}
1988 2258
1989static void 2259static void
@@ -2297,6 +2567,7 @@ bnx2_interrupt(int irq, void *dev_instance)
2297{ 2567{
2298 struct net_device *dev = dev_instance; 2568 struct net_device *dev = dev_instance;
2299 struct bnx2 *bp = netdev_priv(dev); 2569 struct bnx2 *bp = netdev_priv(dev);
2570 struct status_block *sblk = bp->status_blk;
2300 2571
2301 /* When using INTx, it is possible for the interrupt to arrive 2572 /* When using INTx, it is possible for the interrupt to arrive
2302 * at the CPU before the status block posted prior to the 2573 * at the CPU before the status block posted prior to the
@@ -2304,7 +2575,7 @@ bnx2_interrupt(int irq, void *dev_instance)
2304 * When using MSI, the MSI message will always complete after 2575 * When using MSI, the MSI message will always complete after
2305 * the status block write. 2576 * the status block write.
2306 */ 2577 */
2307 if ((bp->status_blk->status_idx == bp->last_status_idx) && 2578 if ((sblk->status_idx == bp->last_status_idx) &&
2308 (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) & 2579 (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
2309 BNX2_PCICFG_MISC_STATUS_INTA_VALUE)) 2580 BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
2310 return IRQ_NONE; 2581 return IRQ_NONE;
@@ -2313,16 +2584,25 @@ bnx2_interrupt(int irq, void *dev_instance)
2313 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM | 2584 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
2314 BNX2_PCICFG_INT_ACK_CMD_MASK_INT); 2585 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
2315 2586
2587 /* Read back to deassert IRQ immediately to avoid too many
2588 * spurious interrupts.
2589 */
2590 REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
2591
2316 /* Return here if interrupt is shared and is disabled. */ 2592 /* Return here if interrupt is shared and is disabled. */
2317 if (unlikely(atomic_read(&bp->intr_sem) != 0)) 2593 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2318 return IRQ_HANDLED; 2594 return IRQ_HANDLED;
2319 2595
2320 netif_rx_schedule(dev); 2596 if (netif_rx_schedule_prep(dev)) {
2597 bp->last_status_idx = sblk->status_idx;
2598 __netif_rx_schedule(dev);
2599 }
2321 2600
2322 return IRQ_HANDLED; 2601 return IRQ_HANDLED;
2323} 2602}
2324 2603
2325#define STATUS_ATTN_EVENTS STATUS_ATTN_BITS_LINK_STATE 2604#define STATUS_ATTN_EVENTS (STATUS_ATTN_BITS_LINK_STATE | \
2605 STATUS_ATTN_BITS_TIMER_ABORT)
2326 2606
2327static inline int 2607static inline int
2328bnx2_has_work(struct bnx2 *bp) 2608bnx2_has_work(struct bnx2 *bp)
@@ -3562,6 +3842,36 @@ nvram_write_end:
3562 return rc; 3842 return rc;
3563} 3843}
3564 3844
3845static void
3846bnx2_init_remote_phy(struct bnx2 *bp)
3847{
3848 u32 val;
3849
3850 bp->phy_flags &= ~REMOTE_PHY_CAP_FLAG;
3851 if (!(bp->phy_flags & PHY_SERDES_FLAG))
3852 return;
3853
3854 val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_CAP_MB);
3855 if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
3856 return;
3857
3858 if (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE) {
3859 if (netif_running(bp->dev)) {
3860 val = BNX2_DRV_ACK_CAP_SIGNATURE |
3861 BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
3862 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_ACK_CAP_MB,
3863 val);
3864 }
3865 bp->phy_flags |= REMOTE_PHY_CAP_FLAG;
3866
3867 val = REG_RD_IND(bp, bp->shmem_base + BNX2_LINK_STATUS);
3868 if (val & BNX2_LINK_STATUS_SERDES_LINK)
3869 bp->phy_port = PORT_FIBRE;
3870 else
3871 bp->phy_port = PORT_TP;
3872 }
3873}
3874
3565static int 3875static int
3566bnx2_reset_chip(struct bnx2 *bp, u32 reset_code) 3876bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
3567{ 3877{
@@ -3642,6 +3952,12 @@ bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
3642 if (rc) 3952 if (rc)
3643 return rc; 3953 return rc;
3644 3954
3955 spin_lock_bh(&bp->phy_lock);
3956 bnx2_init_remote_phy(bp);
3957 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
3958 bnx2_set_default_remote_link(bp);
3959 spin_unlock_bh(&bp->phy_lock);
3960
3645 if (CHIP_ID(bp) == CHIP_ID_5706_A0) { 3961 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
3646 /* Adjust the voltage regular to two steps lower. The default 3962 /* Adjust the voltage regular to two steps lower. The default
3647 * of this register is 0x0000000e. */ 3963 * of this register is 0x0000000e. */
@@ -3826,7 +4142,7 @@ bnx2_init_chip(struct bnx2 *bp)
3826 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET, 4142 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
3827 0); 4143 0);
3828 4144
3829 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, 0x5ffffff); 4145 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
3830 REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS); 4146 REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
3831 4147
3832 udelay(20); 4148 udelay(20);
@@ -4069,8 +4385,8 @@ bnx2_init_nic(struct bnx2 *bp)
4069 4385
4070 spin_lock_bh(&bp->phy_lock); 4386 spin_lock_bh(&bp->phy_lock);
4071 bnx2_init_phy(bp); 4387 bnx2_init_phy(bp);
4072 spin_unlock_bh(&bp->phy_lock);
4073 bnx2_set_link(bp); 4388 bnx2_set_link(bp);
4389 spin_unlock_bh(&bp->phy_lock);
4074 return 0; 4390 return 0;
4075} 4391}
4076 4392
@@ -4600,6 +4916,9 @@ bnx2_5706_serdes_timer(struct bnx2 *bp)
4600static void 4916static void
4601bnx2_5708_serdes_timer(struct bnx2 *bp) 4917bnx2_5708_serdes_timer(struct bnx2 *bp)
4602{ 4918{
4919 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
4920 return;
4921
4603 if ((bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) == 0) { 4922 if ((bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) == 0) {
4604 bp->serdes_an_pending = 0; 4923 bp->serdes_an_pending = 0;
4605 return; 4924 return;
@@ -4631,7 +4950,6 @@ static void
4631bnx2_timer(unsigned long data) 4950bnx2_timer(unsigned long data)
4632{ 4951{
4633 struct bnx2 *bp = (struct bnx2 *) data; 4952 struct bnx2 *bp = (struct bnx2 *) data;
4634 u32 msg;
4635 4953
4636 if (!netif_running(bp->dev)) 4954 if (!netif_running(bp->dev))
4637 return; 4955 return;
@@ -4639,8 +4957,7 @@ bnx2_timer(unsigned long data)
4639 if (atomic_read(&bp->intr_sem) != 0) 4957 if (atomic_read(&bp->intr_sem) != 0)
4640 goto bnx2_restart_timer; 4958 goto bnx2_restart_timer;
4641 4959
4642 msg = (u32) ++bp->fw_drv_pulse_wr_seq; 4960 bnx2_send_heart_beat(bp);
4643 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_PULSE_MB, msg);
4644 4961
4645 bp->stats_blk->stat_FwRxDrop = REG_RD_IND(bp, BNX2_FW_RX_DROP_COUNT); 4962 bp->stats_blk->stat_FwRxDrop = REG_RD_IND(bp, BNX2_FW_RX_DROP_COUNT);
4646 4963
@@ -5083,17 +5400,25 @@ static int
5083bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) 5400bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
5084{ 5401{
5085 struct bnx2 *bp = netdev_priv(dev); 5402 struct bnx2 *bp = netdev_priv(dev);
5403 int support_serdes = 0, support_copper = 0;
5086 5404
5087 cmd->supported = SUPPORTED_Autoneg; 5405 cmd->supported = SUPPORTED_Autoneg;
5088 if (bp->phy_flags & PHY_SERDES_FLAG) { 5406 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
5407 support_serdes = 1;
5408 support_copper = 1;
5409 } else if (bp->phy_port == PORT_FIBRE)
5410 support_serdes = 1;
5411 else
5412 support_copper = 1;
5413
5414 if (support_serdes) {
5089 cmd->supported |= SUPPORTED_1000baseT_Full | 5415 cmd->supported |= SUPPORTED_1000baseT_Full |
5090 SUPPORTED_FIBRE; 5416 SUPPORTED_FIBRE;
5091 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) 5417 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
5092 cmd->supported |= SUPPORTED_2500baseX_Full; 5418 cmd->supported |= SUPPORTED_2500baseX_Full;
5093 5419
5094 cmd->port = PORT_FIBRE;
5095 } 5420 }
5096 else { 5421 if (support_copper) {
5097 cmd->supported |= SUPPORTED_10baseT_Half | 5422 cmd->supported |= SUPPORTED_10baseT_Half |
5098 SUPPORTED_10baseT_Full | 5423 SUPPORTED_10baseT_Full |
5099 SUPPORTED_100baseT_Half | 5424 SUPPORTED_100baseT_Half |
@@ -5101,9 +5426,10 @@ bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
5101 SUPPORTED_1000baseT_Full | 5426 SUPPORTED_1000baseT_Full |
5102 SUPPORTED_TP; 5427 SUPPORTED_TP;
5103 5428
5104 cmd->port = PORT_TP;
5105 } 5429 }
5106 5430
5431 spin_lock_bh(&bp->phy_lock);
5432 cmd->port = bp->phy_port;
5107 cmd->advertising = bp->advertising; 5433 cmd->advertising = bp->advertising;
5108 5434
5109 if (bp->autoneg & AUTONEG_SPEED) { 5435 if (bp->autoneg & AUTONEG_SPEED) {
@@ -5121,6 +5447,7 @@ bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
5121 cmd->speed = -1; 5447 cmd->speed = -1;
5122 cmd->duplex = -1; 5448 cmd->duplex = -1;
5123 } 5449 }
5450 spin_unlock_bh(&bp->phy_lock);
5124 5451
5125 cmd->transceiver = XCVR_INTERNAL; 5452 cmd->transceiver = XCVR_INTERNAL;
5126 cmd->phy_address = bp->phy_addr; 5453 cmd->phy_address = bp->phy_addr;
@@ -5136,6 +5463,15 @@ bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
5136 u8 req_duplex = bp->req_duplex; 5463 u8 req_duplex = bp->req_duplex;
5137 u16 req_line_speed = bp->req_line_speed; 5464 u16 req_line_speed = bp->req_line_speed;
5138 u32 advertising = bp->advertising; 5465 u32 advertising = bp->advertising;
5466 int err = -EINVAL;
5467
5468 spin_lock_bh(&bp->phy_lock);
5469
5470 if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
5471 goto err_out_unlock;
5472
5473 if (cmd->port != bp->phy_port && !(bp->phy_flags & REMOTE_PHY_CAP_FLAG))
5474 goto err_out_unlock;
5139 5475
5140 if (cmd->autoneg == AUTONEG_ENABLE) { 5476 if (cmd->autoneg == AUTONEG_ENABLE) {
5141 autoneg |= AUTONEG_SPEED; 5477 autoneg |= AUTONEG_SPEED;
@@ -5148,44 +5484,41 @@ bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
5148 (cmd->advertising == ADVERTISED_100baseT_Half) || 5484 (cmd->advertising == ADVERTISED_100baseT_Half) ||
5149 (cmd->advertising == ADVERTISED_100baseT_Full)) { 5485 (cmd->advertising == ADVERTISED_100baseT_Full)) {
5150 5486
5151 if (bp->phy_flags & PHY_SERDES_FLAG) 5487 if (cmd->port == PORT_FIBRE)
5152 return -EINVAL; 5488 goto err_out_unlock;
5153 5489
5154 advertising = cmd->advertising; 5490 advertising = cmd->advertising;
5155 5491
5156 } else if (cmd->advertising == ADVERTISED_2500baseX_Full) { 5492 } else if (cmd->advertising == ADVERTISED_2500baseX_Full) {
5157 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)) 5493 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) ||
5158 return -EINVAL; 5494 (cmd->port == PORT_TP))
5159 } else if (cmd->advertising == ADVERTISED_1000baseT_Full) { 5495 goto err_out_unlock;
5496 } else if (cmd->advertising == ADVERTISED_1000baseT_Full)
5160 advertising = cmd->advertising; 5497 advertising = cmd->advertising;
5161 } 5498 else if (cmd->advertising == ADVERTISED_1000baseT_Half)
5162 else if (cmd->advertising == ADVERTISED_1000baseT_Half) { 5499 goto err_out_unlock;
5163 return -EINVAL;
5164 }
5165 else { 5500 else {
5166 if (bp->phy_flags & PHY_SERDES_FLAG) { 5501 if (cmd->port == PORT_FIBRE)
5167 advertising = ETHTOOL_ALL_FIBRE_SPEED; 5502 advertising = ETHTOOL_ALL_FIBRE_SPEED;
5168 } 5503 else
5169 else {
5170 advertising = ETHTOOL_ALL_COPPER_SPEED; 5504 advertising = ETHTOOL_ALL_COPPER_SPEED;
5171 }
5172 } 5505 }
5173 advertising |= ADVERTISED_Autoneg; 5506 advertising |= ADVERTISED_Autoneg;
5174 } 5507 }
5175 else { 5508 else {
5176 if (bp->phy_flags & PHY_SERDES_FLAG) { 5509 if (cmd->port == PORT_FIBRE) {
5177 if ((cmd->speed != SPEED_1000 && 5510 if ((cmd->speed != SPEED_1000 &&
5178 cmd->speed != SPEED_2500) || 5511 cmd->speed != SPEED_2500) ||
5179 (cmd->duplex != DUPLEX_FULL)) 5512 (cmd->duplex != DUPLEX_FULL))
5180 return -EINVAL; 5513 goto err_out_unlock;
5181 5514
5182 if (cmd->speed == SPEED_2500 && 5515 if (cmd->speed == SPEED_2500 &&
5183 !(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)) 5516 !(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
5184 return -EINVAL; 5517 goto err_out_unlock;
5185 }
5186 else if (cmd->speed == SPEED_1000) {
5187 return -EINVAL;
5188 } 5518 }
5519 else if (cmd->speed == SPEED_1000 || cmd->speed == SPEED_2500)
5520 goto err_out_unlock;
5521
5189 autoneg &= ~AUTONEG_SPEED; 5522 autoneg &= ~AUTONEG_SPEED;
5190 req_line_speed = cmd->speed; 5523 req_line_speed = cmd->speed;
5191 req_duplex = cmd->duplex; 5524 req_duplex = cmd->duplex;
@@ -5197,13 +5530,12 @@ bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
5197 bp->req_line_speed = req_line_speed; 5530 bp->req_line_speed = req_line_speed;
5198 bp->req_duplex = req_duplex; 5531 bp->req_duplex = req_duplex;
5199 5532
5200 spin_lock_bh(&bp->phy_lock); 5533 err = bnx2_setup_phy(bp, cmd->port);
5201
5202 bnx2_setup_phy(bp);
5203 5534
5535err_out_unlock:
5204 spin_unlock_bh(&bp->phy_lock); 5536 spin_unlock_bh(&bp->phy_lock);
5205 5537
5206 return 0; 5538 return err;
5207} 5539}
5208 5540
5209static void 5541static void
@@ -5214,11 +5546,7 @@ bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
5214 strcpy(info->driver, DRV_MODULE_NAME); 5546 strcpy(info->driver, DRV_MODULE_NAME);
5215 strcpy(info->version, DRV_MODULE_VERSION); 5547 strcpy(info->version, DRV_MODULE_VERSION);
5216 strcpy(info->bus_info, pci_name(bp->pdev)); 5548 strcpy(info->bus_info, pci_name(bp->pdev));
5217 info->fw_version[0] = ((bp->fw_ver & 0xff000000) >> 24) + '0'; 5549 strcpy(info->fw_version, bp->fw_version);
5218 info->fw_version[2] = ((bp->fw_ver & 0xff0000) >> 16) + '0';
5219 info->fw_version[4] = ((bp->fw_ver & 0xff00) >> 8) + '0';
5220 info->fw_version[1] = info->fw_version[3] = '.';
5221 info->fw_version[5] = 0;
5222} 5550}
5223 5551
5224#define BNX2_REGDUMP_LEN (32 * 1024) 5552#define BNX2_REGDUMP_LEN (32 * 1024)
@@ -5330,6 +5658,14 @@ bnx2_nway_reset(struct net_device *dev)
5330 5658
5331 spin_lock_bh(&bp->phy_lock); 5659 spin_lock_bh(&bp->phy_lock);
5332 5660
5661 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
5662 int rc;
5663
5664 rc = bnx2_setup_remote_phy(bp, bp->phy_port);
5665 spin_unlock_bh(&bp->phy_lock);
5666 return rc;
5667 }
5668
5333 /* Force a link down visible on the other side */ 5669 /* Force a link down visible on the other side */
5334 if (bp->phy_flags & PHY_SERDES_FLAG) { 5670 if (bp->phy_flags & PHY_SERDES_FLAG) {
5335 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK); 5671 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
@@ -5543,7 +5879,7 @@ bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5543 5879
5544 spin_lock_bh(&bp->phy_lock); 5880 spin_lock_bh(&bp->phy_lock);
5545 5881
5546 bnx2_setup_phy(bp); 5882 bnx2_setup_phy(bp, bp->phy_port);
5547 5883
5548 spin_unlock_bh(&bp->phy_lock); 5884 spin_unlock_bh(&bp->phy_lock);
5549 5885
@@ -5882,7 +6218,7 @@ bnx2_set_tx_csum(struct net_device *dev, u32 data)
5882 struct bnx2 *bp = netdev_priv(dev); 6218 struct bnx2 *bp = netdev_priv(dev);
5883 6219
5884 if (CHIP_NUM(bp) == CHIP_NUM_5709) 6220 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5885 return (ethtool_op_set_tx_hw_csum(dev, data)); 6221 return (ethtool_op_set_tx_ipv6_csum(dev, data));
5886 else 6222 else
5887 return (ethtool_op_set_tx_csum(dev, data)); 6223 return (ethtool_op_set_tx_csum(dev, data));
5888} 6224}
@@ -5939,6 +6275,9 @@ bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
5939 case SIOCGMIIREG: { 6275 case SIOCGMIIREG: {
5940 u32 mii_regval; 6276 u32 mii_regval;
5941 6277
6278 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
6279 return -EOPNOTSUPP;
6280
5942 if (!netif_running(dev)) 6281 if (!netif_running(dev))
5943 return -EAGAIN; 6282 return -EAGAIN;
5944 6283
@@ -5955,6 +6294,9 @@ bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
5955 if (!capable(CAP_NET_ADMIN)) 6294 if (!capable(CAP_NET_ADMIN))
5956 return -EPERM; 6295 return -EPERM;
5957 6296
6297 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
6298 return -EOPNOTSUPP;
6299
5958 if (!netif_running(dev)) 6300 if (!netif_running(dev))
5959 return -EAGAIN; 6301 return -EAGAIN;
5960 6302
@@ -6116,7 +6458,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
6116{ 6458{
6117 struct bnx2 *bp; 6459 struct bnx2 *bp;
6118 unsigned long mem_len; 6460 unsigned long mem_len;
6119 int rc; 6461 int rc, i, j;
6120 u32 reg; 6462 u32 reg;
6121 u64 dma_mask, persist_dma_mask; 6463 u64 dma_mask, persist_dma_mask;
6122 6464
@@ -6273,7 +6615,35 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
6273 goto err_out_unmap; 6615 goto err_out_unmap;
6274 } 6616 }
6275 6617
6276 bp->fw_ver = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_BC_REV); 6618 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_BC_REV);
6619 for (i = 0, j = 0; i < 3; i++) {
6620 u8 num, k, skip0;
6621
6622 num = (u8) (reg >> (24 - (i * 8)));
6623 for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
6624 if (num >= k || !skip0 || k == 1) {
6625 bp->fw_version[j++] = (num / k) + '0';
6626 skip0 = 0;
6627 }
6628 }
6629 if (i != 2)
6630 bp->fw_version[j++] = '.';
6631 }
6632 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_BC_STATE_CONDITION);
6633 reg &= BNX2_CONDITION_MFW_RUN_MASK;
6634 if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
6635 reg != BNX2_CONDITION_MFW_RUN_NONE) {
6636 int i;
6637 u32 addr = REG_RD_IND(bp, bp->shmem_base + BNX2_MFW_VER_PTR);
6638
6639 bp->fw_version[j++] = ' ';
6640 for (i = 0; i < 3; i++) {
6641 reg = REG_RD_IND(bp, addr + i * 4);
6642 reg = swab32(reg);
6643 memcpy(&bp->fw_version[j], &reg, 4);
6644 j += 4;
6645 }
6646 }
6277 6647
6278 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_UPPER); 6648 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_UPPER);
6279 bp->mac_addr[0] = (u8) (reg >> 8); 6649 bp->mac_addr[0] = (u8) (reg >> 8);
@@ -6315,7 +6685,9 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
6315 else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT) 6685 else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
6316 bp->phy_flags |= PHY_SERDES_FLAG; 6686 bp->phy_flags |= PHY_SERDES_FLAG;
6317 6687
6688 bp->phy_port = PORT_TP;
6318 if (bp->phy_flags & PHY_SERDES_FLAG) { 6689 if (bp->phy_flags & PHY_SERDES_FLAG) {
6690 bp->phy_port = PORT_FIBRE;
6319 bp->flags |= NO_WOL_FLAG; 6691 bp->flags |= NO_WOL_FLAG;
6320 if (CHIP_NUM(bp) != CHIP_NUM_5706) { 6692 if (CHIP_NUM(bp) != CHIP_NUM_5706) {
6321 bp->phy_addr = 2; 6693 bp->phy_addr = 2;
@@ -6324,6 +6696,8 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
6324 if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G) 6696 if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
6325 bp->phy_flags |= PHY_2_5G_CAPABLE_FLAG; 6697 bp->phy_flags |= PHY_2_5G_CAPABLE_FLAG;
6326 } 6698 }
6699 bnx2_init_remote_phy(bp);
6700
6327 } else if (CHIP_NUM(bp) == CHIP_NUM_5706 || 6701 } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
6328 CHIP_NUM(bp) == CHIP_NUM_5708) 6702 CHIP_NUM(bp) == CHIP_NUM_5708)
6329 bp->phy_flags |= PHY_CRC_FIX_FLAG; 6703 bp->phy_flags |= PHY_CRC_FIX_FLAG;
@@ -6363,10 +6737,9 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
6363 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD, 6737 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
6364 PCI_DEVICE_ID_AMD_8132_BRIDGE, 6738 PCI_DEVICE_ID_AMD_8132_BRIDGE,
6365 amd_8132))) { 6739 amd_8132))) {
6366 u8 rev;
6367 6740
6368 pci_read_config_byte(amd_8132, PCI_REVISION_ID, &rev); 6741 if (amd_8132->revision >= 0x10 &&
6369 if (rev >= 0x10 && rev <= 0x13) { 6742 amd_8132->revision <= 0x13) {
6370 disable_msi = 1; 6743 disable_msi = 1;
6371 pci_dev_put(amd_8132); 6744 pci_dev_put(amd_8132);
6372 break; 6745 break;
@@ -6374,23 +6747,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
6374 } 6747 }
6375 } 6748 }
6376 6749
6377 bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL; 6750 bnx2_set_default_link(bp);
6378 bp->req_line_speed = 0;
6379 if (bp->phy_flags & PHY_SERDES_FLAG) {
6380 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
6381
6382 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
6383 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
6384 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
6385 bp->autoneg = 0;
6386 bp->req_line_speed = bp->line_speed = SPEED_1000;
6387 bp->req_duplex = DUPLEX_FULL;
6388 }
6389 }
6390 else {
6391 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
6392 }
6393
6394 bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX; 6751 bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
6395 6752
6396 init_timer(&bp->timer); 6753 init_timer(&bp->timer);
@@ -6490,10 +6847,10 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
6490 memcpy(dev->perm_addr, bp->mac_addr, 6); 6847 memcpy(dev->perm_addr, bp->mac_addr, 6);
6491 bp->name = board_info[ent->driver_data].name; 6848 bp->name = board_info[ent->driver_data].name;
6492 6849
6850 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
6493 if (CHIP_NUM(bp) == CHIP_NUM_5709) 6851 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6494 dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG; 6852 dev->features |= NETIF_F_IPV6_CSUM;
6495 else 6853
6496 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
6497#ifdef BCM_VLAN 6854#ifdef BCM_VLAN
6498 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; 6855 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
6499#endif 6856#endif
diff --git a/drivers/net/bnx2.h b/drivers/net/bnx2.h
index 49a5de253b17..d8cd1afeb23d 100644
--- a/drivers/net/bnx2.h
+++ b/drivers/net/bnx2.h
@@ -6338,6 +6338,8 @@ struct l2_fhdr {
6338 6338
6339#define RX_COPY_THRESH 92 6339#define RX_COPY_THRESH 92
6340 6340
6341#define BNX2_MISC_ENABLE_DEFAULT 0x7ffffff
6342
6341#define DMA_READ_CHANS 5 6343#define DMA_READ_CHANS 5
6342#define DMA_WRITE_CHANS 3 6344#define DMA_WRITE_CHANS 3
6343 6345
@@ -6537,6 +6539,7 @@ struct bnx2 {
6537#define PHY_INT_MODE_AUTO_POLLING_FLAG 0x100 6539#define PHY_INT_MODE_AUTO_POLLING_FLAG 0x100
6538#define PHY_INT_MODE_LINK_READY_FLAG 0x200 6540#define PHY_INT_MODE_LINK_READY_FLAG 0x200
6539#define PHY_DIS_EARLY_DAC_FLAG 0x400 6541#define PHY_DIS_EARLY_DAC_FLAG 0x400
6542#define REMOTE_PHY_CAP_FLAG 0x800
6540 6543
6541 u32 mii_bmcr; 6544 u32 mii_bmcr;
6542 u32 mii_bmsr; 6545 u32 mii_bmsr;
@@ -6625,6 +6628,7 @@ struct bnx2 {
6625 u16 req_line_speed; 6628 u16 req_line_speed;
6626 u8 req_duplex; 6629 u8 req_duplex;
6627 6630
6631 u8 phy_port;
6628 u8 link_up; 6632 u8 link_up;
6629 6633
6630 u16 line_speed; 6634 u16 line_speed;
@@ -6656,7 +6660,7 @@ struct bnx2 {
6656 6660
6657 u32 shmem_base; 6661 u32 shmem_base;
6658 6662
6659 u32 fw_ver; 6663 char fw_version[32];
6660 6664
6661 int pm_cap; 6665 int pm_cap;
6662 int pcix_cap; 6666 int pcix_cap;
@@ -6770,7 +6774,7 @@ struct fw_info {
6770 * the firmware has timed out, the driver will assume there is no firmware 6774 * the firmware has timed out, the driver will assume there is no firmware
6771 * running and there won't be any firmware-driver synchronization during a 6775 * running and there won't be any firmware-driver synchronization during a
6772 * driver reset. */ 6776 * driver reset. */
6773#define FW_ACK_TIME_OUT_MS 100 6777#define FW_ACK_TIME_OUT_MS 1000
6774 6778
6775 6779
6776#define BNX2_DRV_RESET_SIGNATURE 0x00000000 6780#define BNX2_DRV_RESET_SIGNATURE 0x00000000
@@ -6788,6 +6792,7 @@ struct fw_info {
6788#define BNX2_DRV_MSG_CODE_DIAG 0x07000000 6792#define BNX2_DRV_MSG_CODE_DIAG 0x07000000
6789#define BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL 0x09000000 6793#define BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL 0x09000000
6790#define BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN 0x0b000000 6794#define BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN 0x0b000000
6795#define BNX2_DRV_MSG_CODE_CMD_SET_LINK 0x10000000
6791 6796
6792#define BNX2_DRV_MSG_DATA 0x00ff0000 6797#define BNX2_DRV_MSG_DATA 0x00ff0000
6793#define BNX2_DRV_MSG_DATA_WAIT0 0x00010000 6798#define BNX2_DRV_MSG_DATA_WAIT0 0x00010000
@@ -6836,6 +6841,7 @@ struct fw_info {
6836#define BNX2_LINK_STATUS_SERDES_LINK (1<<20) 6841#define BNX2_LINK_STATUS_SERDES_LINK (1<<20)
6837#define BNX2_LINK_STATUS_PARTNER_AD_2500FULL (1<<21) 6842#define BNX2_LINK_STATUS_PARTNER_AD_2500FULL (1<<21)
6838#define BNX2_LINK_STATUS_PARTNER_AD_2500HALF (1<<22) 6843#define BNX2_LINK_STATUS_PARTNER_AD_2500HALF (1<<22)
6844#define BNX2_LINK_STATUS_HEART_BEAT_EXPIRED (1<<31)
6839 6845
6840#define BNX2_DRV_PULSE_MB 0x00000010 6846#define BNX2_DRV_PULSE_MB 0x00000010
6841#define BNX2_DRV_PULSE_SEQ_MASK 0x00007fff 6847#define BNX2_DRV_PULSE_SEQ_MASK 0x00007fff
@@ -6845,6 +6851,30 @@ struct fw_info {
6845 * This is used for debugging. */ 6851 * This is used for debugging. */
6846#define BNX2_DRV_MSG_DATA_PULSE_CODE_ALWAYS_ALIVE 0x00080000 6852#define BNX2_DRV_MSG_DATA_PULSE_CODE_ALWAYS_ALIVE 0x00080000
6847 6853
6854#define BNX2_DRV_MB_ARG0 0x00000014
6855#define BNX2_NETLINK_SET_LINK_SPEED_10HALF (1<<0)
6856#define BNX2_NETLINK_SET_LINK_SPEED_10FULL (1<<1)
6857#define BNX2_NETLINK_SET_LINK_SPEED_10 \
6858 (BNX2_NETLINK_SET_LINK_SPEED_10HALF | \
6859 BNX2_NETLINK_SET_LINK_SPEED_10FULL)
6860#define BNX2_NETLINK_SET_LINK_SPEED_100HALF (1<<2)
6861#define BNX2_NETLINK_SET_LINK_SPEED_100FULL (1<<3)
6862#define BNX2_NETLINK_SET_LINK_SPEED_100 \
6863 (BNX2_NETLINK_SET_LINK_SPEED_100HALF | \
6864 BNX2_NETLINK_SET_LINK_SPEED_100FULL)
6865#define BNX2_NETLINK_SET_LINK_SPEED_1GHALF (1<<4)
6866#define BNX2_NETLINK_SET_LINK_SPEED_1GFULL (1<<5)
6867#define BNX2_NETLINK_SET_LINK_SPEED_2G5HALF (1<<6)
6868#define BNX2_NETLINK_SET_LINK_SPEED_2G5FULL (1<<7)
6869#define BNX2_NETLINK_SET_LINK_SPEED_10GHALF (1<<8)
6870#define BNX2_NETLINK_SET_LINK_SPEED_10GFULL (1<<9)
6871#define BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG (1<<10)
6872#define BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE (1<<11)
6873#define BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE (1<<12)
6874#define BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE (1<<13)
6875#define BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED (1<<14)
6876#define BNX2_NETLINK_SET_LINK_PHY_RESET (1<<15)
6877
6848#define BNX2_DEV_INFO_SIGNATURE 0x00000020 6878#define BNX2_DEV_INFO_SIGNATURE 0x00000020
6849#define BNX2_DEV_INFO_SIGNATURE_MAGIC 0x44564900 6879#define BNX2_DEV_INFO_SIGNATURE_MAGIC 0x44564900
6850#define BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK 0xffffff00 6880#define BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK 0xffffff00
@@ -7006,6 +7036,8 @@ struct fw_info {
7006#define BNX2_PORT_FEATURE_MBA_VLAN_TAG_MASK 0xffff 7036#define BNX2_PORT_FEATURE_MBA_VLAN_TAG_MASK 0xffff
7007#define BNX2_PORT_FEATURE_MBA_VLAN_ENABLE 0x10000 7037#define BNX2_PORT_FEATURE_MBA_VLAN_ENABLE 0x10000
7008 7038
7039#define BNX2_MFW_VER_PTR 0x00000014c
7040
7009#define BNX2_BC_STATE_RESET_TYPE 0x000001c0 7041#define BNX2_BC_STATE_RESET_TYPE 0x000001c0
7010#define BNX2_BC_STATE_RESET_TYPE_SIG 0x00005254 7042#define BNX2_BC_STATE_RESET_TYPE_SIG 0x00005254
7011#define BNX2_BC_STATE_RESET_TYPE_SIG_MASK 0x0000ffff 7043#define BNX2_BC_STATE_RESET_TYPE_SIG_MASK 0x0000ffff
@@ -7059,12 +7091,42 @@ struct fw_info {
7059#define BNX2_BC_STATE_ERR_NO_RXP (BNX2_BC_STATE_SIGN | 0x0600) 7091#define BNX2_BC_STATE_ERR_NO_RXP (BNX2_BC_STATE_SIGN | 0x0600)
7060#define BNX2_BC_STATE_ERR_TOO_MANY_RBUF (BNX2_BC_STATE_SIGN | 0x0700) 7092#define BNX2_BC_STATE_ERR_TOO_MANY_RBUF (BNX2_BC_STATE_SIGN | 0x0700)
7061 7093
7094#define BNX2_BC_STATE_CONDITION 0x000001c8
7095#define BNX2_CONDITION_MFW_RUN_UNKNOWN 0x00000000
7096#define BNX2_CONDITION_MFW_RUN_IPMI 0x00002000
7097#define BNX2_CONDITION_MFW_RUN_UMP 0x00004000
7098#define BNX2_CONDITION_MFW_RUN_NCSI 0x00006000
7099#define BNX2_CONDITION_MFW_RUN_NONE 0x0000e000
7100#define BNX2_CONDITION_MFW_RUN_MASK 0x0000e000
7101
7062#define BNX2_BC_STATE_DEBUG_CMD 0x1dc 7102#define BNX2_BC_STATE_DEBUG_CMD 0x1dc
7063#define BNX2_BC_STATE_BC_DBG_CMD_SIGNATURE 0x42440000 7103#define BNX2_BC_STATE_BC_DBG_CMD_SIGNATURE 0x42440000
7064#define BNX2_BC_STATE_BC_DBG_CMD_SIGNATURE_MASK 0xffff0000 7104#define BNX2_BC_STATE_BC_DBG_CMD_SIGNATURE_MASK 0xffff0000
7065#define BNX2_BC_STATE_BC_DBG_CMD_LOOP_CNT_MASK 0xffff 7105#define BNX2_BC_STATE_BC_DBG_CMD_LOOP_CNT_MASK 0xffff
7066#define BNX2_BC_STATE_BC_DBG_CMD_LOOP_INFINITE 0xffff 7106#define BNX2_BC_STATE_BC_DBG_CMD_LOOP_INFINITE 0xffff
7067 7107
7108#define BNX2_FW_EVT_CODE_MB 0x354
7109#define BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT 0x00000000
7110#define BNX2_FW_EVT_CODE_LINK_EVENT 0x00000001
7111
7112#define BNX2_DRV_ACK_CAP_MB 0x364
7113#define BNX2_DRV_ACK_CAP_SIGNATURE 0x35450000
7114#define BNX2_CAPABILITY_SIGNATURE_MASK 0xFFFF0000
7115
7116#define BNX2_FW_CAP_MB 0x368
7117#define BNX2_FW_CAP_SIGNATURE 0xaa550000
7118#define BNX2_FW_ACK_DRV_SIGNATURE 0x52500000
7119#define BNX2_FW_CAP_SIGNATURE_MASK 0xffff0000
7120#define BNX2_FW_CAP_REMOTE_PHY_CAPABLE 0x00000001
7121#define BNX2_FW_CAP_REMOTE_PHY_PRESENT 0x00000002
7122
7123#define BNX2_RPHY_SIGNATURE 0x36c
7124#define BNX2_RPHY_LOAD_SIGNATURE 0x5a5a5a5a
7125
7126#define BNX2_RPHY_FLAGS 0x370
7127#define BNX2_RPHY_SERDES_LINK 0x374
7128#define BNX2_RPHY_COPPER_LINK 0x378
7129
7068#define HOST_VIEW_SHMEM_BASE 0x167c00 7130#define HOST_VIEW_SHMEM_BASE 0x167c00
7069 7131
7070#endif 7132#endif
diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c
index 59b9943b077d..f6e4030c73d1 100644
--- a/drivers/net/cassini.c
+++ b/drivers/net/cassini.c
@@ -3422,21 +3422,19 @@ done:
3422static void cas_check_pci_invariants(struct cas *cp) 3422static void cas_check_pci_invariants(struct cas *cp)
3423{ 3423{
3424 struct pci_dev *pdev = cp->pdev; 3424 struct pci_dev *pdev = cp->pdev;
3425 u8 rev;
3426 3425
3427 cp->cas_flags = 0; 3426 cp->cas_flags = 0;
3428 pci_read_config_byte(pdev, PCI_REVISION_ID, &rev);
3429 if ((pdev->vendor == PCI_VENDOR_ID_SUN) && 3427 if ((pdev->vendor == PCI_VENDOR_ID_SUN) &&
3430 (pdev->device == PCI_DEVICE_ID_SUN_CASSINI)) { 3428 (pdev->device == PCI_DEVICE_ID_SUN_CASSINI)) {
3431 if (rev >= CAS_ID_REVPLUS) 3429 if (pdev->revision >= CAS_ID_REVPLUS)
3432 cp->cas_flags |= CAS_FLAG_REG_PLUS; 3430 cp->cas_flags |= CAS_FLAG_REG_PLUS;
3433 if (rev < CAS_ID_REVPLUS02u) 3431 if (pdev->revision < CAS_ID_REVPLUS02u)
3434 cp->cas_flags |= CAS_FLAG_TARGET_ABORT; 3432 cp->cas_flags |= CAS_FLAG_TARGET_ABORT;
3435 3433
3436 /* Original Cassini supports HW CSUM, but it's not 3434 /* Original Cassini supports HW CSUM, but it's not
3437 * enabled by default as it can trigger TX hangs. 3435 * enabled by default as it can trigger TX hangs.
3438 */ 3436 */
3439 if (rev < CAS_ID_REV2) 3437 if (pdev->revision < CAS_ID_REV2)
3440 cp->cas_flags |= CAS_FLAG_NO_HW_CSUM; 3438 cp->cas_flags |= CAS_FLAG_NO_HW_CSUM;
3441 } else { 3439 } else {
3442 /* Only sun has original cassini chips. */ 3440 /* Only sun has original cassini chips. */
@@ -4919,13 +4917,13 @@ static int __devinit cas_init_one(struct pci_dev *pdev,
4919 pci_cmd &= ~PCI_COMMAND_SERR; 4917 pci_cmd &= ~PCI_COMMAND_SERR;
4920 pci_cmd |= PCI_COMMAND_PARITY; 4918 pci_cmd |= PCI_COMMAND_PARITY;
4921 pci_write_config_word(pdev, PCI_COMMAND, pci_cmd); 4919 pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
4922 if (pci_set_mwi(pdev)) 4920 if (pci_try_set_mwi(pdev))
4923 printk(KERN_WARNING PFX "Could not enable MWI for %s\n", 4921 printk(KERN_WARNING PFX "Could not enable MWI for %s\n",
4924 pci_name(pdev)); 4922 pci_name(pdev));
4925 4923
4926 /* 4924 /*
4927 * On some architectures, the default cache line size set 4925 * On some architectures, the default cache line size set
4928 * by pci_set_mwi reduces perforamnce. We have to increase 4926 * by pci_try_set_mwi reduces perforamnce. We have to increase
4929 * it for this case. To start, we'll print some configuration 4927 * it for this case. To start, we'll print some configuration
4930 * data. 4928 * data.
4931 */ 4929 */
diff --git a/drivers/net/cxgb3/version.h b/drivers/net/cxgb3/version.h
index 8eddd23a3a51..eb508bf8022a 100644
--- a/drivers/net/cxgb3/version.h
+++ b/drivers/net/cxgb3/version.h
@@ -39,6 +39,6 @@
39 39
40/* Firmware version */ 40/* Firmware version */
41#define FW_VERSION_MAJOR 4 41#define FW_VERSION_MAJOR 4
42#define FW_VERSION_MINOR 1 42#define FW_VERSION_MINOR 3
43#define FW_VERSION_MICRO 0 43#define FW_VERSION_MICRO 0
44#endif /* __CHELSIO_VERSION_H */ 44#endif /* __CHELSIO_VERSION_H */
diff --git a/drivers/net/dl2k.c b/drivers/net/dl2k.c
index 74ec64a1625d..04e3710c9082 100644
--- a/drivers/net/dl2k.c
+++ b/drivers/net/dl2k.c
@@ -250,7 +250,6 @@ rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent)
250 np->an_enable = 1; 250 np->an_enable = 1;
251 mii_set_media (dev); 251 mii_set_media (dev);
252 } 252 }
253 pci_read_config_byte(pdev, PCI_REVISION_ID, &np->pci_rev_id);
254 253
255 err = register_netdev (dev); 254 err = register_netdev (dev);
256 if (err) 255 if (err)
@@ -866,9 +865,9 @@ receive_packet (struct net_device *dev)
866 PCI_DMA_FROMDEVICE); 865 PCI_DMA_FROMDEVICE);
867 /* 16 byte align the IP header */ 866 /* 16 byte align the IP header */
868 skb_reserve (skb, 2); 867 skb_reserve (skb, 2);
869 eth_copy_and_sum (skb, 868 skb_copy_to_linear_data (skb,
870 np->rx_skbuff[entry]->data, 869 np->rx_skbuff[entry]->data,
871 pkt_len, 0); 870 pkt_len);
872 skb_put (skb, pkt_len); 871 skb_put (skb, pkt_len);
873 pci_dma_sync_single_for_device(np->pdev, 872 pci_dma_sync_single_for_device(np->pdev,
874 desc->fraginfo & 873 desc->fraginfo &
@@ -879,7 +878,7 @@ receive_packet (struct net_device *dev)
879 skb->protocol = eth_type_trans (skb, dev); 878 skb->protocol = eth_type_trans (skb, dev);
880#if 0 879#if 0
881 /* Checksum done by hw, but csum value unavailable. */ 880 /* Checksum done by hw, but csum value unavailable. */
882 if (np->pci_rev_id >= 0x0c && 881 if (np->pdev->pci_rev_id >= 0x0c &&
883 !(frame_status & (TCPError | UDPError | IPError))) { 882 !(frame_status & (TCPError | UDPError | IPError))) {
884 skb->ip_summed = CHECKSUM_UNNECESSARY; 883 skb->ip_summed = CHECKSUM_UNNECESSARY;
885 } 884 }
diff --git a/drivers/net/dl2k.h b/drivers/net/dl2k.h
index 814c449c359f..e443065a452e 100644
--- a/drivers/net/dl2k.h
+++ b/drivers/net/dl2k.h
@@ -668,7 +668,6 @@ struct netdev_private {
668 unsigned int rx_flow:1; /* Rx flow control enable */ 668 unsigned int rx_flow:1; /* Rx flow control enable */
669 unsigned int phy_media:1; /* 1: fiber, 0: copper */ 669 unsigned int phy_media:1; /* 1: fiber, 0: copper */
670 unsigned int link_status:1; /* Current link status */ 670 unsigned int link_status:1; /* Current link status */
671 unsigned char pci_rev_id; /* PCI revision ID */
672 struct netdev_desc *last_tx; /* Last Tx descriptor used. */ 671 struct netdev_desc *last_tx; /* Last Tx descriptor used. */
673 unsigned long cur_rx, old_rx; /* Producer/consumer ring indices */ 672 unsigned long cur_rx, old_rx; /* Producer/consumer ring indices */
674 unsigned long cur_tx, old_tx; 673 unsigned long cur_tx, old_tx;
diff --git a/drivers/net/dm9000.c b/drivers/net/dm9000.c
index 264fa0e2e075..c3de81bf090a 100644
--- a/drivers/net/dm9000.c
+++ b/drivers/net/dm9000.c
@@ -104,6 +104,18 @@
104#define PRINTK(args...) printk(KERN_DEBUG args) 104#define PRINTK(args...) printk(KERN_DEBUG args)
105#endif 105#endif
106 106
107#ifdef CONFIG_BLACKFIN
108#define readsb insb
109#define readsw insw
110#define readsl insl
111#define writesb outsb
112#define writesw outsw
113#define writesl outsl
114#define DM9000_IRQ_FLAGS (IRQF_SHARED | IRQF_TRIGGER_HIGH)
115#else
116#define DM9000_IRQ_FLAGS IRQF_SHARED
117#endif
118
107/* 119/*
108 * Transmit timeout, default 5 seconds. 120 * Transmit timeout, default 5 seconds.
109 */ 121 */
@@ -431,6 +443,9 @@ dm9000_probe(struct platform_device *pdev)
431 db->io_addr = (void __iomem *)base; 443 db->io_addr = (void __iomem *)base;
432 db->io_data = (void __iomem *)(base + 4); 444 db->io_data = (void __iomem *)(base + 4);
433 445
446 /* ensure at least we have a default set of IO routines */
447 dm9000_set_io(db, 2);
448
434 } else { 449 } else {
435 db->addr_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 450 db->addr_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
436 db->data_res = platform_get_resource(pdev, IORESOURCE_MEM, 1); 451 db->data_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
@@ -614,7 +629,7 @@ dm9000_open(struct net_device *dev)
614 629
615 PRINTK2("entering dm9000_open\n"); 630 PRINTK2("entering dm9000_open\n");
616 631
617 if (request_irq(dev->irq, &dm9000_interrupt, IRQF_SHARED, dev->name, dev)) 632 if (request_irq(dev->irq, &dm9000_interrupt, DM9000_IRQ_FLAGS, dev->name, dev))
618 return -EAGAIN; 633 return -EAGAIN;
619 634
620 /* Initialize DM9000 board */ 635 /* Initialize DM9000 board */
diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c
index 60673bc292c0..756a6bcb038d 100644
--- a/drivers/net/dummy.c
+++ b/drivers/net/dummy.c
@@ -34,11 +34,12 @@
34#include <linux/etherdevice.h> 34#include <linux/etherdevice.h>
35#include <linux/init.h> 35#include <linux/init.h>
36#include <linux/moduleparam.h> 36#include <linux/moduleparam.h>
37#include <linux/rtnetlink.h>
38#include <net/rtnetlink.h>
37 39
38static int numdummies = 1; 40static int numdummies = 1;
39 41
40static int dummy_xmit(struct sk_buff *skb, struct net_device *dev); 42static int dummy_xmit(struct sk_buff *skb, struct net_device *dev);
41static struct net_device_stats *dummy_get_stats(struct net_device *dev);
42 43
43static int dummy_set_address(struct net_device *dev, void *p) 44static int dummy_set_address(struct net_device *dev, void *p)
44{ 45{
@@ -56,13 +57,13 @@ static void set_multicast_list(struct net_device *dev)
56{ 57{
57} 58}
58 59
59static void __init dummy_setup(struct net_device *dev) 60static void dummy_setup(struct net_device *dev)
60{ 61{
61 /* Initialize the device structure. */ 62 /* Initialize the device structure. */
62 dev->get_stats = dummy_get_stats;
63 dev->hard_start_xmit = dummy_xmit; 63 dev->hard_start_xmit = dummy_xmit;
64 dev->set_multicast_list = set_multicast_list; 64 dev->set_multicast_list = set_multicast_list;
65 dev->set_mac_address = dummy_set_address; 65 dev->set_mac_address = dummy_set_address;
66 dev->destructor = free_netdev;
66 67
67 /* Fill in device structure with ethernet-generic values. */ 68 /* Fill in device structure with ethernet-generic values. */
68 ether_setup(dev); 69 ether_setup(dev);
@@ -76,77 +77,80 @@ static void __init dummy_setup(struct net_device *dev)
76 77
77static int dummy_xmit(struct sk_buff *skb, struct net_device *dev) 78static int dummy_xmit(struct sk_buff *skb, struct net_device *dev)
78{ 79{
79 struct net_device_stats *stats = netdev_priv(dev); 80 dev->stats.tx_packets++;
80 81 dev->stats.tx_bytes += skb->len;
81 stats->tx_packets++;
82 stats->tx_bytes+=skb->len;
83 82
84 dev_kfree_skb(skb); 83 dev_kfree_skb(skb);
85 return 0; 84 return 0;
86} 85}
87 86
88static struct net_device_stats *dummy_get_stats(struct net_device *dev) 87static int dummy_validate(struct nlattr *tb[], struct nlattr *data[])
89{ 88{
90 return netdev_priv(dev); 89 if (tb[IFLA_ADDRESS]) {
90 if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
91 return -EINVAL;
92 if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
93 return -EADDRNOTAVAIL;
94 }
95 return 0;
91} 96}
92 97
93static struct net_device **dummies; 98static struct rtnl_link_ops dummy_link_ops __read_mostly = {
99 .kind = "dummy",
100 .setup = dummy_setup,
101 .validate = dummy_validate,
102};
94 103
95/* Number of dummy devices to be set up by this module. */ 104/* Number of dummy devices to be set up by this module. */
96module_param(numdummies, int, 0); 105module_param(numdummies, int, 0);
97MODULE_PARM_DESC(numdummies, "Number of dummy pseudo devices"); 106MODULE_PARM_DESC(numdummies, "Number of dummy pseudo devices");
98 107
99static int __init dummy_init_one(int index) 108static int __init dummy_init_one(void)
100{ 109{
101 struct net_device *dev_dummy; 110 struct net_device *dev_dummy;
102 int err; 111 int err;
103 112
104 dev_dummy = alloc_netdev(sizeof(struct net_device_stats), 113 dev_dummy = alloc_netdev(0, "dummy%d", dummy_setup);
105 "dummy%d", dummy_setup);
106
107 if (!dev_dummy) 114 if (!dev_dummy)
108 return -ENOMEM; 115 return -ENOMEM;
109 116
110 if ((err = register_netdev(dev_dummy))) { 117 err = dev_alloc_name(dev_dummy, dev_dummy->name);
111 free_netdev(dev_dummy); 118 if (err < 0)
112 dev_dummy = NULL; 119 goto err;
113 } else {
114 dummies[index] = dev_dummy;
115 }
116 120
117 return err; 121 dev_dummy->rtnl_link_ops = &dummy_link_ops;
118} 122 err = register_netdevice(dev_dummy);
123 if (err < 0)
124 goto err;
125 return 0;
119 126
120static void dummy_free_one(int index) 127err:
121{ 128 free_netdev(dev_dummy);
122 unregister_netdev(dummies[index]); 129 return err;
123 free_netdev(dummies[index]);
124} 130}
125 131
126static int __init dummy_init_module(void) 132static int __init dummy_init_module(void)
127{ 133{
128 int i, err = 0; 134 int i, err = 0;
129 dummies = kmalloc(numdummies * sizeof(void *), GFP_KERNEL); 135
130 if (!dummies) 136 rtnl_lock();
131 return -ENOMEM; 137 err = __rtnl_link_register(&dummy_link_ops);
138
132 for (i = 0; i < numdummies && !err; i++) 139 for (i = 0; i < numdummies && !err; i++)
133 err = dummy_init_one(i); 140 err = dummy_init_one();
134 if (err) { 141 if (err < 0)
135 i--; 142 __rtnl_link_unregister(&dummy_link_ops);
136 while (--i >= 0) 143 rtnl_unlock();
137 dummy_free_one(i); 144
138 }
139 return err; 145 return err;
140} 146}
141 147
142static void __exit dummy_cleanup_module(void) 148static void __exit dummy_cleanup_module(void)
143{ 149{
144 int i; 150 rtnl_link_unregister(&dummy_link_ops);
145 for (i = 0; i < numdummies; i++)
146 dummy_free_one(i);
147 kfree(dummies);
148} 151}
149 152
150module_init(dummy_init_module); 153module_init(dummy_init_module);
151module_exit(dummy_cleanup_module); 154module_exit(dummy_cleanup_module);
152MODULE_LICENSE("GPL"); 155MODULE_LICENSE("GPL");
156MODULE_ALIAS_RTNL_LINK("dummy");
diff --git a/drivers/net/e100.c b/drivers/net/e100.c
index 74ea6373c7cd..6b6401e9304e 100644
--- a/drivers/net/e100.c
+++ b/drivers/net/e100.c
@@ -583,7 +583,6 @@ struct nic {
583 u32 rx_tco_frames; 583 u32 rx_tco_frames;
584 u32 rx_over_length_errors; 584 u32 rx_over_length_errors;
585 585
586 u8 rev_id;
587 u16 leds; 586 u16 leds;
588 u16 eeprom_wc; 587 u16 eeprom_wc;
589 u16 eeprom[256]; 588 u16 eeprom[256];
@@ -937,9 +936,8 @@ static void e100_get_defaults(struct nic *nic)
937 struct param_range rfds = { .min = 16, .max = 256, .count = 256 }; 936 struct param_range rfds = { .min = 16, .max = 256, .count = 256 };
938 struct param_range cbs = { .min = 64, .max = 256, .count = 128 }; 937 struct param_range cbs = { .min = 64, .max = 256, .count = 128 };
939 938
940 pci_read_config_byte(nic->pdev, PCI_REVISION_ID, &nic->rev_id);
941 /* MAC type is encoded as rev ID; exception: ICH is treated as 82559 */ 939 /* MAC type is encoded as rev ID; exception: ICH is treated as 82559 */
942 nic->mac = (nic->flags & ich) ? mac_82559_D101M : nic->rev_id; 940 nic->mac = (nic->flags & ich) ? mac_82559_D101M : nic->pdev->revision;
943 if(nic->mac == mac_unknown) 941 if(nic->mac == mac_unknown)
944 nic->mac = mac_82557_D100_A; 942 nic->mac = mac_82557_D100_A;
945 943
@@ -1279,7 +1277,7 @@ static void e100_setup_ucode(struct nic *nic, struct cb *cb, struct sk_buff *skb
1279 if (nic->flags & ich) 1277 if (nic->flags & ich)
1280 goto noloaducode; 1278 goto noloaducode;
1281 1279
1282 /* Search for ucode match against h/w rev_id */ 1280 /* Search for ucode match against h/w revision */
1283 for (opts = ucode_opts; opts->mac; opts++) { 1281 for (opts = ucode_opts; opts->mac; opts++) {
1284 int i; 1282 int i;
1285 u32 *ucode = opts->ucode; 1283 u32 *ucode = opts->ucode;
@@ -2238,7 +2236,7 @@ static void e100_get_regs(struct net_device *netdev,
2238 u32 *buff = p; 2236 u32 *buff = p;
2239 int i; 2237 int i;
2240 2238
2241 regs->version = (1 << 24) | nic->rev_id; 2239 regs->version = (1 << 24) | nic->pdev->revision;
2242 buff[0] = ioread8(&nic->csr->scb.cmd_hi) << 24 | 2240 buff[0] = ioread8(&nic->csr->scb.cmd_hi) << 24 |
2243 ioread8(&nic->csr->scb.cmd_lo) << 16 | 2241 ioread8(&nic->csr->scb.cmd_lo) << 16 |
2244 ioread16(&nic->csr->scb.status); 2242 ioread16(&nic->csr->scb.status);
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index cf8af928a69c..f48b659e0c2b 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -1266,8 +1266,7 @@ e1000_sw_init(struct e1000_adapter *adapter)
1266 hw->device_id = pdev->device; 1266 hw->device_id = pdev->device;
1267 hw->subsystem_vendor_id = pdev->subsystem_vendor; 1267 hw->subsystem_vendor_id = pdev->subsystem_vendor;
1268 hw->subsystem_id = pdev->subsystem_device; 1268 hw->subsystem_id = pdev->subsystem_device;
1269 1269 hw->revision_id = pdev->revision;
1270 pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
1271 1270
1272 pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word); 1271 pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);
1273 1272
diff --git a/drivers/net/eepro100.c b/drivers/net/eepro100.c
index 9800341956a2..9afa47edfc58 100644
--- a/drivers/net/eepro100.c
+++ b/drivers/net/eepro100.c
@@ -1801,7 +1801,7 @@ speedo_rx(struct net_device *dev)
1801 1801
1802#if 1 || USE_IP_CSUM 1802#if 1 || USE_IP_CSUM
1803 /* Packet is in one chunk -- we can copy + cksum. */ 1803 /* Packet is in one chunk -- we can copy + cksum. */
1804 eth_copy_and_sum(skb, sp->rx_skbuff[entry]->data, pkt_len, 0); 1804 skb_copy_to_linear_data(skb, sp->rx_skbuff[entry]->data, pkt_len);
1805 skb_put(skb, pkt_len); 1805 skb_put(skb, pkt_len);
1806#else 1806#else
1807 skb_copy_from_linear_data(sp->rx_skbuff[entry], 1807 skb_copy_from_linear_data(sp->rx_skbuff[entry],
diff --git a/drivers/net/epic100.c b/drivers/net/epic100.c
index 5e517946f46a..119778401e48 100644
--- a/drivers/net/epic100.c
+++ b/drivers/net/epic100.c
@@ -1201,7 +1201,7 @@ static int epic_rx(struct net_device *dev, int budget)
1201 ep->rx_ring[entry].bufaddr, 1201 ep->rx_ring[entry].bufaddr,
1202 ep->rx_buf_sz, 1202 ep->rx_buf_sz,
1203 PCI_DMA_FROMDEVICE); 1203 PCI_DMA_FROMDEVICE);
1204 eth_copy_and_sum(skb, ep->rx_skbuff[entry]->data, pkt_len, 0); 1204 skb_copy_to_linear_data(skb, ep->rx_skbuff[entry]->data, pkt_len);
1205 skb_put(skb, pkt_len); 1205 skb_put(skb, pkt_len);
1206 pci_dma_sync_single_for_device(ep->pci_dev, 1206 pci_dma_sync_single_for_device(ep->pci_dev,
1207 ep->rx_ring[entry].bufaddr, 1207 ep->rx_ring[entry].bufaddr,
diff --git a/drivers/net/fealnx.c b/drivers/net/fealnx.c
index abe9b089c610..ff9f177d7157 100644
--- a/drivers/net/fealnx.c
+++ b/drivers/net/fealnx.c
@@ -1727,8 +1727,8 @@ static int netdev_rx(struct net_device *dev)
1727 /* Call copy + cksum if available. */ 1727 /* Call copy + cksum if available. */
1728 1728
1729#if ! defined(__alpha__) 1729#if ! defined(__alpha__)
1730 eth_copy_and_sum(skb, 1730 skb_copy_to_linear_data(skb,
1731 np->cur_rx->skbuff->data, pkt_len, 0); 1731 np->cur_rx->skbuff->data, pkt_len);
1732 skb_put(skb, pkt_len); 1732 skb_put(skb, pkt_len);
1733#else 1733#else
1734 memcpy(skb_put(skb, pkt_len), 1734 memcpy(skb_put(skb, pkt_len),
diff --git a/drivers/net/fec.c b/drivers/net/fec.c
index 255b09124e11..03023dd17829 100644
--- a/drivers/net/fec.c
+++ b/drivers/net/fec.c
@@ -648,7 +648,7 @@ while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) {
648 fep->stats.rx_dropped++; 648 fep->stats.rx_dropped++;
649 } else { 649 } else {
650 skb_put(skb,pkt_len-4); /* Make room */ 650 skb_put(skb,pkt_len-4); /* Make room */
651 eth_copy_and_sum(skb, data, pkt_len-4, 0); 651 skb_copy_to_linear_data(skb, data, pkt_len-4);
652 skb->protocol=eth_type_trans(skb,dev); 652 skb->protocol=eth_type_trans(skb,dev);
653 netif_rx(skb); 653 netif_rx(skb);
654 } 654 }
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index a361dba5ddaa..136827f8dc2e 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -5226,15 +5226,13 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
5226 np->wolenabled = 0; 5226 np->wolenabled = 0;
5227 5227
5228 if (id->driver_data & DEV_HAS_POWER_CNTRL) { 5228 if (id->driver_data & DEV_HAS_POWER_CNTRL) {
5229 u8 revision_id;
5230 pci_read_config_byte(pci_dev, PCI_REVISION_ID, &revision_id);
5231 5229
5232 /* take phy and nic out of low power mode */ 5230 /* take phy and nic out of low power mode */
5233 powerstate = readl(base + NvRegPowerState2); 5231 powerstate = readl(base + NvRegPowerState2);
5234 powerstate &= ~NVREG_POWERSTATE2_POWERUP_MASK; 5232 powerstate &= ~NVREG_POWERSTATE2_POWERUP_MASK;
5235 if ((id->device == PCI_DEVICE_ID_NVIDIA_NVENET_12 || 5233 if ((id->device == PCI_DEVICE_ID_NVIDIA_NVENET_12 ||
5236 id->device == PCI_DEVICE_ID_NVIDIA_NVENET_13) && 5234 id->device == PCI_DEVICE_ID_NVIDIA_NVENET_13) &&
5237 revision_id >= 0xA3) 5235 pci_dev->revision >= 0xA3)
5238 powerstate |= NVREG_POWERSTATE2_POWERUP_REV_A3; 5236 powerstate |= NVREG_POWERSTATE2_POWERUP_REV_A3;
5239 writel(powerstate, base + NvRegPowerState2); 5237 writel(powerstate, base + NvRegPowerState2);
5240 } 5238 }
diff --git a/drivers/net/hamachi.c b/drivers/net/hamachi.c
index 2521b111b3a5..15254dc7876a 100644
--- a/drivers/net/hamachi.c
+++ b/drivers/net/hamachi.c
@@ -1575,8 +1575,8 @@ static int hamachi_rx(struct net_device *dev)
1575 PCI_DMA_FROMDEVICE); 1575 PCI_DMA_FROMDEVICE);
1576 /* Call copy + cksum if available. */ 1576 /* Call copy + cksum if available. */
1577#if 1 || USE_IP_COPYSUM 1577#if 1 || USE_IP_COPYSUM
1578 eth_copy_and_sum(skb, 1578 skb_copy_to_linear_data(skb,
1579 hmp->rx_skbuff[entry]->data, pkt_len, 0); 1579 hmp->rx_skbuff[entry]->data, pkt_len);
1580 skb_put(skb, pkt_len); 1580 skb_put(skb, pkt_len);
1581#else 1581#else
1582 memcpy(skb_put(skb, pkt_len), hmp->rx_ring_dma 1582 memcpy(skb_put(skb, pkt_len), hmp->rx_ring_dma
diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
index 6ec3d500f334..d96eb7229548 100644
--- a/drivers/net/ibmveth.c
+++ b/drivers/net/ibmveth.c
@@ -1337,7 +1337,7 @@ const char * buf, size_t count)
1337 1337
1338#define ATTR(_name, _mode) \ 1338#define ATTR(_name, _mode) \
1339 struct attribute veth_##_name##_attr = { \ 1339 struct attribute veth_##_name##_attr = { \
1340 .name = __stringify(_name), .mode = _mode, .owner = THIS_MODULE \ 1340 .name = __stringify(_name), .mode = _mode, \
1341 }; 1341 };
1342 1342
1343static ATTR(active, 0644); 1343static ATTR(active, 0644);
diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
index 07b4c0d7a75c..f5c3598e59af 100644
--- a/drivers/net/ifb.c
+++ b/drivers/net/ifb.c
@@ -136,13 +136,14 @@ resched:
136 136
137} 137}
138 138
139static void __init ifb_setup(struct net_device *dev) 139static void ifb_setup(struct net_device *dev)
140{ 140{
141 /* Initialize the device structure. */ 141 /* Initialize the device structure. */
142 dev->get_stats = ifb_get_stats; 142 dev->get_stats = ifb_get_stats;
143 dev->hard_start_xmit = ifb_xmit; 143 dev->hard_start_xmit = ifb_xmit;
144 dev->open = &ifb_open; 144 dev->open = &ifb_open;
145 dev->stop = &ifb_close; 145 dev->stop = &ifb_close;
146 dev->destructor = free_netdev;
146 147
147 /* Fill in device structure with ethernet-generic values. */ 148 /* Fill in device structure with ethernet-generic values. */
148 ether_setup(dev); 149 ether_setup(dev);
@@ -197,12 +198,6 @@ static struct net_device_stats *ifb_get_stats(struct net_device *dev)
197 return stats; 198 return stats;
198} 199}
199 200
200static struct net_device **ifbs;
201
202/* Number of ifb devices to be set up by this module. */
203module_param(numifbs, int, 0);
204MODULE_PARM_DESC(numifbs, "Number of ifb devices");
205
206static int ifb_close(struct net_device *dev) 201static int ifb_close(struct net_device *dev)
207{ 202{
208 struct ifb_private *dp = netdev_priv(dev); 203 struct ifb_private *dp = netdev_priv(dev);
@@ -226,6 +221,28 @@ static int ifb_open(struct net_device *dev)
226 return 0; 221 return 0;
227} 222}
228 223
224static int ifb_validate(struct nlattr *tb[], struct nlattr *data[])
225{
226 if (tb[IFLA_ADDRESS]) {
227 if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
228 return -EINVAL;
229 if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
230 return -EADDRNOTAVAIL;
231 }
232 return 0;
233}
234
235static struct rtnl_link_ops ifb_link_ops __read_mostly = {
236 .kind = "ifb",
237 .priv_size = sizeof(struct ifb_private),
238 .setup = ifb_setup,
239 .validate = ifb_validate,
240};
241
242/* Number of ifb devices to be set up by this module. */
243module_param(numifbs, int, 0);
244MODULE_PARM_DESC(numifbs, "Number of ifb devices");
245
229static int __init ifb_init_one(int index) 246static int __init ifb_init_one(int index)
230{ 247{
231 struct net_device *dev_ifb; 248 struct net_device *dev_ifb;
@@ -237,49 +254,44 @@ static int __init ifb_init_one(int index)
237 if (!dev_ifb) 254 if (!dev_ifb)
238 return -ENOMEM; 255 return -ENOMEM;
239 256
240 if ((err = register_netdev(dev_ifb))) { 257 err = dev_alloc_name(dev_ifb, dev_ifb->name);
241 free_netdev(dev_ifb); 258 if (err < 0)
242 dev_ifb = NULL; 259 goto err;
243 } else {
244 ifbs[index] = dev_ifb;
245 }
246 260
247 return err; 261 dev_ifb->rtnl_link_ops = &ifb_link_ops;
248} 262 err = register_netdevice(dev_ifb);
263 if (err < 0)
264 goto err;
265 return 0;
249 266
250static void ifb_free_one(int index) 267err:
251{ 268 free_netdev(dev_ifb);
252 unregister_netdev(ifbs[index]); 269 return err;
253 free_netdev(ifbs[index]);
254} 270}
255 271
256static int __init ifb_init_module(void) 272static int __init ifb_init_module(void)
257{ 273{
258 int i, err = 0; 274 int i, err;
259 ifbs = kmalloc(numifbs * sizeof(void *), GFP_KERNEL); 275
260 if (!ifbs) 276 rtnl_lock();
261 return -ENOMEM; 277 err = __rtnl_link_register(&ifb_link_ops);
278
262 for (i = 0; i < numifbs && !err; i++) 279 for (i = 0; i < numifbs && !err; i++)
263 err = ifb_init_one(i); 280 err = ifb_init_one(i);
264 if (err) { 281 if (err)
265 i--; 282 __rtnl_link_unregister(&ifb_link_ops);
266 while (--i >= 0) 283 rtnl_unlock();
267 ifb_free_one(i);
268 }
269 284
270 return err; 285 return err;
271} 286}
272 287
273static void __exit ifb_cleanup_module(void) 288static void __exit ifb_cleanup_module(void)
274{ 289{
275 int i; 290 rtnl_link_unregister(&ifb_link_ops);
276
277 for (i = 0; i < numifbs; i++)
278 ifb_free_one(i);
279 kfree(ifbs);
280} 291}
281 292
282module_init(ifb_init_module); 293module_init(ifb_init_module);
283module_exit(ifb_cleanup_module); 294module_exit(ifb_cleanup_module);
284MODULE_LICENSE("GPL"); 295MODULE_LICENSE("GPL");
285MODULE_AUTHOR("Jamal Hadi Salim"); 296MODULE_AUTHOR("Jamal Hadi Salim");
297MODULE_ALIAS_RTNL_LINK("ifb");
diff --git a/drivers/net/irda/kingsun-sir.c b/drivers/net/irda/kingsun-sir.c
index 217429122e79..bdd5c979bead 100644
--- a/drivers/net/irda/kingsun-sir.c
+++ b/drivers/net/irda/kingsun-sir.c
@@ -4,7 +4,7 @@
4* Version: 0.1.1 4* Version: 0.1.1
5* Description: Irda KingSun/DonShine USB Dongle 5* Description: Irda KingSun/DonShine USB Dongle
6* Status: Experimental 6* Status: Experimental
7* Author: Alex Villac�s Lasso <a_villacis@palosanto.com> 7* Author: Alex Villacís Lasso <a_villacis@palosanto.com>
8* 8*
9* Based on stir4200 and mcs7780 drivers, with (strange?) differences 9* Based on stir4200 and mcs7780 drivers, with (strange?) differences
10* 10*
@@ -652,6 +652,6 @@ static void __exit kingsun_cleanup(void)
652} 652}
653module_exit(kingsun_cleanup); 653module_exit(kingsun_cleanup);
654 654
655MODULE_AUTHOR("Alex Villac�s Lasso <a_villacis@palosanto.com>"); 655MODULE_AUTHOR("Alex Villacís Lasso <a_villacis@palosanto.com>");
656MODULE_DESCRIPTION("IrDA-USB Dongle Driver for KingSun/DonShine"); 656MODULE_DESCRIPTION("IrDA-USB Dongle Driver for KingSun/DonShine");
657MODULE_LICENSE("GPL"); 657MODULE_LICENSE("GPL");
diff --git a/drivers/net/irda/vlsi_ir.c b/drivers/net/irda/vlsi_ir.c
index bf78ef1120ad..0538ca9ce058 100644
--- a/drivers/net/irda/vlsi_ir.c
+++ b/drivers/net/irda/vlsi_ir.c
@@ -44,6 +44,7 @@ MODULE_LICENSE("GPL");
44#include <linux/time.h> 44#include <linux/time.h>
45#include <linux/proc_fs.h> 45#include <linux/proc_fs.h>
46#include <linux/seq_file.h> 46#include <linux/seq_file.h>
47#include <linux/mutex.h>
47#include <asm/uaccess.h> 48#include <asm/uaccess.h>
48#include <asm/byteorder.h> 49#include <asm/byteorder.h>
49 50
@@ -1660,8 +1661,8 @@ vlsi_irda_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1660 idev = ndev->priv; 1661 idev = ndev->priv;
1661 1662
1662 spin_lock_init(&idev->lock); 1663 spin_lock_init(&idev->lock);
1663 init_MUTEX(&idev->sem); 1664 mutex_init(&idev->mtx);
1664 down(&idev->sem); 1665 mutex_lock(&idev->mtx);
1665 idev->pdev = pdev; 1666 idev->pdev = pdev;
1666 1667
1667 if (vlsi_irda_init(ndev) < 0) 1668 if (vlsi_irda_init(ndev) < 0)
@@ -1689,12 +1690,12 @@ vlsi_irda_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1689 IRDA_MESSAGE("%s: registered device %s\n", drivername, ndev->name); 1690 IRDA_MESSAGE("%s: registered device %s\n", drivername, ndev->name);
1690 1691
1691 pci_set_drvdata(pdev, ndev); 1692 pci_set_drvdata(pdev, ndev);
1692 up(&idev->sem); 1693 mutex_unlock(&idev->mtx);
1693 1694
1694 return 0; 1695 return 0;
1695 1696
1696out_freedev: 1697out_freedev:
1697 up(&idev->sem); 1698 mutex_unlock(&idev->mtx);
1698 free_netdev(ndev); 1699 free_netdev(ndev);
1699out_disable: 1700out_disable:
1700 pci_disable_device(pdev); 1701 pci_disable_device(pdev);
@@ -1716,12 +1717,12 @@ static void __devexit vlsi_irda_remove(struct pci_dev *pdev)
1716 unregister_netdev(ndev); 1717 unregister_netdev(ndev);
1717 1718
1718 idev = ndev->priv; 1719 idev = ndev->priv;
1719 down(&idev->sem); 1720 mutex_lock(&idev->mtx);
1720 if (idev->proc_entry) { 1721 if (idev->proc_entry) {
1721 remove_proc_entry(ndev->name, vlsi_proc_root); 1722 remove_proc_entry(ndev->name, vlsi_proc_root);
1722 idev->proc_entry = NULL; 1723 idev->proc_entry = NULL;
1723 } 1724 }
1724 up(&idev->sem); 1725 mutex_unlock(&idev->mtx);
1725 1726
1726 free_netdev(ndev); 1727 free_netdev(ndev);
1727 1728
@@ -1751,7 +1752,7 @@ static int vlsi_irda_suspend(struct pci_dev *pdev, pm_message_t state)
1751 return 0; 1752 return 0;
1752 } 1753 }
1753 idev = ndev->priv; 1754 idev = ndev->priv;
1754 down(&idev->sem); 1755 mutex_lock(&idev->mtx);
1755 if (pdev->current_state != 0) { /* already suspended */ 1756 if (pdev->current_state != 0) { /* already suspended */
1756 if (state.event > pdev->current_state) { /* simply go deeper */ 1757 if (state.event > pdev->current_state) { /* simply go deeper */
1757 pci_set_power_state(pdev, pci_choose_state(pdev, state)); 1758 pci_set_power_state(pdev, pci_choose_state(pdev, state));
@@ -1759,7 +1760,7 @@ static int vlsi_irda_suspend(struct pci_dev *pdev, pm_message_t state)
1759 } 1760 }
1760 else 1761 else
1761 IRDA_ERROR("%s - %s: invalid suspend request %u -> %u\n", __FUNCTION__, pci_name(pdev), pdev->current_state, state.event); 1762 IRDA_ERROR("%s - %s: invalid suspend request %u -> %u\n", __FUNCTION__, pci_name(pdev), pdev->current_state, state.event);
1762 up(&idev->sem); 1763 mutex_unlock(&idev->mtx);
1763 return 0; 1764 return 0;
1764 } 1765 }
1765 1766
@@ -1775,7 +1776,7 @@ static int vlsi_irda_suspend(struct pci_dev *pdev, pm_message_t state)
1775 pci_set_power_state(pdev, pci_choose_state(pdev, state)); 1776 pci_set_power_state(pdev, pci_choose_state(pdev, state));
1776 pdev->current_state = state.event; 1777 pdev->current_state = state.event;
1777 idev->resume_ok = 1; 1778 idev->resume_ok = 1;
1778 up(&idev->sem); 1779 mutex_unlock(&idev->mtx);
1779 return 0; 1780 return 0;
1780} 1781}
1781 1782
@@ -1790,9 +1791,9 @@ static int vlsi_irda_resume(struct pci_dev *pdev)
1790 return 0; 1791 return 0;
1791 } 1792 }
1792 idev = ndev->priv; 1793 idev = ndev->priv;
1793 down(&idev->sem); 1794 mutex_lock(&idev->mtx);
1794 if (pdev->current_state == 0) { 1795 if (pdev->current_state == 0) {
1795 up(&idev->sem); 1796 mutex_unlock(&idev->mtx);
1796 IRDA_WARNING("%s - %s: already resumed\n", 1797 IRDA_WARNING("%s - %s: already resumed\n",
1797 __FUNCTION__, pci_name(pdev)); 1798 __FUNCTION__, pci_name(pdev));
1798 return 0; 1799 return 0;
@@ -1814,7 +1815,7 @@ static int vlsi_irda_resume(struct pci_dev *pdev)
1814 * device and independently resume_ok should catch any garbage config. 1815 * device and independently resume_ok should catch any garbage config.
1815 */ 1816 */
1816 IRDA_WARNING("%s - hm, nothing to resume?\n", __FUNCTION__); 1817 IRDA_WARNING("%s - hm, nothing to resume?\n", __FUNCTION__);
1817 up(&idev->sem); 1818 mutex_unlock(&idev->mtx);
1818 return 0; 1819 return 0;
1819 } 1820 }
1820 1821
@@ -1824,7 +1825,7 @@ static int vlsi_irda_resume(struct pci_dev *pdev)
1824 netif_device_attach(ndev); 1825 netif_device_attach(ndev);
1825 } 1826 }
1826 idev->resume_ok = 0; 1827 idev->resume_ok = 0;
1827 up(&idev->sem); 1828 mutex_unlock(&idev->mtx);
1828 return 0; 1829 return 0;
1829} 1830}
1830 1831
diff --git a/drivers/net/irda/vlsi_ir.h b/drivers/net/irda/vlsi_ir.h
index 2d3b773d8e35..ca12a6096419 100644
--- a/drivers/net/irda/vlsi_ir.h
+++ b/drivers/net/irda/vlsi_ir.h
@@ -728,7 +728,7 @@ typedef struct vlsi_irda_dev {
728 struct timeval last_rx; 728 struct timeval last_rx;
729 729
730 spinlock_t lock; 730 spinlock_t lock;
731 struct semaphore sem; 731 struct mutex mtx;
732 732
733 u8 resume_ok; 733 u8 resume_ok;
734 struct proc_dir_entry *proc_entry; 734 struct proc_dir_entry *proc_entry;
diff --git a/drivers/net/ixp2000/ixpdev.c b/drivers/net/ixp2000/ixpdev.c
index d5f694fc4a21..d9ce1aef148a 100644
--- a/drivers/net/ixp2000/ixpdev.c
+++ b/drivers/net/ixp2000/ixpdev.c
@@ -111,7 +111,7 @@ static int ixpdev_rx(struct net_device *dev, int *budget)
111 skb = dev_alloc_skb(desc->pkt_length + 2); 111 skb = dev_alloc_skb(desc->pkt_length + 2);
112 if (likely(skb != NULL)) { 112 if (likely(skb != NULL)) {
113 skb_reserve(skb, 2); 113 skb_reserve(skb, 2);
114 eth_copy_and_sum(skb, buf, desc->pkt_length, 0); 114 skb_copy_to_linear_data(skb, buf, desc->pkt_length);
115 skb_put(skb, desc->pkt_length); 115 skb_put(skb, desc->pkt_length);
116 skb->protocol = eth_type_trans(skb, nds[desc->channel]); 116 skb->protocol = eth_type_trans(skb, nds[desc->channel]);
117 117
diff --git a/drivers/net/lance.c b/drivers/net/lance.c
index 0fe96c85828b..a2f37e52b928 100644
--- a/drivers/net/lance.c
+++ b/drivers/net/lance.c
@@ -1186,9 +1186,9 @@ lance_rx(struct net_device *dev)
1186 } 1186 }
1187 skb_reserve(skb,2); /* 16 byte align */ 1187 skb_reserve(skb,2); /* 16 byte align */
1188 skb_put(skb,pkt_len); /* Make room */ 1188 skb_put(skb,pkt_len); /* Make room */
1189 eth_copy_and_sum(skb, 1189 skb_copy_to_linear_data(skb,
1190 (unsigned char *)isa_bus_to_virt((lp->rx_ring[entry].base & 0x00ffffff)), 1190 (unsigned char *)isa_bus_to_virt((lp->rx_ring[entry].base & 0x00ffffff)),
1191 pkt_len,0); 1191 pkt_len);
1192 skb->protocol=eth_type_trans(skb,dev); 1192 skb->protocol=eth_type_trans(skb,dev);
1193 netif_rx(skb); 1193 netif_rx(skb);
1194 dev->last_rx = jiffies; 1194 dev->last_rx = jiffies;
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
new file mode 100644
index 000000000000..dc74d006e01f
--- /dev/null
+++ b/drivers/net/macvlan.c
@@ -0,0 +1,496 @@
1/*
2 * Copyright (c) 2007 Patrick McHardy <kaber@trash.net>
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License as
6 * published by the Free Software Foundation; either version 2 of
7 * the License, or (at your option) any later version.
8 *
9 * The code this is based on carried the following copyright notice:
10 * ---
11 * (C) Copyright 2001-2006
12 * Alex Zeffertt, Cambridge Broadband Ltd, ajz@cambridgebroadband.com
13 * Re-worked by Ben Greear <greearb@candelatech.com>
14 * ---
15 */
16#include <linux/kernel.h>
17#include <linux/types.h>
18#include <linux/module.h>
19#include <linux/init.h>
20#include <linux/errno.h>
21#include <linux/slab.h>
22#include <linux/string.h>
23#include <linux/list.h>
24#include <linux/notifier.h>
25#include <linux/netdevice.h>
26#include <linux/etherdevice.h>
27#include <linux/ethtool.h>
28#include <linux/if_arp.h>
29#include <linux/if_link.h>
30#include <linux/if_macvlan.h>
31#include <net/rtnetlink.h>
32
33#define MACVLAN_HASH_SIZE (1 << BITS_PER_BYTE)
34
35struct macvlan_port {
36 struct net_device *dev;
37 struct hlist_head vlan_hash[MACVLAN_HASH_SIZE];
38 struct list_head vlans;
39};
40
41struct macvlan_dev {
42 struct net_device *dev;
43 struct list_head list;
44 struct hlist_node hlist;
45 struct macvlan_port *port;
46 struct net_device *lowerdev;
47};
48
49
50static struct macvlan_dev *macvlan_hash_lookup(const struct macvlan_port *port,
51 const unsigned char *addr)
52{
53 struct macvlan_dev *vlan;
54 struct hlist_node *n;
55
56 hlist_for_each_entry_rcu(vlan, n, &port->vlan_hash[addr[5]], hlist) {
57 if (!compare_ether_addr(vlan->dev->dev_addr, addr))
58 return vlan;
59 }
60 return NULL;
61}
62
63static void macvlan_broadcast(struct sk_buff *skb,
64 const struct macvlan_port *port)
65{
66 const struct ethhdr *eth = eth_hdr(skb);
67 const struct macvlan_dev *vlan;
68 struct hlist_node *n;
69 struct net_device *dev;
70 struct sk_buff *nskb;
71 unsigned int i;
72
73 for (i = 0; i < MACVLAN_HASH_SIZE; i++) {
74 hlist_for_each_entry_rcu(vlan, n, &port->vlan_hash[i], hlist) {
75 dev = vlan->dev;
76 if (unlikely(!(dev->flags & IFF_UP)))
77 continue;
78
79 nskb = skb_clone(skb, GFP_ATOMIC);
80 if (nskb == NULL) {
81 dev->stats.rx_errors++;
82 dev->stats.rx_dropped++;
83 continue;
84 }
85
86 dev->stats.rx_bytes += skb->len + ETH_HLEN;
87 dev->stats.rx_packets++;
88 dev->stats.multicast++;
89 dev->last_rx = jiffies;
90
91 nskb->dev = dev;
92 if (!compare_ether_addr(eth->h_dest, dev->broadcast))
93 nskb->pkt_type = PACKET_BROADCAST;
94 else
95 nskb->pkt_type = PACKET_MULTICAST;
96
97 netif_rx(nskb);
98 }
99 }
100}
101
102/* called under rcu_read_lock() from netif_receive_skb */
103static struct sk_buff *macvlan_handle_frame(struct sk_buff *skb)
104{
105 const struct ethhdr *eth = eth_hdr(skb);
106 const struct macvlan_port *port;
107 const struct macvlan_dev *vlan;
108 struct net_device *dev;
109
110 port = rcu_dereference(skb->dev->macvlan_port);
111 if (port == NULL)
112 return skb;
113
114 if (is_multicast_ether_addr(eth->h_dest)) {
115 macvlan_broadcast(skb, port);
116 return skb;
117 }
118
119 vlan = macvlan_hash_lookup(port, eth->h_dest);
120 if (vlan == NULL)
121 return skb;
122
123 dev = vlan->dev;
124 if (unlikely(!(dev->flags & IFF_UP))) {
125 kfree_skb(skb);
126 return NULL;
127 }
128
129 skb = skb_share_check(skb, GFP_ATOMIC);
130 if (skb == NULL) {
131 dev->stats.rx_errors++;
132 dev->stats.rx_dropped++;
133 return NULL;
134 }
135
136 dev->stats.rx_bytes += skb->len + ETH_HLEN;
137 dev->stats.rx_packets++;
138 dev->last_rx = jiffies;
139
140 skb->dev = dev;
141 skb->pkt_type = PACKET_HOST;
142
143 netif_rx(skb);
144 return NULL;
145}
146
147static int macvlan_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
148{
149 const struct macvlan_dev *vlan = netdev_priv(dev);
150 unsigned int len = skb->len;
151 int ret;
152
153 skb->dev = vlan->lowerdev;
154 ret = dev_queue_xmit(skb);
155
156 if (likely(ret == NET_XMIT_SUCCESS)) {
157 dev->stats.tx_packets++;
158 dev->stats.tx_bytes += len;
159 } else {
160 dev->stats.tx_errors++;
161 dev->stats.tx_aborted_errors++;
162 }
163 return NETDEV_TX_OK;
164}
165
166static int macvlan_hard_header(struct sk_buff *skb, struct net_device *dev,
167 unsigned short type, void *daddr, void *saddr,
168 unsigned len)
169{
170 const struct macvlan_dev *vlan = netdev_priv(dev);
171 struct net_device *lowerdev = vlan->lowerdev;
172
173 return lowerdev->hard_header(skb, lowerdev, type, daddr,
174 saddr ? : dev->dev_addr, len);
175}
176
177static int macvlan_open(struct net_device *dev)
178{
179 struct macvlan_dev *vlan = netdev_priv(dev);
180 struct macvlan_port *port = vlan->port;
181 struct net_device *lowerdev = vlan->lowerdev;
182 int err;
183
184 err = dev_unicast_add(lowerdev, dev->dev_addr, ETH_ALEN);
185 if (err < 0)
186 return err;
187 if (dev->flags & IFF_ALLMULTI)
188 dev_set_allmulti(lowerdev, 1);
189
190 hlist_add_head_rcu(&vlan->hlist, &port->vlan_hash[dev->dev_addr[5]]);
191 return 0;
192}
193
194static int macvlan_stop(struct net_device *dev)
195{
196 struct macvlan_dev *vlan = netdev_priv(dev);
197 struct net_device *lowerdev = vlan->lowerdev;
198
199 dev_mc_unsync(lowerdev, dev);
200 if (dev->flags & IFF_ALLMULTI)
201 dev_set_allmulti(lowerdev, -1);
202
203 dev_unicast_delete(lowerdev, dev->dev_addr, ETH_ALEN);
204
205 hlist_del_rcu(&vlan->hlist);
206 synchronize_rcu();
207 return 0;
208}
209
210static void macvlan_change_rx_flags(struct net_device *dev, int change)
211{
212 struct macvlan_dev *vlan = netdev_priv(dev);
213 struct net_device *lowerdev = vlan->lowerdev;
214
215 if (change & IFF_ALLMULTI)
216 dev_set_allmulti(lowerdev, dev->flags & IFF_ALLMULTI ? 1 : -1);
217}
218
219static void macvlan_set_multicast_list(struct net_device *dev)
220{
221 struct macvlan_dev *vlan = netdev_priv(dev);
222
223 dev_mc_sync(vlan->lowerdev, dev);
224}
225
226static int macvlan_change_mtu(struct net_device *dev, int new_mtu)
227{
228 struct macvlan_dev *vlan = netdev_priv(dev);
229
230 if (new_mtu < 68 || vlan->lowerdev->mtu < new_mtu)
231 return -EINVAL;
232 dev->mtu = new_mtu;
233 return 0;
234}
235
236/*
237 * macvlan network devices have devices nesting below it and are a special
238 * "super class" of normal network devices; split their locks off into a
239 * separate class since they always nest.
240 */
241static struct lock_class_key macvlan_netdev_xmit_lock_key;
242
243#define MACVLAN_FEATURES \
244 (NETIF_F_SG | NETIF_F_ALL_CSUM | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \
245 NETIF_F_GSO | NETIF_F_TSO | NETIF_F_UFO | NETIF_F_GSO_ROBUST | \
246 NETIF_F_TSO_ECN | NETIF_F_TSO6)
247
248#define MACVLAN_STATE_MASK \
249 ((1<<__LINK_STATE_NOCARRIER) | (1<<__LINK_STATE_DORMANT))
250
251static int macvlan_init(struct net_device *dev)
252{
253 struct macvlan_dev *vlan = netdev_priv(dev);
254 const struct net_device *lowerdev = vlan->lowerdev;
255
256 dev->state = (dev->state & ~MACVLAN_STATE_MASK) |
257 (lowerdev->state & MACVLAN_STATE_MASK);
258 dev->features = lowerdev->features & MACVLAN_FEATURES;
259 dev->iflink = lowerdev->ifindex;
260
261 lockdep_set_class(&dev->_xmit_lock, &macvlan_netdev_xmit_lock_key);
262 return 0;
263}
264
265static void macvlan_ethtool_get_drvinfo(struct net_device *dev,
266 struct ethtool_drvinfo *drvinfo)
267{
268 snprintf(drvinfo->driver, 32, "macvlan");
269 snprintf(drvinfo->version, 32, "0.1");
270}
271
272static u32 macvlan_ethtool_get_rx_csum(struct net_device *dev)
273{
274 const struct macvlan_dev *vlan = netdev_priv(dev);
275 struct net_device *lowerdev = vlan->lowerdev;
276
277 if (lowerdev->ethtool_ops->get_rx_csum == NULL)
278 return 0;
279 return lowerdev->ethtool_ops->get_rx_csum(lowerdev);
280}
281
282static const struct ethtool_ops macvlan_ethtool_ops = {
283 .get_link = ethtool_op_get_link,
284 .get_rx_csum = macvlan_ethtool_get_rx_csum,
285 .get_tx_csum = ethtool_op_get_tx_csum,
286 .get_tso = ethtool_op_get_tso,
287 .get_ufo = ethtool_op_get_ufo,
288 .get_sg = ethtool_op_get_sg,
289 .get_drvinfo = macvlan_ethtool_get_drvinfo,
290};
291
292static void macvlan_setup(struct net_device *dev)
293{
294 ether_setup(dev);
295
296 dev->init = macvlan_init;
297 dev->open = macvlan_open;
298 dev->stop = macvlan_stop;
299 dev->change_mtu = macvlan_change_mtu;
300 dev->change_rx_flags = macvlan_change_rx_flags;
301 dev->set_multicast_list = macvlan_set_multicast_list;
302 dev->hard_header = macvlan_hard_header;
303 dev->hard_start_xmit = macvlan_hard_start_xmit;
304 dev->destructor = free_netdev;
305 dev->ethtool_ops = &macvlan_ethtool_ops;
306 dev->tx_queue_len = 0;
307}
308
309static int macvlan_port_create(struct net_device *dev)
310{
311 struct macvlan_port *port;
312 unsigned int i;
313
314 if (dev->type != ARPHRD_ETHER || dev->flags & IFF_LOOPBACK)
315 return -EINVAL;
316
317 port = kzalloc(sizeof(*port), GFP_KERNEL);
318 if (port == NULL)
319 return -ENOMEM;
320
321 port->dev = dev;
322 INIT_LIST_HEAD(&port->vlans);
323 for (i = 0; i < MACVLAN_HASH_SIZE; i++)
324 INIT_HLIST_HEAD(&port->vlan_hash[i]);
325 rcu_assign_pointer(dev->macvlan_port, port);
326 return 0;
327}
328
329static void macvlan_port_destroy(struct net_device *dev)
330{
331 struct macvlan_port *port = dev->macvlan_port;
332
333 rcu_assign_pointer(dev->macvlan_port, NULL);
334 synchronize_rcu();
335 kfree(port);
336}
337
338static void macvlan_transfer_operstate(struct net_device *dev)
339{
340 struct macvlan_dev *vlan = netdev_priv(dev);
341 const struct net_device *lowerdev = vlan->lowerdev;
342
343 if (lowerdev->operstate == IF_OPER_DORMANT)
344 netif_dormant_on(dev);
345 else
346 netif_dormant_off(dev);
347
348 if (netif_carrier_ok(lowerdev)) {
349 if (!netif_carrier_ok(dev))
350 netif_carrier_on(dev);
351 } else {
352 if (netif_carrier_ok(lowerdev))
353 netif_carrier_off(dev);
354 }
355}
356
357static int macvlan_validate(struct nlattr *tb[], struct nlattr *data[])
358{
359 if (tb[IFLA_ADDRESS]) {
360 if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
361 return -EINVAL;
362 if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
363 return -EADDRNOTAVAIL;
364 }
365 return 0;
366}
367
368static int macvlan_newlink(struct net_device *dev,
369 struct nlattr *tb[], struct nlattr *data[])
370{
371 struct macvlan_dev *vlan = netdev_priv(dev);
372 struct macvlan_port *port;
373 struct net_device *lowerdev;
374 int err;
375
376 if (!tb[IFLA_LINK])
377 return -EINVAL;
378
379 lowerdev = __dev_get_by_index(nla_get_u32(tb[IFLA_LINK]));
380 if (lowerdev == NULL)
381 return -ENODEV;
382
383 if (!tb[IFLA_MTU])
384 dev->mtu = lowerdev->mtu;
385 else if (dev->mtu > lowerdev->mtu)
386 return -EINVAL;
387
388 if (!tb[IFLA_ADDRESS])
389 random_ether_addr(dev->dev_addr);
390
391 if (lowerdev->macvlan_port == NULL) {
392 err = macvlan_port_create(lowerdev);
393 if (err < 0)
394 return err;
395 }
396 port = lowerdev->macvlan_port;
397
398 vlan->lowerdev = lowerdev;
399 vlan->dev = dev;
400 vlan->port = port;
401
402 err = register_netdevice(dev);
403 if (err < 0)
404 return err;
405
406 list_add_tail(&vlan->list, &port->vlans);
407 macvlan_transfer_operstate(dev);
408 return 0;
409}
410
411static void macvlan_dellink(struct net_device *dev)
412{
413 struct macvlan_dev *vlan = netdev_priv(dev);
414 struct macvlan_port *port = vlan->port;
415
416 list_del(&vlan->list);
417 unregister_netdevice(dev);
418
419 if (list_empty(&port->vlans))
420 macvlan_port_destroy(dev);
421}
422
423static struct rtnl_link_ops macvlan_link_ops __read_mostly = {
424 .kind = "macvlan",
425 .priv_size = sizeof(struct macvlan_dev),
426 .setup = macvlan_setup,
427 .validate = macvlan_validate,
428 .newlink = macvlan_newlink,
429 .dellink = macvlan_dellink,
430};
431
432static int macvlan_device_event(struct notifier_block *unused,
433 unsigned long event, void *ptr)
434{
435 struct net_device *dev = ptr;
436 struct macvlan_dev *vlan, *next;
437 struct macvlan_port *port;
438
439 port = dev->macvlan_port;
440 if (port == NULL)
441 return NOTIFY_DONE;
442
443 switch (event) {
444 case NETDEV_CHANGE:
445 list_for_each_entry(vlan, &port->vlans, list)
446 macvlan_transfer_operstate(vlan->dev);
447 break;
448 case NETDEV_FEAT_CHANGE:
449 list_for_each_entry(vlan, &port->vlans, list) {
450 vlan->dev->features = dev->features & MACVLAN_FEATURES;
451 netdev_features_change(vlan->dev);
452 }
453 break;
454 case NETDEV_UNREGISTER:
455 list_for_each_entry_safe(vlan, next, &port->vlans, list)
456 macvlan_dellink(vlan->dev);
457 break;
458 }
459 return NOTIFY_DONE;
460}
461
462static struct notifier_block macvlan_notifier_block __read_mostly = {
463 .notifier_call = macvlan_device_event,
464};
465
466static int __init macvlan_init_module(void)
467{
468 int err;
469
470 register_netdevice_notifier(&macvlan_notifier_block);
471 macvlan_handle_frame_hook = macvlan_handle_frame;
472
473 err = rtnl_link_register(&macvlan_link_ops);
474 if (err < 0)
475 goto err1;
476 return 0;
477err1:
478 macvlan_handle_frame_hook = macvlan_handle_frame;
479 unregister_netdevice_notifier(&macvlan_notifier_block);
480 return err;
481}
482
483static void __exit macvlan_cleanup_module(void)
484{
485 rtnl_link_unregister(&macvlan_link_ops);
486 macvlan_handle_frame_hook = NULL;
487 unregister_netdevice_notifier(&macvlan_notifier_block);
488}
489
490module_init(macvlan_init_module);
491module_exit(macvlan_cleanup_module);
492
493MODULE_LICENSE("GPL");
494MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
495MODULE_DESCRIPTION("Driver for MAC address based VLANs");
496MODULE_ALIAS_RTNL_LINK("macvlan");
diff --git a/drivers/net/mlx4/fw.c b/drivers/net/mlx4/fw.c
index d2b065351e45..c45cbe43a0c4 100644
--- a/drivers/net/mlx4/fw.c
+++ b/drivers/net/mlx4/fw.c
@@ -138,6 +138,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
138#define QUERY_DEV_CAP_ACK_DELAY_OFFSET 0x35 138#define QUERY_DEV_CAP_ACK_DELAY_OFFSET 0x35
139#define QUERY_DEV_CAP_MTU_WIDTH_OFFSET 0x36 139#define QUERY_DEV_CAP_MTU_WIDTH_OFFSET 0x36
140#define QUERY_DEV_CAP_VL_PORT_OFFSET 0x37 140#define QUERY_DEV_CAP_VL_PORT_OFFSET 0x37
141#define QUERY_DEV_CAP_MAX_MSG_SZ_OFFSET 0x38
141#define QUERY_DEV_CAP_MAX_GID_OFFSET 0x3b 142#define QUERY_DEV_CAP_MAX_GID_OFFSET 0x3b
142#define QUERY_DEV_CAP_RATE_SUPPORT_OFFSET 0x3c 143#define QUERY_DEV_CAP_RATE_SUPPORT_OFFSET 0x3c
143#define QUERY_DEV_CAP_MAX_PKEY_OFFSET 0x3f 144#define QUERY_DEV_CAP_MAX_PKEY_OFFSET 0x3f
@@ -220,6 +221,8 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
220 dev_cap->local_ca_ack_delay = field & 0x1f; 221 dev_cap->local_ca_ack_delay = field & 0x1f;
221 MLX4_GET(field, outbox, QUERY_DEV_CAP_VL_PORT_OFFSET); 222 MLX4_GET(field, outbox, QUERY_DEV_CAP_VL_PORT_OFFSET);
222 dev_cap->num_ports = field & 0xf; 223 dev_cap->num_ports = field & 0xf;
224 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MSG_SZ_OFFSET);
225 dev_cap->max_msg_sz = 1 << (field & 0x1f);
223 MLX4_GET(stat_rate, outbox, QUERY_DEV_CAP_RATE_SUPPORT_OFFSET); 226 MLX4_GET(stat_rate, outbox, QUERY_DEV_CAP_RATE_SUPPORT_OFFSET);
224 dev_cap->stat_rate_support = stat_rate; 227 dev_cap->stat_rate_support = stat_rate;
225 MLX4_GET(dev_cap->flags, outbox, QUERY_DEV_CAP_FLAGS_OFFSET); 228 MLX4_GET(dev_cap->flags, outbox, QUERY_DEV_CAP_FLAGS_OFFSET);
diff --git a/drivers/net/mlx4/fw.h b/drivers/net/mlx4/fw.h
index 296254ac27c1..7e1dd9e25cfb 100644
--- a/drivers/net/mlx4/fw.h
+++ b/drivers/net/mlx4/fw.h
@@ -60,6 +60,7 @@ struct mlx4_dev_cap {
60 int max_rdma_global; 60 int max_rdma_global;
61 int local_ca_ack_delay; 61 int local_ca_ack_delay;
62 int num_ports; 62 int num_ports;
63 u32 max_msg_sz;
63 int max_mtu[MLX4_MAX_PORTS + 1]; 64 int max_mtu[MLX4_MAX_PORTS + 1];
64 int max_port_width[MLX4_MAX_PORTS + 1]; 65 int max_port_width[MLX4_MAX_PORTS + 1];
65 int max_vl[MLX4_MAX_PORTS + 1]; 66 int max_vl[MLX4_MAX_PORTS + 1];
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
index c3da2a2f5431..a4f2e0475a71 100644
--- a/drivers/net/mlx4/main.c
+++ b/drivers/net/mlx4/main.c
@@ -154,6 +154,7 @@ static int __devinit mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev
154 dev->caps.reserved_uars = dev_cap->reserved_uars; 154 dev->caps.reserved_uars = dev_cap->reserved_uars;
155 dev->caps.reserved_pds = dev_cap->reserved_pds; 155 dev->caps.reserved_pds = dev_cap->reserved_pds;
156 dev->caps.mtt_entry_sz = MLX4_MTT_ENTRY_PER_SEG * dev_cap->mtt_entry_sz; 156 dev->caps.mtt_entry_sz = MLX4_MTT_ENTRY_PER_SEG * dev_cap->mtt_entry_sz;
157 dev->caps.max_msg_sz = dev_cap->max_msg_sz;
157 dev->caps.page_size_cap = ~(u32) (dev_cap->min_page_sz - 1); 158 dev->caps.page_size_cap = ~(u32) (dev_cap->min_page_sz - 1);
158 dev->caps.flags = dev_cap->flags; 159 dev->caps.flags = dev_cap->flags;
159 dev->caps.stat_rate_support = dev_cap->stat_rate_support; 160 dev->caps.stat_rate_support = dev_cap->stat_rate_support;
diff --git a/drivers/net/mlx4/mlx4.h b/drivers/net/mlx4/mlx4.h
index 3d3b6d24d8d3..d9c91a71fc87 100644
--- a/drivers/net/mlx4/mlx4.h
+++ b/drivers/net/mlx4/mlx4.h
@@ -37,6 +37,7 @@
37#ifndef MLX4_H 37#ifndef MLX4_H
38#define MLX4_H 38#define MLX4_H
39 39
40#include <linux/mutex.h>
40#include <linux/radix-tree.h> 41#include <linux/radix-tree.h>
41 42
42#include <linux/mlx4/device.h> 43#include <linux/mlx4/device.h>
diff --git a/drivers/net/mlx4/qp.c b/drivers/net/mlx4/qp.c
index 492cfaaaa75c..19b48c71cf7f 100644
--- a/drivers/net/mlx4/qp.c
+++ b/drivers/net/mlx4/qp.c
@@ -277,3 +277,24 @@ void mlx4_cleanup_qp_table(struct mlx4_dev *dev)
277 mlx4_CONF_SPECIAL_QP(dev, 0); 277 mlx4_CONF_SPECIAL_QP(dev, 0);
278 mlx4_bitmap_cleanup(&mlx4_priv(dev)->qp_table.bitmap); 278 mlx4_bitmap_cleanup(&mlx4_priv(dev)->qp_table.bitmap);
279} 279}
280
281int mlx4_qp_query(struct mlx4_dev *dev, struct mlx4_qp *qp,
282 struct mlx4_qp_context *context)
283{
284 struct mlx4_cmd_mailbox *mailbox;
285 int err;
286
287 mailbox = mlx4_alloc_cmd_mailbox(dev);
288 if (IS_ERR(mailbox))
289 return PTR_ERR(mailbox);
290
291 err = mlx4_cmd_box(dev, 0, mailbox->dma, qp->qpn, 0,
292 MLX4_CMD_QUERY_QP, MLX4_CMD_TIME_CLASS_A);
293 if (!err)
294 memcpy(context, mailbox->buf + 8, sizeof *context);
295
296 mlx4_free_cmd_mailbox(dev, mailbox);
297 return err;
298}
299EXPORT_SYMBOL_GPL(mlx4_qp_query);
300
diff --git a/drivers/net/mlx4/srq.c b/drivers/net/mlx4/srq.c
index 2134f83aed87..b061c86d6839 100644
--- a/drivers/net/mlx4/srq.c
+++ b/drivers/net/mlx4/srq.c
@@ -102,6 +102,13 @@ static int mlx4_ARM_SRQ(struct mlx4_dev *dev, int srq_num, int limit_watermark)
102 MLX4_CMD_TIME_CLASS_B); 102 MLX4_CMD_TIME_CLASS_B);
103} 103}
104 104
105static int mlx4_QUERY_SRQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
106 int srq_num)
107{
108 return mlx4_cmd_box(dev, 0, mailbox->dma, srq_num, 0, MLX4_CMD_QUERY_SRQ,
109 MLX4_CMD_TIME_CLASS_A);
110}
111
105int mlx4_srq_alloc(struct mlx4_dev *dev, u32 pdn, struct mlx4_mtt *mtt, 112int mlx4_srq_alloc(struct mlx4_dev *dev, u32 pdn, struct mlx4_mtt *mtt,
106 u64 db_rec, struct mlx4_srq *srq) 113 u64 db_rec, struct mlx4_srq *srq)
107{ 114{
@@ -205,6 +212,29 @@ int mlx4_srq_arm(struct mlx4_dev *dev, struct mlx4_srq *srq, int limit_watermark
205} 212}
206EXPORT_SYMBOL_GPL(mlx4_srq_arm); 213EXPORT_SYMBOL_GPL(mlx4_srq_arm);
207 214
215int mlx4_srq_query(struct mlx4_dev *dev, struct mlx4_srq *srq, int *limit_watermark)
216{
217 struct mlx4_cmd_mailbox *mailbox;
218 struct mlx4_srq_context *srq_context;
219 int err;
220
221 mailbox = mlx4_alloc_cmd_mailbox(dev);
222 if (IS_ERR(mailbox))
223 return PTR_ERR(mailbox);
224
225 srq_context = mailbox->buf;
226
227 err = mlx4_QUERY_SRQ(dev, mailbox, srq->srqn);
228 if (err)
229 goto err_out;
230 *limit_watermark = srq_context->limit_watermark;
231
232err_out:
233 mlx4_free_cmd_mailbox(dev, mailbox);
234 return err;
235}
236EXPORT_SYMBOL_GPL(mlx4_srq_query);
237
208int __devinit mlx4_init_srq_table(struct mlx4_dev *dev) 238int __devinit mlx4_init_srq_table(struct mlx4_dev *dev)
209{ 239{
210 struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table; 240 struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
diff --git a/drivers/net/natsemi.c b/drivers/net/natsemi.c
index 460a08718c69..3450051ae56b 100644
--- a/drivers/net/natsemi.c
+++ b/drivers/net/natsemi.c
@@ -2357,8 +2357,8 @@ static void netdev_rx(struct net_device *dev, int *work_done, int work_to_do)
2357 np->rx_dma[entry], 2357 np->rx_dma[entry],
2358 buflen, 2358 buflen,
2359 PCI_DMA_FROMDEVICE); 2359 PCI_DMA_FROMDEVICE);
2360 eth_copy_and_sum(skb, 2360 skb_copy_to_linear_data(skb,
2361 np->rx_skbuff[entry]->data, pkt_len, 0); 2361 np->rx_skbuff[entry]->data, pkt_len);
2362 skb_put(skb, pkt_len); 2362 skb_put(skb, pkt_len);
2363 pci_dma_sync_single_for_device(np->pci_dev, 2363 pci_dma_sync_single_for_device(np->pci_dev,
2364 np->rx_dma[entry], 2364 np->rx_dma[entry],
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index 56f8197b953b..b703ccfe040b 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -54,8 +54,6 @@ static char netxen_nic_driver_string[] = "NetXen Network Driver version "
54#define NETXEN_ADAPTER_UP_MAGIC 777 54#define NETXEN_ADAPTER_UP_MAGIC 777
55#define NETXEN_NIC_PEG_TUNE 0 55#define NETXEN_NIC_PEG_TUNE 0
56 56
57u8 nx_p2_id = NX_P2_C0;
58
59#define DMA_32BIT_MASK 0x00000000ffffffffULL 57#define DMA_32BIT_MASK 0x00000000ffffffffULL
60#define DMA_35BIT_MASK 0x00000007ffffffffULL 58#define DMA_35BIT_MASK 0x00000007ffffffffULL
61 59
@@ -307,8 +305,7 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
307 goto err_out_disable_pdev; 305 goto err_out_disable_pdev;
308 306
309 pci_set_master(pdev); 307 pci_set_master(pdev);
310 pci_read_config_byte(pdev, PCI_REVISION_ID, &nx_p2_id); 308 if (pdev->revision == NX_P2_C1 &&
311 if (nx_p2_id == NX_P2_C1 &&
312 (pci_set_dma_mask(pdev, DMA_35BIT_MASK) == 0) && 309 (pci_set_dma_mask(pdev, DMA_35BIT_MASK) == 0) &&
313 (pci_set_consistent_dma_mask(pdev, DMA_35BIT_MASK) == 0)) { 310 (pci_set_consistent_dma_mask(pdev, DMA_35BIT_MASK) == 0)) {
314 pci_using_dac = 1; 311 pci_using_dac = 1;
@@ -552,7 +549,7 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
552 INIT_WORK(&adapter->watchdog_task, netxen_watchdog_task); 549 INIT_WORK(&adapter->watchdog_task, netxen_watchdog_task);
553 adapter->ahw.pdev = pdev; 550 adapter->ahw.pdev = pdev;
554 adapter->proc_cmd_buf_counter = 0; 551 adapter->proc_cmd_buf_counter = 0;
555 adapter->ahw.revision_id = nx_p2_id; 552 adapter->ahw.revision_id = pdev->revision;
556 553
557 /* make sure Window == 1 */ 554 /* make sure Window == 1 */
558 netxen_nic_pci_change_crbwindow(adapter, 1); 555 netxen_nic_pci_change_crbwindow(adapter, 1);
diff --git a/drivers/net/ni52.c b/drivers/net/ni52.c
index 8dbd6d1900b5..5e7999db2096 100644
--- a/drivers/net/ni52.c
+++ b/drivers/net/ni52.c
@@ -936,7 +936,7 @@ static void ni52_rcv_int(struct net_device *dev)
936 { 936 {
937 skb_reserve(skb,2); 937 skb_reserve(skb,2);
938 skb_put(skb,totlen); 938 skb_put(skb,totlen);
939 eth_copy_and_sum(skb,(char *) p->base+(unsigned long) rbd->buffer,totlen,0); 939 skb_copy_to_linear_data(skb,(char *) p->base+(unsigned long) rbd->buffer,totlen);
940 skb->protocol=eth_type_trans(skb,dev); 940 skb->protocol=eth_type_trans(skb,dev);
941 netif_rx(skb); 941 netif_rx(skb);
942 dev->last_rx = jiffies; 942 dev->last_rx = jiffies;
diff --git a/drivers/net/ni65.c b/drivers/net/ni65.c
index 3818edf0ac18..4ef5fe345191 100644
--- a/drivers/net/ni65.c
+++ b/drivers/net/ni65.c
@@ -1096,7 +1096,7 @@ static void ni65_recv_intr(struct net_device *dev,int csr0)
1096#ifdef RCV_VIA_SKB 1096#ifdef RCV_VIA_SKB
1097 if( (unsigned long) (skb->data + R_BUF_SIZE) > 0x1000000) { 1097 if( (unsigned long) (skb->data + R_BUF_SIZE) > 0x1000000) {
1098 skb_put(skb,len); 1098 skb_put(skb,len);
1099 eth_copy_and_sum(skb, (unsigned char *)(p->recv_skb[p->rmdnum]->data),len,0); 1099 skb_copy_to_linear_data(skb, (unsigned char *)(p->recv_skb[p->rmdnum]->data),len);
1100 } 1100 }
1101 else { 1101 else {
1102 struct sk_buff *skb1 = p->recv_skb[p->rmdnum]; 1102 struct sk_buff *skb1 = p->recv_skb[p->rmdnum];
@@ -1108,7 +1108,7 @@ static void ni65_recv_intr(struct net_device *dev,int csr0)
1108 } 1108 }
1109#else 1109#else
1110 skb_put(skb,len); 1110 skb_put(skb,len);
1111 eth_copy_and_sum(skb, (unsigned char *) p->recvbounce[p->rmdnum],len,0); 1111 skb_copy_to_linear_data(skb, (unsigned char *) p->recvbounce[p->rmdnum],len);
1112#endif 1112#endif
1113 p->stats.rx_packets++; 1113 p->stats.rx_packets++;
1114 p->stats.rx_bytes += len; 1114 p->stats.rx_bytes += len;
diff --git a/drivers/net/pci-skeleton.c b/drivers/net/pci-skeleton.c
index df8998b4f37e..3cdbe118200b 100644
--- a/drivers/net/pci-skeleton.c
+++ b/drivers/net/pci-skeleton.c
@@ -1567,7 +1567,7 @@ static void netdrv_rx_interrupt (struct net_device *dev,
1567 if (skb) { 1567 if (skb) {
1568 skb_reserve (skb, 2); /* 16 byte align the IP fields. */ 1568 skb_reserve (skb, 2); /* 16 byte align the IP fields. */
1569 1569
1570 eth_copy_and_sum (skb, &rx_ring[ring_offset + 4], pkt_size, 0); 1570 skb_copy_to_linear_data (skb, &rx_ring[ring_offset + 4], pkt_size);
1571 skb_put (skb, pkt_size); 1571 skb_put (skb, pkt_size);
1572 1572
1573 skb->protocol = eth_type_trans (skb, dev); 1573 skb->protocol = eth_type_trans (skb, dev);
diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c
index 9c171a7390e2..465485a3fbc6 100644
--- a/drivers/net/pcnet32.c
+++ b/drivers/net/pcnet32.c
@@ -1235,9 +1235,9 @@ static void pcnet32_rx_entry(struct net_device *dev,
1235 lp->rx_dma_addr[entry], 1235 lp->rx_dma_addr[entry],
1236 pkt_len, 1236 pkt_len,
1237 PCI_DMA_FROMDEVICE); 1237 PCI_DMA_FROMDEVICE);
1238 eth_copy_and_sum(skb, 1238 skb_copy_to_linear_data(skb,
1239 (unsigned char *)(lp->rx_skbuff[entry]->data), 1239 (unsigned char *)(lp->rx_skbuff[entry]->data),
1240 pkt_len, 0); 1240 pkt_len);
1241 pci_dma_sync_single_for_device(lp->pci_dev, 1241 pci_dma_sync_single_for_device(lp->pci_dev,
1242 lp->rx_dma_addr[entry], 1242 lp->rx_dma_addr[entry],
1243 pkt_len, 1243 pkt_len,
diff --git a/drivers/net/pppol2tp.c b/drivers/net/pppol2tp.c
new file mode 100644
index 000000000000..5891a0fbdc8b
--- /dev/null
+++ b/drivers/net/pppol2tp.c
@@ -0,0 +1,2486 @@
1/*****************************************************************************
2 * Linux PPP over L2TP (PPPoX/PPPoL2TP) Sockets
3 *
4 * PPPoX --- Generic PPP encapsulation socket family
5 * PPPoL2TP --- PPP over L2TP (RFC 2661)
6 *
7 * Version: 1.0.0
8 *
9 * Authors: Martijn van Oosterhout <kleptog@svana.org>
10 * James Chapman (jchapman@katalix.com)
11 * Contributors:
12 * Michal Ostrowski <mostrows@speakeasy.net>
13 * Arnaldo Carvalho de Melo <acme@xconectiva.com.br>
14 * David S. Miller (davem@redhat.com)
15 *
16 * License:
17 * This program is free software; you can redistribute it and/or
18 * modify it under the terms of the GNU General Public License
19 * as published by the Free Software Foundation; either version
20 * 2 of the License, or (at your option) any later version.
21 *
22 */
23
24/* This driver handles only L2TP data frames; control frames are handled by a
25 * userspace application.
26 *
27 * To send data in an L2TP session, userspace opens a PPPoL2TP socket and
28 * attaches it to a bound UDP socket with local tunnel_id / session_id and
29 * peer tunnel_id / session_id set. Data can then be sent or received using
30 * regular socket sendmsg() / recvmsg() calls. Kernel parameters of the socket
31 * can be read or modified using ioctl() or [gs]etsockopt() calls.
32 *
33 * When a PPPoL2TP socket is connected with local and peer session_id values
34 * zero, the socket is treated as a special tunnel management socket.
35 *
36 * Here's example userspace code to create a socket for sending/receiving data
37 * over an L2TP session:-
38 *
39 * struct sockaddr_pppol2tp sax;
40 * int fd;
41 * int session_fd;
42 *
43 * fd = socket(AF_PPPOX, SOCK_DGRAM, PX_PROTO_OL2TP);
44 *
45 * sax.sa_family = AF_PPPOX;
46 * sax.sa_protocol = PX_PROTO_OL2TP;
47 * sax.pppol2tp.fd = tunnel_fd; // bound UDP socket
48 * sax.pppol2tp.addr.sin_addr.s_addr = addr->sin_addr.s_addr;
49 * sax.pppol2tp.addr.sin_port = addr->sin_port;
50 * sax.pppol2tp.addr.sin_family = AF_INET;
51 * sax.pppol2tp.s_tunnel = tunnel_id;
52 * sax.pppol2tp.s_session = session_id;
53 * sax.pppol2tp.d_tunnel = peer_tunnel_id;
54 * sax.pppol2tp.d_session = peer_session_id;
55 *
56 * session_fd = connect(fd, (struct sockaddr *)&sax, sizeof(sax));
57 *
58 * A pppd plugin that allows PPP traffic to be carried over L2TP using
59 * this driver is available from the OpenL2TP project at
60 * http://openl2tp.sourceforge.net.
61 */
62
63#include <linux/module.h>
64#include <linux/version.h>
65#include <linux/string.h>
66#include <linux/list.h>
67#include <asm/uaccess.h>
68
69#include <linux/kernel.h>
70#include <linux/spinlock.h>
71#include <linux/kthread.h>
72#include <linux/sched.h>
73#include <linux/slab.h>
74#include <linux/errno.h>
75#include <linux/jiffies.h>
76
77#include <linux/netdevice.h>
78#include <linux/net.h>
79#include <linux/inetdevice.h>
80#include <linux/skbuff.h>
81#include <linux/init.h>
82#include <linux/ip.h>
83#include <linux/udp.h>
84#include <linux/if_pppox.h>
85#include <linux/if_pppol2tp.h>
86#include <net/sock.h>
87#include <linux/ppp_channel.h>
88#include <linux/ppp_defs.h>
89#include <linux/if_ppp.h>
90#include <linux/file.h>
91#include <linux/hash.h>
92#include <linux/sort.h>
93#include <linux/proc_fs.h>
94#include <net/dst.h>
95#include <net/ip.h>
96#include <net/udp.h>
97#include <net/xfrm.h>
98
99#include <asm/byteorder.h>
100#include <asm/atomic.h>
101
102
103#define PPPOL2TP_DRV_VERSION "V1.0"
104
105/* L2TP header constants */
106#define L2TP_HDRFLAG_T 0x8000
107#define L2TP_HDRFLAG_L 0x4000
108#define L2TP_HDRFLAG_S 0x0800
109#define L2TP_HDRFLAG_O 0x0200
110#define L2TP_HDRFLAG_P 0x0100
111
112#define L2TP_HDR_VER_MASK 0x000F
113#define L2TP_HDR_VER 0x0002
114
115/* Space for UDP, L2TP and PPP headers */
116#define PPPOL2TP_HEADER_OVERHEAD 40
117
118/* Just some random numbers */
119#define L2TP_TUNNEL_MAGIC 0x42114DDA
120#define L2TP_SESSION_MAGIC 0x0C04EB7D
121
122#define PPPOL2TP_HASH_BITS 4
123#define PPPOL2TP_HASH_SIZE (1 << PPPOL2TP_HASH_BITS)
124
125/* Default trace flags */
126#define PPPOL2TP_DEFAULT_DEBUG_FLAGS 0
127
128#define PRINTK(_mask, _type, _lvl, _fmt, args...) \
129 do { \
130 if ((_mask) & (_type)) \
131 printk(_lvl "PPPOL2TP: " _fmt, ##args); \
132 } while(0)
133
134/* Number of bytes to build transmit L2TP headers.
135 * Unfortunately the size is different depending on whether sequence numbers
136 * are enabled.
137 */
138#define PPPOL2TP_L2TP_HDR_SIZE_SEQ 10
139#define PPPOL2TP_L2TP_HDR_SIZE_NOSEQ 6
140
141struct pppol2tp_tunnel;
142
143/* Describes a session. It is the sk_user_data field in the PPPoL2TP
144 * socket. Contains information to determine incoming packets and transmit
145 * outgoing ones.
146 */
147struct pppol2tp_session
148{
149 int magic; /* should be
150 * L2TP_SESSION_MAGIC */
151 int owner; /* pid that opened the socket */
152
153 struct sock *sock; /* Pointer to the session
154 * PPPoX socket */
155 struct sock *tunnel_sock; /* Pointer to the tunnel UDP
156 * socket */
157
158 struct pppol2tp_addr tunnel_addr; /* Description of tunnel */
159
160 struct pppol2tp_tunnel *tunnel; /* back pointer to tunnel
161 * context */
162
163 char name[20]; /* "sess xxxxx/yyyyy", where
164 * x=tunnel_id, y=session_id */
165 int mtu;
166 int mru;
167 int flags; /* accessed by PPPIOCGFLAGS.
168 * Unused. */
169 unsigned recv_seq:1; /* expect receive packets with
170 * sequence numbers? */
171 unsigned send_seq:1; /* send packets with sequence
172 * numbers? */
173 unsigned lns_mode:1; /* behave as LNS? LAC enables
174 * sequence numbers under
175 * control of LNS. */
176 int debug; /* bitmask of debug message
177 * categories */
178 int reorder_timeout; /* configured reorder timeout
179 * (in jiffies) */
180 u16 nr; /* session NR state (receive) */
181 u16 ns; /* session NR state (send) */
182 struct sk_buff_head reorder_q; /* receive reorder queue */
183 struct pppol2tp_ioc_stats stats;
184 struct hlist_node hlist; /* Hash list node */
185};
186
187/* The sk_user_data field of the tunnel's UDP socket. It contains info to track
188 * all the associated sessions so incoming packets can be sorted out
189 */
190struct pppol2tp_tunnel
191{
192 int magic; /* Should be L2TP_TUNNEL_MAGIC */
193 rwlock_t hlist_lock; /* protect session_hlist */
194 struct hlist_head session_hlist[PPPOL2TP_HASH_SIZE];
195 /* hashed list of sessions,
196 * hashed by id */
197 int debug; /* bitmask of debug message
198 * categories */
199 char name[12]; /* "tunl xxxxx" */
200 struct pppol2tp_ioc_stats stats;
201
202 void (*old_sk_destruct)(struct sock *);
203
204 struct sock *sock; /* Parent socket */
205 struct list_head list; /* Keep a list of all open
206 * prepared sockets */
207
208 atomic_t ref_count;
209};
210
211/* Private data stored for received packets in the skb.
212 */
213struct pppol2tp_skb_cb {
214 u16 ns;
215 u16 nr;
216 u16 has_seq;
217 u16 length;
218 unsigned long expires;
219};
220
221#define PPPOL2TP_SKB_CB(skb) ((struct pppol2tp_skb_cb *) &skb->cb[sizeof(struct inet_skb_parm)])
222
223static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb);
224static void pppol2tp_tunnel_free(struct pppol2tp_tunnel *tunnel);
225
226static atomic_t pppol2tp_tunnel_count;
227static atomic_t pppol2tp_session_count;
228static struct ppp_channel_ops pppol2tp_chan_ops = { pppol2tp_xmit , NULL };
229static struct proto_ops pppol2tp_ops;
230static LIST_HEAD(pppol2tp_tunnel_list);
231static DEFINE_RWLOCK(pppol2tp_tunnel_list_lock);
232
233/* Helpers to obtain tunnel/session contexts from sockets.
234 */
235static inline struct pppol2tp_session *pppol2tp_sock_to_session(struct sock *sk)
236{
237 struct pppol2tp_session *session;
238
239 if (sk == NULL)
240 return NULL;
241
242 session = (struct pppol2tp_session *)(sk->sk_user_data);
243 if (session == NULL)
244 return NULL;
245
246 BUG_ON(session->magic != L2TP_SESSION_MAGIC);
247
248 return session;
249}
250
251static inline struct pppol2tp_tunnel *pppol2tp_sock_to_tunnel(struct sock *sk)
252{
253 struct pppol2tp_tunnel *tunnel;
254
255 if (sk == NULL)
256 return NULL;
257
258 tunnel = (struct pppol2tp_tunnel *)(sk->sk_user_data);
259 if (tunnel == NULL)
260 return NULL;
261
262 BUG_ON(tunnel->magic != L2TP_TUNNEL_MAGIC);
263
264 return tunnel;
265}
266
267/* Tunnel reference counts. Incremented per session that is added to
268 * the tunnel.
269 */
270static inline void pppol2tp_tunnel_inc_refcount(struct pppol2tp_tunnel *tunnel)
271{
272 atomic_inc(&tunnel->ref_count);
273}
274
275static inline void pppol2tp_tunnel_dec_refcount(struct pppol2tp_tunnel *tunnel)
276{
277 if (atomic_dec_and_test(&tunnel->ref_count))
278 pppol2tp_tunnel_free(tunnel);
279}
280
281/* Session hash list.
282 * The session_id SHOULD be random according to RFC2661, but several
283 * L2TP implementations (Cisco and Microsoft) use incrementing
284 * session_ids. So we do a real hash on the session_id, rather than a
285 * simple bitmask.
286 */
287static inline struct hlist_head *
288pppol2tp_session_id_hash(struct pppol2tp_tunnel *tunnel, u16 session_id)
289{
290 unsigned long hash_val = (unsigned long) session_id;
291 return &tunnel->session_hlist[hash_long(hash_val, PPPOL2TP_HASH_BITS)];
292}
293
294/* Lookup a session by id
295 */
296static struct pppol2tp_session *
297pppol2tp_session_find(struct pppol2tp_tunnel *tunnel, u16 session_id)
298{
299 struct hlist_head *session_list =
300 pppol2tp_session_id_hash(tunnel, session_id);
301 struct pppol2tp_session *session;
302 struct hlist_node *walk;
303
304 read_lock(&tunnel->hlist_lock);
305 hlist_for_each_entry(session, walk, session_list, hlist) {
306 if (session->tunnel_addr.s_session == session_id) {
307 read_unlock(&tunnel->hlist_lock);
308 return session;
309 }
310 }
311 read_unlock(&tunnel->hlist_lock);
312
313 return NULL;
314}
315
316/* Lookup a tunnel by id
317 */
318static struct pppol2tp_tunnel *pppol2tp_tunnel_find(u16 tunnel_id)
319{
320 struct pppol2tp_tunnel *tunnel = NULL;
321
322 read_lock(&pppol2tp_tunnel_list_lock);
323 list_for_each_entry(tunnel, &pppol2tp_tunnel_list, list) {
324 if (tunnel->stats.tunnel_id == tunnel_id) {
325 read_unlock(&pppol2tp_tunnel_list_lock);
326 return tunnel;
327 }
328 }
329 read_unlock(&pppol2tp_tunnel_list_lock);
330
331 return NULL;
332}
333
334/*****************************************************************************
335 * Receive data handling
336 *****************************************************************************/
337
338/* Queue a skb in order. We come here only if the skb has an L2TP sequence
339 * number.
340 */
341static void pppol2tp_recv_queue_skb(struct pppol2tp_session *session, struct sk_buff *skb)
342{
343 struct sk_buff *skbp;
344 u16 ns = PPPOL2TP_SKB_CB(skb)->ns;
345
346 spin_lock(&session->reorder_q.lock);
347 skb_queue_walk(&session->reorder_q, skbp) {
348 if (PPPOL2TP_SKB_CB(skbp)->ns > ns) {
349 __skb_insert(skb, skbp->prev, skbp, &session->reorder_q);
350 PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_DEBUG,
351 "%s: pkt %hu, inserted before %hu, reorder_q len=%d\n",
352 session->name, ns, PPPOL2TP_SKB_CB(skbp)->ns,
353 skb_queue_len(&session->reorder_q));
354 session->stats.rx_oos_packets++;
355 goto out;
356 }
357 }
358
359 __skb_queue_tail(&session->reorder_q, skb);
360
361out:
362 spin_unlock(&session->reorder_q.lock);
363}
364
365/* Dequeue a single skb.
366 */
367static void pppol2tp_recv_dequeue_skb(struct pppol2tp_session *session, struct sk_buff *skb)
368{
369 struct pppol2tp_tunnel *tunnel = session->tunnel;
370 int length = PPPOL2TP_SKB_CB(skb)->length;
371 struct sock *session_sock = NULL;
372
373 /* We're about to requeue the skb, so unlink it and return resources
374 * to its current owner (a socket receive buffer).
375 */
376 skb_unlink(skb, &session->reorder_q);
377 skb_orphan(skb);
378
379 tunnel->stats.rx_packets++;
380 tunnel->stats.rx_bytes += length;
381 session->stats.rx_packets++;
382 session->stats.rx_bytes += length;
383
384 if (PPPOL2TP_SKB_CB(skb)->has_seq) {
385 /* Bump our Nr */
386 session->nr++;
387 PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_DEBUG,
388 "%s: updated nr to %hu\n", session->name, session->nr);
389 }
390
391 /* If the socket is bound, send it in to PPP's input queue. Otherwise
392 * queue it on the session socket.
393 */
394 session_sock = session->sock;
395 if (session_sock->sk_state & PPPOX_BOUND) {
396 struct pppox_sock *po;
397 PRINTK(session->debug, PPPOL2TP_MSG_DATA, KERN_DEBUG,
398 "%s: recv %d byte data frame, passing to ppp\n",
399 session->name, length);
400
401 /* We need to forget all info related to the L2TP packet
402 * gathered in the skb as we are going to reuse the same
403 * skb for the inner packet.
404 * Namely we need to:
405 * - reset xfrm (IPSec) information as it applies to
406 * the outer L2TP packet and not to the inner one
407 * - release the dst to force a route lookup on the inner
408 * IP packet since skb->dst currently points to the dst
409 * of the UDP tunnel
410 * - reset netfilter information as it doesn't apply
411 * to the inner packet either
412 */
413 secpath_reset(skb);
414 dst_release(skb->dst);
415 skb->dst = NULL;
416 nf_reset(skb);
417
418 po = pppox_sk(session_sock);
419 ppp_input(&po->chan, skb);
420 } else {
421 PRINTK(session->debug, PPPOL2TP_MSG_DATA, KERN_INFO,
422 "%s: socket not bound\n", session->name);
423
424 /* Not bound. Nothing we can do, so discard. */
425 session->stats.rx_errors++;
426 kfree_skb(skb);
427 }
428
429 sock_put(session->sock);
430}
431
432/* Dequeue skbs from the session's reorder_q, subject to packet order.
433 * Skbs that have been in the queue for too long are simply discarded.
434 */
435static void pppol2tp_recv_dequeue(struct pppol2tp_session *session)
436{
437 struct sk_buff *skb;
438 struct sk_buff *tmp;
439
440 /* If the pkt at the head of the queue has the nr that we
441 * expect to send up next, dequeue it and any other
442 * in-sequence packets behind it.
443 */
444 spin_lock(&session->reorder_q.lock);
445 skb_queue_walk_safe(&session->reorder_q, skb, tmp) {
446 if (time_after(jiffies, PPPOL2TP_SKB_CB(skb)->expires)) {
447 session->stats.rx_seq_discards++;
448 session->stats.rx_errors++;
449 PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_DEBUG,
450 "%s: oos pkt %hu len %d discarded (too old), "
451 "waiting for %hu, reorder_q_len=%d\n",
452 session->name, PPPOL2TP_SKB_CB(skb)->ns,
453 PPPOL2TP_SKB_CB(skb)->length, session->nr,
454 skb_queue_len(&session->reorder_q));
455 __skb_unlink(skb, &session->reorder_q);
456 kfree_skb(skb);
457 continue;
458 }
459
460 if (PPPOL2TP_SKB_CB(skb)->has_seq) {
461 if (PPPOL2TP_SKB_CB(skb)->ns != session->nr) {
462 PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_DEBUG,
463 "%s: holding oos pkt %hu len %d, "
464 "waiting for %hu, reorder_q_len=%d\n",
465 session->name, PPPOL2TP_SKB_CB(skb)->ns,
466 PPPOL2TP_SKB_CB(skb)->length, session->nr,
467 skb_queue_len(&session->reorder_q));
468 goto out;
469 }
470 }
471 spin_unlock(&session->reorder_q.lock);
472 pppol2tp_recv_dequeue_skb(session, skb);
473 spin_lock(&session->reorder_q.lock);
474 }
475
476out:
477 spin_unlock(&session->reorder_q.lock);
478}
479
480/* Internal receive frame. Do the real work of receiving an L2TP data frame
481 * here. The skb is not on a list when we get here.
482 * Returns 0 if the packet was a data packet and was successfully passed on.
483 * Returns 1 if the packet was not a good data packet and could not be
484 * forwarded. All such packets are passed up to userspace to deal with.
485 */
486static int pppol2tp_recv_core(struct sock *sock, struct sk_buff *skb)
487{
488 struct pppol2tp_session *session = NULL;
489 struct pppol2tp_tunnel *tunnel;
490 unsigned char *ptr;
491 u16 hdrflags;
492 u16 tunnel_id, session_id;
493 int length;
494 struct udphdr *uh;
495
496 tunnel = pppol2tp_sock_to_tunnel(sock);
497 if (tunnel == NULL)
498 goto error;
499
500 /* Short packet? */
501 if (skb->len < sizeof(struct udphdr)) {
502 PRINTK(tunnel->debug, PPPOL2TP_MSG_DATA, KERN_INFO,
503 "%s: recv short packet (len=%d)\n", tunnel->name, skb->len);
504 goto error;
505 }
506
507 /* Point to L2TP header */
508 ptr = skb->data + sizeof(struct udphdr);
509
510 /* Get L2TP header flags */
511 hdrflags = ntohs(*(__be16*)ptr);
512
513 /* Trace packet contents, if enabled */
514 if (tunnel->debug & PPPOL2TP_MSG_DATA) {
515 printk(KERN_DEBUG "%s: recv: ", tunnel->name);
516
517 for (length = 0; length < 16; length++)
518 printk(" %02X", ptr[length]);
519 printk("\n");
520 }
521
522 /* Get length of L2TP packet */
523 uh = (struct udphdr *) skb_transport_header(skb);
524 length = ntohs(uh->len) - sizeof(struct udphdr);
525
526 /* Too short? */
527 if (length < 12) {
528 PRINTK(tunnel->debug, PPPOL2TP_MSG_DATA, KERN_INFO,
529 "%s: recv short L2TP packet (len=%d)\n", tunnel->name, length);
530 goto error;
531 }
532
533 /* If type is control packet, it is handled by userspace. */
534 if (hdrflags & L2TP_HDRFLAG_T) {
535 PRINTK(tunnel->debug, PPPOL2TP_MSG_DATA, KERN_DEBUG,
536 "%s: recv control packet, len=%d\n", tunnel->name, length);
537 goto error;
538 }
539
540 /* Skip flags */
541 ptr += 2;
542
543 /* If length is present, skip it */
544 if (hdrflags & L2TP_HDRFLAG_L)
545 ptr += 2;
546
547 /* Extract tunnel and session ID */
548 tunnel_id = ntohs(*(__be16 *) ptr);
549 ptr += 2;
550 session_id = ntohs(*(__be16 *) ptr);
551 ptr += 2;
552
553 /* Find the session context */
554 session = pppol2tp_session_find(tunnel, session_id);
555 if (!session) {
556 /* Not found? Pass to userspace to deal with */
557 PRINTK(tunnel->debug, PPPOL2TP_MSG_DATA, KERN_INFO,
558 "%s: no socket found (%hu/%hu). Passing up.\n",
559 tunnel->name, tunnel_id, session_id);
560 goto error;
561 }
562 sock_hold(session->sock);
563
564 /* The ref count on the socket was increased by the above call since
565 * we now hold a pointer to the session. Take care to do sock_put()
566 * when exiting this function from now on...
567 */
568
569 /* Handle the optional sequence numbers. If we are the LAC,
570 * enable/disable sequence numbers under the control of the LNS. If
571 * no sequence numbers present but we were expecting them, discard
572 * frame.
573 */
574 if (hdrflags & L2TP_HDRFLAG_S) {
575 u16 ns, nr;
576 ns = ntohs(*(__be16 *) ptr);
577 ptr += 2;
578 nr = ntohs(*(__be16 *) ptr);
579 ptr += 2;
580
581 /* Received a packet with sequence numbers. If we're the LNS,
582 * check if we sre sending sequence numbers and if not,
583 * configure it so.
584 */
585 if ((!session->lns_mode) && (!session->send_seq)) {
586 PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_INFO,
587 "%s: requested to enable seq numbers by LNS\n",
588 session->name);
589 session->send_seq = -1;
590 }
591
592 /* Store L2TP info in the skb */
593 PPPOL2TP_SKB_CB(skb)->ns = ns;
594 PPPOL2TP_SKB_CB(skb)->nr = nr;
595 PPPOL2TP_SKB_CB(skb)->has_seq = 1;
596
597 PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_DEBUG,
598 "%s: recv data ns=%hu, nr=%hu, session nr=%hu\n",
599 session->name, ns, nr, session->nr);
600 } else {
601 /* No sequence numbers.
602 * If user has configured mandatory sequence numbers, discard.
603 */
604 if (session->recv_seq) {
605 PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_WARNING,
606 "%s: recv data has no seq numbers when required. "
607 "Discarding\n", session->name);
608 session->stats.rx_seq_discards++;
609 session->stats.rx_errors++;
610 goto discard;
611 }
612
613 /* If we're the LAC and we're sending sequence numbers, the
614 * LNS has requested that we no longer send sequence numbers.
615 * If we're the LNS and we're sending sequence numbers, the
616 * LAC is broken. Discard the frame.
617 */
618 if ((!session->lns_mode) && (session->send_seq)) {
619 PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_INFO,
620 "%s: requested to disable seq numbers by LNS\n",
621 session->name);
622 session->send_seq = 0;
623 } else if (session->send_seq) {
624 PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_WARNING,
625 "%s: recv data has no seq numbers when required. "
626 "Discarding\n", session->name);
627 session->stats.rx_seq_discards++;
628 session->stats.rx_errors++;
629 goto discard;
630 }
631
632 /* Store L2TP info in the skb */
633 PPPOL2TP_SKB_CB(skb)->has_seq = 0;
634 }
635
636 /* If offset bit set, skip it. */
637 if (hdrflags & L2TP_HDRFLAG_O)
638 ptr += 2 + ntohs(*(__be16 *) ptr);
639
640 skb_pull(skb, ptr - skb->data);
641
642 /* Skip PPP header, if present. In testing, Microsoft L2TP clients
643 * don't send the PPP header (PPP header compression enabled), but
644 * other clients can include the header. So we cope with both cases
645 * here. The PPP header is always FF03 when using L2TP.
646 *
647 * Note that skb->data[] isn't dereferenced from a u16 ptr here since
648 * the field may be unaligned.
649 */
650 if ((skb->data[0] == 0xff) && (skb->data[1] == 0x03))
651 skb_pull(skb, 2);
652
653 /* Prepare skb for adding to the session's reorder_q. Hold
654 * packets for max reorder_timeout or 1 second if not
655 * reordering.
656 */
657 PPPOL2TP_SKB_CB(skb)->length = length;
658 PPPOL2TP_SKB_CB(skb)->expires = jiffies +
659 (session->reorder_timeout ? session->reorder_timeout : HZ);
660
661 /* Add packet to the session's receive queue. Reordering is done here, if
662 * enabled. Saved L2TP protocol info is stored in skb->sb[].
663 */
664 if (PPPOL2TP_SKB_CB(skb)->has_seq) {
665 if (session->reorder_timeout != 0) {
666 /* Packet reordering enabled. Add skb to session's
667 * reorder queue, in order of ns.
668 */
669 pppol2tp_recv_queue_skb(session, skb);
670 } else {
671 /* Packet reordering disabled. Discard out-of-sequence
672 * packets
673 */
674 if (PPPOL2TP_SKB_CB(skb)->ns != session->nr) {
675 session->stats.rx_seq_discards++;
676 session->stats.rx_errors++;
677 PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_DEBUG,
678 "%s: oos pkt %hu len %d discarded, "
679 "waiting for %hu, reorder_q_len=%d\n",
680 session->name, PPPOL2TP_SKB_CB(skb)->ns,
681 PPPOL2TP_SKB_CB(skb)->length, session->nr,
682 skb_queue_len(&session->reorder_q));
683 goto discard;
684 }
685 skb_queue_tail(&session->reorder_q, skb);
686 }
687 } else {
688 /* No sequence numbers. Add the skb to the tail of the
689 * reorder queue. This ensures that it will be
690 * delivered after all previous sequenced skbs.
691 */
692 skb_queue_tail(&session->reorder_q, skb);
693 }
694
695 /* Try to dequeue as many skbs from reorder_q as we can. */
696 pppol2tp_recv_dequeue(session);
697
698 return 0;
699
700discard:
701 kfree_skb(skb);
702 sock_put(session->sock);
703
704 return 0;
705
706error:
707 return 1;
708}
709
710/* UDP encapsulation receive handler. See net/ipv4/udp.c.
711 * Return codes:
712 * 0 : success.
713 * <0: error
714 * >0: skb should be passed up to userspace as UDP.
715 */
716static int pppol2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
717{
718 struct pppol2tp_tunnel *tunnel;
719
720 tunnel = pppol2tp_sock_to_tunnel(sk);
721 if (tunnel == NULL)
722 goto pass_up;
723
724 PRINTK(tunnel->debug, PPPOL2TP_MSG_DATA, KERN_DEBUG,
725 "%s: received %d bytes\n", tunnel->name, skb->len);
726
727 if (pppol2tp_recv_core(sk, skb))
728 goto pass_up;
729
730 return 0;
731
732pass_up:
733 return 1;
734}
735
736/* Receive message. This is the recvmsg for the PPPoL2TP socket.
737 */
738static int pppol2tp_recvmsg(struct kiocb *iocb, struct socket *sock,
739 struct msghdr *msg, size_t len,
740 int flags)
741{
742 int err;
743 struct sk_buff *skb;
744 struct sock *sk = sock->sk;
745
746 err = -EIO;
747 if (sk->sk_state & PPPOX_BOUND)
748 goto end;
749
750 msg->msg_namelen = 0;
751
752 err = 0;
753 skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
754 flags & MSG_DONTWAIT, &err);
755 if (skb) {
756 err = memcpy_toiovec(msg->msg_iov, (unsigned char *) skb->data,
757 skb->len);
758 if (err < 0)
759 goto do_skb_free;
760 err = skb->len;
761 }
762do_skb_free:
763 kfree_skb(skb);
764end:
765 return err;
766}
767
768/************************************************************************
769 * Transmit handling
770 ***********************************************************************/
771
772/* Tell how big L2TP headers are for a particular session. This
773 * depends on whether sequence numbers are being used.
774 */
775static inline int pppol2tp_l2tp_header_len(struct pppol2tp_session *session)
776{
777 if (session->send_seq)
778 return PPPOL2TP_L2TP_HDR_SIZE_SEQ;
779
780 return PPPOL2TP_L2TP_HDR_SIZE_NOSEQ;
781}
782
783/* Build an L2TP header for the session into the buffer provided.
784 */
785static void pppol2tp_build_l2tp_header(struct pppol2tp_session *session,
786 void *buf)
787{
788 __be16 *bufp = buf;
789 u16 flags = L2TP_HDR_VER;
790
791 if (session->send_seq)
792 flags |= L2TP_HDRFLAG_S;
793
794 /* Setup L2TP header.
795 * FIXME: Can this ever be unaligned? Is direct dereferencing of
796 * 16-bit header fields safe here for all architectures?
797 */
798 *bufp++ = htons(flags);
799 *bufp++ = htons(session->tunnel_addr.d_tunnel);
800 *bufp++ = htons(session->tunnel_addr.d_session);
801 if (session->send_seq) {
802 *bufp++ = htons(session->ns);
803 *bufp++ = 0;
804 session->ns++;
805 PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_DEBUG,
806 "%s: updated ns to %hu\n", session->name, session->ns);
807 }
808}
809
810/* This is the sendmsg for the PPPoL2TP pppol2tp_session socket. We come here
811 * when a user application does a sendmsg() on the session socket. L2TP and
812 * PPP headers must be inserted into the user's data.
813 */
814static int pppol2tp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
815 size_t total_len)
816{
817 static const unsigned char ppph[2] = { 0xff, 0x03 };
818 struct sock *sk = sock->sk;
819 struct inet_sock *inet;
820 __wsum csum = 0;
821 struct sk_buff *skb;
822 int error;
823 int hdr_len;
824 struct pppol2tp_session *session;
825 struct pppol2tp_tunnel *tunnel;
826 struct udphdr *uh;
827
828 error = -ENOTCONN;
829 if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED))
830 goto error;
831
832 /* Get session and tunnel contexts */
833 error = -EBADF;
834 session = pppol2tp_sock_to_session(sk);
835 if (session == NULL)
836 goto error;
837
838 tunnel = pppol2tp_sock_to_tunnel(session->tunnel_sock);
839 if (tunnel == NULL)
840 goto error;
841
842 /* What header length is configured for this session? */
843 hdr_len = pppol2tp_l2tp_header_len(session);
844
845 /* Allocate a socket buffer */
846 error = -ENOMEM;
847 skb = sock_wmalloc(sk, NET_SKB_PAD + sizeof(struct iphdr) +
848 sizeof(struct udphdr) + hdr_len +
849 sizeof(ppph) + total_len,
850 0, GFP_KERNEL);
851 if (!skb)
852 goto error;
853
854 /* Reserve space for headers. */
855 skb_reserve(skb, NET_SKB_PAD);
856 skb_reset_network_header(skb);
857 skb_reserve(skb, sizeof(struct iphdr));
858 skb_reset_transport_header(skb);
859
860 /* Build UDP header */
861 inet = inet_sk(session->tunnel_sock);
862 uh = (struct udphdr *) skb->data;
863 uh->source = inet->sport;
864 uh->dest = inet->dport;
865 uh->len = htons(hdr_len + sizeof(ppph) + total_len);
866 uh->check = 0;
867 skb_put(skb, sizeof(struct udphdr));
868
869 /* Build L2TP header */
870 pppol2tp_build_l2tp_header(session, skb->data);
871 skb_put(skb, hdr_len);
872
873 /* Add PPP header */
874 skb->data[0] = ppph[0];
875 skb->data[1] = ppph[1];
876 skb_put(skb, 2);
877
878 /* Copy user data into skb */
879 error = memcpy_fromiovec(skb->data, m->msg_iov, total_len);
880 if (error < 0) {
881 kfree_skb(skb);
882 goto error;
883 }
884 skb_put(skb, total_len);
885
886 /* Calculate UDP checksum if configured to do so */
887 if (session->tunnel_sock->sk_no_check != UDP_CSUM_NOXMIT)
888 csum = udp_csum_outgoing(sk, skb);
889
890 /* Debug */
891 if (session->send_seq)
892 PRINTK(session->debug, PPPOL2TP_MSG_DATA, KERN_DEBUG,
893 "%s: send %Zd bytes, ns=%hu\n", session->name,
894 total_len, session->ns - 1);
895 else
896 PRINTK(session->debug, PPPOL2TP_MSG_DATA, KERN_DEBUG,
897 "%s: send %Zd bytes\n", session->name, total_len);
898
899 if (session->debug & PPPOL2TP_MSG_DATA) {
900 int i;
901 unsigned char *datap = skb->data;
902
903 printk(KERN_DEBUG "%s: xmit:", session->name);
904 for (i = 0; i < total_len; i++) {
905 printk(" %02X", *datap++);
906 if (i == 15) {
907 printk(" ...");
908 break;
909 }
910 }
911 printk("\n");
912 }
913
914 /* Queue the packet to IP for output */
915 error = ip_queue_xmit(skb, 1);
916
917 /* Update stats */
918 if (error >= 0) {
919 tunnel->stats.tx_packets++;
920 tunnel->stats.tx_bytes += skb->len;
921 session->stats.tx_packets++;
922 session->stats.tx_bytes += skb->len;
923 } else {
924 tunnel->stats.tx_errors++;
925 session->stats.tx_errors++;
926 }
927
928error:
929 return error;
930}
931
932/* Transmit function called by generic PPP driver. Sends PPP frame
933 * over PPPoL2TP socket.
934 *
935 * This is almost the same as pppol2tp_sendmsg(), but rather than
936 * being called with a msghdr from userspace, it is called with a skb
937 * from the kernel.
938 *
939 * The supplied skb from ppp doesn't have enough headroom for the
940 * insertion of L2TP, UDP and IP headers so we need to allocate more
941 * headroom in the skb. This will create a cloned skb. But we must be
942 * careful in the error case because the caller will expect to free
943 * the skb it supplied, not our cloned skb. So we take care to always
944 * leave the original skb unfreed if we return an error.
945 */
946static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
947{
948 static const u8 ppph[2] = { 0xff, 0x03 };
949 struct sock *sk = (struct sock *) chan->private;
950 struct sock *sk_tun;
951 int hdr_len;
952 struct pppol2tp_session *session;
953 struct pppol2tp_tunnel *tunnel;
954 int rc;
955 int headroom;
956 int data_len = skb->len;
957 struct inet_sock *inet;
958 __wsum csum = 0;
959 struct sk_buff *skb2 = NULL;
960 struct udphdr *uh;
961
962 if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED))
963 goto abort;
964
965 /* Get session and tunnel contexts from the socket */
966 session = pppol2tp_sock_to_session(sk);
967 if (session == NULL)
968 goto abort;
969
970 sk_tun = session->tunnel_sock;
971 if (sk_tun == NULL)
972 goto abort;
973 tunnel = pppol2tp_sock_to_tunnel(sk_tun);
974 if (tunnel == NULL)
975 goto abort;
976
977 /* What header length is configured for this session? */
978 hdr_len = pppol2tp_l2tp_header_len(session);
979
980 /* Check that there's enough headroom in the skb to insert IP,
981 * UDP and L2TP and PPP headers. If not enough, expand it to
982 * make room. Note that a new skb (or a clone) is
983 * allocated. If we return an error from this point on, make
984 * sure we free the new skb but do not free the original skb
985 * since that is done by the caller for the error case.
986 */
987 headroom = NET_SKB_PAD + sizeof(struct iphdr) +
988 sizeof(struct udphdr) + hdr_len + sizeof(ppph);
989 if (skb_headroom(skb) < headroom) {
990 skb2 = skb_realloc_headroom(skb, headroom);
991 if (skb2 == NULL)
992 goto abort;
993 } else
994 skb2 = skb;
995
996 /* Check that the socket has room */
997 if (atomic_read(&sk_tun->sk_wmem_alloc) < sk_tun->sk_sndbuf)
998 skb_set_owner_w(skb2, sk_tun);
999 else
1000 goto discard;
1001
1002 /* Setup PPP header */
1003 skb_push(skb2, sizeof(ppph));
1004 skb2->data[0] = ppph[0];
1005 skb2->data[1] = ppph[1];
1006
1007 /* Setup L2TP header */
1008 skb_push(skb2, hdr_len);
1009 pppol2tp_build_l2tp_header(session, skb2->data);
1010
1011 /* Setup UDP header */
1012 inet = inet_sk(sk_tun);
1013 skb_push(skb2, sizeof(struct udphdr));
1014 skb_reset_transport_header(skb2);
1015 uh = (struct udphdr *) skb2->data;
1016 uh->source = inet->sport;
1017 uh->dest = inet->dport;
1018 uh->len = htons(sizeof(struct udphdr) + hdr_len + sizeof(ppph) + data_len);
1019 uh->check = 0;
1020
1021 /* Calculate UDP checksum if configured to do so */
1022 if (sk_tun->sk_no_check != UDP_CSUM_NOXMIT)
1023 csum = udp_csum_outgoing(sk_tun, skb2);
1024
1025 /* Debug */
1026 if (session->send_seq)
1027 PRINTK(session->debug, PPPOL2TP_MSG_DATA, KERN_DEBUG,
1028 "%s: send %d bytes, ns=%hu\n", session->name,
1029 data_len, session->ns - 1);
1030 else
1031 PRINTK(session->debug, PPPOL2TP_MSG_DATA, KERN_DEBUG,
1032 "%s: send %d bytes\n", session->name, data_len);
1033
1034 if (session->debug & PPPOL2TP_MSG_DATA) {
1035 int i;
1036 unsigned char *datap = skb2->data;
1037
1038 printk(KERN_DEBUG "%s: xmit:", session->name);
1039 for (i = 0; i < data_len; i++) {
1040 printk(" %02X", *datap++);
1041 if (i == 31) {
1042 printk(" ...");
1043 break;
1044 }
1045 }
1046 printk("\n");
1047 }
1048
1049 /* Get routing info from the tunnel socket */
1050 skb2->dst = sk_dst_get(sk_tun);
1051
1052 /* Queue the packet to IP for output */
1053 rc = ip_queue_xmit(skb2, 1);
1054
1055 /* Update stats */
1056 if (rc >= 0) {
1057 tunnel->stats.tx_packets++;
1058 tunnel->stats.tx_bytes += skb2->len;
1059 session->stats.tx_packets++;
1060 session->stats.tx_bytes += skb2->len;
1061 } else {
1062 tunnel->stats.tx_errors++;
1063 session->stats.tx_errors++;
1064 }
1065
1066 /* Free the original skb */
1067 kfree_skb(skb);
1068
1069 return 1;
1070
1071discard:
1072 /* Free the new skb. Caller will free original skb. */
1073 if (skb2 != skb)
1074 kfree_skb(skb2);
1075abort:
1076 return 0;
1077}
1078
1079/*****************************************************************************
1080 * Session (and tunnel control) socket create/destroy.
1081 *****************************************************************************/
1082
/* When the tunnel UDP socket is closed, all the attached sockets need to go
 * too.
 */
static void pppol2tp_tunnel_closeall(struct pppol2tp_tunnel *tunnel)
{
	int hash;
	struct hlist_node *walk;
	struct hlist_node *tmp;
	struct pppol2tp_session *session;
	struct sock *sk;

	if (tunnel == NULL)
		BUG();

	PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
	       "%s: closing all sessions...\n", tunnel->name);

	write_lock(&tunnel->hlist_lock);
	for (hash = 0; hash < PPPOL2TP_HASH_SIZE; hash++) {
again:
		hlist_for_each_safe(walk, tmp, &tunnel->session_hlist[hash]) {
			session = hlist_entry(walk, struct pppol2tp_session, hlist);

			sk = session->sock;

			PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
			       "%s: closing session\n", session->name);

			/* Unhash first, while still holding hlist_lock, so
			 * no new lookup can find this session once the lock
			 * is dropped below.
			 */
			hlist_del_init(&session->hlist);

			/* Since we should hold the sock lock while
			 * doing any unbinding, we need to release the
			 * lock we're holding before taking that lock.
			 * Hold a reference to the sock so it doesn't
			 * disappear as we're jumping between locks.
			 */
			sock_hold(sk);
			write_unlock(&tunnel->hlist_lock);
			lock_sock(sk);

			if (sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND)) {
				pppox_unbind_sock(sk);
				sk->sk_state = PPPOX_DEAD;
				sk->sk_state_change(sk);
			}

			/* Purge any queued data.
			 * NOTE(review): frames on reorder_q appear to hold a
			 * reference on the session socket (see the sock_put()
			 * in the receive discard path); confirm those refs
			 * are released when the queue is purged here.
			 */
			skb_queue_purge(&sk->sk_receive_queue);
			skb_queue_purge(&sk->sk_write_queue);
			skb_queue_purge(&session->reorder_q);

			release_sock(sk);
			sock_put(sk);

			/* Now restart from the beginning of this hash
			 * chain. We always remove a session from the
			 * list so we are guaranteed to make forward
			 * progress.
			 */
			write_lock(&tunnel->hlist_lock);
			goto again;
		}
	}
	write_unlock(&tunnel->hlist_lock);
}
1148
/* Really kill the tunnel.
 * Come here only when all sessions have been cleared from the tunnel,
 * i.e. when the tunnel refcount has dropped to zero.
 */
static void pppol2tp_tunnel_free(struct pppol2tp_tunnel *tunnel)
{
	/* Remove from socket list */
	write_lock(&pppol2tp_tunnel_list_lock);
	list_del_init(&tunnel->list);
	write_unlock(&pppol2tp_tunnel_list_lock);

	/* Update the global tunnel count, then release the context */
	atomic_dec(&pppol2tp_tunnel_count);
	kfree(tunnel);
}
1162
/* Tunnel UDP socket destruct hook.
 * The tunnel context is deleted only when all session sockets have been
 * closed.
 */
static void pppol2tp_tunnel_destruct(struct sock *sk)
{
	struct pppol2tp_tunnel *tunnel;

	tunnel = pppol2tp_sock_to_tunnel(sk);
	if (tunnel == NULL)
		goto end;

	PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
	       "%s: closing...\n", tunnel->name);

	/* Close all sessions */
	pppol2tp_tunnel_closeall(tunnel);

	/* No longer an encapsulation socket. See net/ipv4/udp.c */
	(udp_sk(sk))->encap_type = 0;
	(udp_sk(sk))->encap_rcv = NULL;

	/* Remove hooks into tunnel socket. Restoring the original
	 * destructor must happen before it is invoked below.
	 */
	tunnel->sock = NULL;
	sk->sk_destruct = tunnel->old_sk_destruct;
	sk->sk_user_data = NULL;

	/* Call original (UDP) socket destructor */
	if (sk->sk_destruct != NULL)
		(*sk->sk_destruct)(sk);

	/* Drop the ref taken in pppol2tp_prepare_tunnel_socket(); the
	 * tunnel context is freed when the count reaches zero.
	 */
	pppol2tp_tunnel_dec_refcount(tunnel);

end:
	return;
}
1199
/* Really kill the session socket. (Called from sock_put() if
 * refcnt == 0.)
 */
static void pppol2tp_session_destruct(struct sock *sk)
{
	struct pppol2tp_session *session = NULL;

	/* sk_user_data is only set once the socket is connected to a
	 * session; an unconnected socket has no context to tear down.
	 */
	if (sk->sk_user_data != NULL) {
		struct pppol2tp_tunnel *tunnel;

		session = pppol2tp_sock_to_session(sk);
		if (session == NULL)
			goto out;

		/* Don't use pppol2tp_sock_to_tunnel() here to
		 * get the tunnel context because the tunnel
		 * socket might have already been closed (its
		 * sk->sk_user_data will be NULL) so use the
		 * session's private tunnel ptr instead.
		 */
		tunnel = session->tunnel;
		if (tunnel != NULL) {
			BUG_ON(tunnel->magic != L2TP_TUNNEL_MAGIC);

			/* If session_id is zero, this is a null
			 * session context, which was created for a
			 * socket that is being used only to manage
			 * tunnels.
			 */
			if (session->tunnel_addr.s_session != 0) {
				/* Delete the session socket from the
				 * hash
				 */
				write_lock(&tunnel->hlist_lock);
				hlist_del_init(&session->hlist);
				write_unlock(&tunnel->hlist_lock);

				atomic_dec(&pppol2tp_session_count);
			}

			/* This will delete the tunnel context if this
			 * is the last session on the tunnel.
			 */
			session->tunnel = NULL;
			session->tunnel_sock = NULL;
			pppol2tp_tunnel_dec_refcount(tunnel);
		}
	}

	/* kfree(NULL) is a no-op, so this is safe on all paths above */
	kfree(session);
out:
	return;
}
1253
/* Called when the PPPoX socket (session) is closed.
 */
static int pppol2tp_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	int error;

	if (!sk)
		return 0;

	error = -EBADF;
	lock_sock(sk);
	if (sock_flag(sk, SOCK_DEAD) != 0)
		goto error;

	pppox_unbind_sock(sk);

	/* Signal the death of the socket. */
	sk->sk_state = PPPOX_DEAD;
	sock_orphan(sk);
	sock->sk = NULL;

	/* Purge any queued data.
	 * NOTE(review): frames still sitting on session->reorder_q are
	 * not purged here, nor in pppol2tp_session_destruct(); only
	 * pppol2tp_tunnel_closeall() empties that queue. Verify queued
	 * reorder frames (and any socket references they hold) cannot
	 * be leaked when a session socket is closed directly.
	 */
	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);

	release_sock(sk);

	/* This will delete the session context via
	 * pppol2tp_session_destruct() if the socket's refcnt drops to
	 * zero.
	 */
	sock_put(sk);

	return 0;

error:
	release_sock(sk);
	return error;
}
1294
/* Internal function to prepare a tunnel (UDP) socket to have PPPoX
 * sockets attached to it.
 *
 * Returns the tunnel socket on success (with *error set to 0), or NULL
 * on failure (with *error set to a -ve errno).
 */
static struct sock *pppol2tp_prepare_tunnel_socket(int fd, u16 tunnel_id,
						   int *error)
{
	int err;
	struct socket *sock = NULL;
	struct sock *sk;
	struct pppol2tp_tunnel *tunnel;
	struct sock *ret = NULL;

	/* Get the tunnel UDP socket from the fd, which was opened by
	 * the userspace L2TP daemon.
	 */
	err = -EBADF;
	sock = sockfd_lookup(fd, &err);
	if (!sock) {
		PRINTK(-1, PPPOL2TP_MSG_CONTROL, KERN_ERR,
		       "tunl %hu: sockfd_lookup(fd=%d) returned %d\n",
		       tunnel_id, fd, err);
		goto err;
	}

	/* Quick sanity checks: must be an AF_INET datagram socket */
	err = -ESOCKTNOSUPPORT;
	if (sock->type != SOCK_DGRAM) {
		PRINTK(-1, PPPOL2TP_MSG_CONTROL, KERN_ERR,
		       "tunl %hu: fd %d wrong type, got %d, expected %d\n",
		       tunnel_id, fd, sock->type, SOCK_DGRAM);
		goto err;
	}
	err = -EAFNOSUPPORT;
	if (sock->ops->family != AF_INET) {
		PRINTK(-1, PPPOL2TP_MSG_CONTROL, KERN_ERR,
		       "tunl %hu: fd %d wrong family, got %d, expected %d\n",
		       tunnel_id, fd, sock->ops->family, AF_INET);
		goto err;
	}

	/* NOTE(review): -ENOTCONN is loaded here but never checked —
	 * nothing verifies the UDP socket is actually connected before
	 * its sport/dport are used on the transmit path. Confirm
	 * whether a connectedness check was intended here.
	 */
	err = -ENOTCONN;
	sk = sock->sk;

	/* Check if this socket has already been prepped */
	tunnel = (struct pppol2tp_tunnel *)sk->sk_user_data;
	if (tunnel != NULL) {
		/* User-data field already set */
		err = -EBUSY;
		BUG_ON(tunnel->magic != L2TP_TUNNEL_MAGIC);

		/* This socket has already been prepped */
		ret = tunnel->sock;
		goto out;
	}

	/* This socket is available and needs prepping. Create a new tunnel
	 * context and init it.
	 */
	sk->sk_user_data = tunnel = kzalloc(sizeof(struct pppol2tp_tunnel), GFP_KERNEL);
	if (sk->sk_user_data == NULL) {
		err = -ENOMEM;
		goto err;
	}

	tunnel->magic = L2TP_TUNNEL_MAGIC;
	sprintf(&tunnel->name[0], "tunl %hu", tunnel_id);

	tunnel->stats.tunnel_id = tunnel_id;
	tunnel->debug = PPPOL2TP_DEFAULT_DEBUG_FLAGS;

	/* Hook on the tunnel socket destructor so that we can cleanup
	 * if the tunnel socket goes away.
	 */
	tunnel->old_sk_destruct = sk->sk_destruct;
	sk->sk_destruct = &pppol2tp_tunnel_destruct;

	tunnel->sock = sk;
	sk->sk_allocation = GFP_ATOMIC;

	/* Misc init */
	rwlock_init(&tunnel->hlist_lock);

	/* Add tunnel to our list */
	INIT_LIST_HEAD(&tunnel->list);
	write_lock(&pppol2tp_tunnel_list_lock);
	list_add(&tunnel->list, &pppol2tp_tunnel_list);
	write_unlock(&pppol2tp_tunnel_list_lock);
	atomic_inc(&pppol2tp_tunnel_count);

	/* Bump the reference count. The tunnel context is deleted
	 * only when this drops to zero.
	 */
	pppol2tp_tunnel_inc_refcount(tunnel);

	/* Mark socket as an encapsulation socket. See net/ipv4/udp.c */
	(udp_sk(sk))->encap_type = UDP_ENCAP_L2TPINUDP;
	(udp_sk(sk))->encap_rcv = pppol2tp_udp_encap_recv;

	ret = tunnel->sock;

	*error = 0;
out:
	/* Drop the fd reference taken by sockfd_lookup() on all paths */
	if (sock)
		sockfd_put(sock);

	return ret;

err:
	*error = err;
	goto out;
}
1406
/* Protocol descriptor for PPPoL2TP session sockets. obj_size covers the
 * pppox_sock wrapper that embeds each struct sock.
 */
static struct proto pppol2tp_sk_proto = {
	.name	  = "PPPOL2TP",
	.owner	  = THIS_MODULE,
	.obj_size = sizeof(struct pppox_sock),
};
1412
1413/* socket() handler. Initialize a new struct sock.
1414 */
1415static int pppol2tp_create(struct socket *sock)
1416{
1417 int error = -ENOMEM;
1418 struct sock *sk;
1419
1420 sk = sk_alloc(PF_PPPOX, GFP_KERNEL, &pppol2tp_sk_proto, 1);
1421 if (!sk)
1422 goto out;
1423
1424 sock_init_data(sock, sk);
1425
1426 sock->state = SS_UNCONNECTED;
1427 sock->ops = &pppol2tp_ops;
1428
1429 sk->sk_backlog_rcv = pppol2tp_recv_core;
1430 sk->sk_protocol = PX_PROTO_OL2TP;
1431 sk->sk_family = PF_PPPOX;
1432 sk->sk_state = PPPOX_NONE;
1433 sk->sk_type = SOCK_STREAM;
1434 sk->sk_destruct = pppol2tp_session_destruct;
1435
1436 error = 0;
1437
1438out:
1439 return error;
1440}
1441
1442/* connect() handler. Attach a PPPoX socket to a tunnel UDP socket
1443 */
1444static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
1445 int sockaddr_len, int flags)
1446{
1447 struct sock *sk = sock->sk;
1448 struct sockaddr_pppol2tp *sp = (struct sockaddr_pppol2tp *) uservaddr;
1449 struct pppox_sock *po = pppox_sk(sk);
1450 struct sock *tunnel_sock = NULL;
1451 struct pppol2tp_session *session = NULL;
1452 struct pppol2tp_tunnel *tunnel;
1453 struct dst_entry *dst;
1454 int error = 0;
1455
1456 lock_sock(sk);
1457
1458 error = -EINVAL;
1459 if (sp->sa_protocol != PX_PROTO_OL2TP)
1460 goto end;
1461
1462 /* Check for already bound sockets */
1463 error = -EBUSY;
1464 if (sk->sk_state & PPPOX_CONNECTED)
1465 goto end;
1466
1467 /* We don't supporting rebinding anyway */
1468 error = -EALREADY;
1469 if (sk->sk_user_data)
1470 goto end; /* socket is already attached */
1471
1472 /* Don't bind if s_tunnel is 0 */
1473 error = -EINVAL;
1474 if (sp->pppol2tp.s_tunnel == 0)
1475 goto end;
1476
1477 /* Special case: prepare tunnel socket if s_session and
1478 * d_session is 0. Otherwise look up tunnel using supplied
1479 * tunnel id.
1480 */
1481 if ((sp->pppol2tp.s_session == 0) && (sp->pppol2tp.d_session == 0)) {
1482 tunnel_sock = pppol2tp_prepare_tunnel_socket(sp->pppol2tp.fd,
1483 sp->pppol2tp.s_tunnel,
1484 &error);
1485 if (tunnel_sock == NULL)
1486 goto end;
1487
1488 tunnel = tunnel_sock->sk_user_data;
1489 } else {
1490 tunnel = pppol2tp_tunnel_find(sp->pppol2tp.s_tunnel);
1491
1492 /* Error if we can't find the tunnel */
1493 error = -ENOENT;
1494 if (tunnel == NULL)
1495 goto end;
1496
1497 tunnel_sock = tunnel->sock;
1498 }
1499
1500 /* Check that this session doesn't already exist */
1501 error = -EEXIST;
1502 session = pppol2tp_session_find(tunnel, sp->pppol2tp.s_session);
1503 if (session != NULL)
1504 goto end;
1505
1506 /* Allocate and initialize a new session context. */
1507 session = kzalloc(sizeof(struct pppol2tp_session), GFP_KERNEL);
1508 if (session == NULL) {
1509 error = -ENOMEM;
1510 goto end;
1511 }
1512
1513 skb_queue_head_init(&session->reorder_q);
1514
1515 session->magic = L2TP_SESSION_MAGIC;
1516 session->owner = current->pid;
1517 session->sock = sk;
1518 session->tunnel = tunnel;
1519 session->tunnel_sock = tunnel_sock;
1520 session->tunnel_addr = sp->pppol2tp;
1521 sprintf(&session->name[0], "sess %hu/%hu",
1522 session->tunnel_addr.s_tunnel,
1523 session->tunnel_addr.s_session);
1524
1525 session->stats.tunnel_id = session->tunnel_addr.s_tunnel;
1526 session->stats.session_id = session->tunnel_addr.s_session;
1527
1528 INIT_HLIST_NODE(&session->hlist);
1529
1530 /* Inherit debug options from tunnel */
1531 session->debug = tunnel->debug;
1532
1533 /* Default MTU must allow space for UDP/L2TP/PPP
1534 * headers.
1535 */
1536 session->mtu = session->mru = 1500 - PPPOL2TP_HEADER_OVERHEAD;
1537
1538 /* If PMTU discovery was enabled, use the MTU that was discovered */
1539 dst = sk_dst_get(sk);
1540 if (dst != NULL) {
1541 u32 pmtu = dst_mtu(__sk_dst_get(sk));
1542 if (pmtu != 0)
1543 session->mtu = session->mru = pmtu -
1544 PPPOL2TP_HEADER_OVERHEAD;
1545 dst_release(dst);
1546 }
1547
1548 /* Special case: if source & dest session_id == 0x0000, this socket is
1549 * being created to manage the tunnel. Don't add the session to the
1550 * session hash list, just set up the internal context for use by
1551 * ioctl() and sockopt() handlers.
1552 */
1553 if ((session->tunnel_addr.s_session == 0) &&
1554 (session->tunnel_addr.d_session == 0)) {
1555 error = 0;
1556 sk->sk_user_data = session;
1557 goto out_no_ppp;
1558 }
1559
1560 /* Get tunnel context from the tunnel socket */
1561 tunnel = pppol2tp_sock_to_tunnel(tunnel_sock);
1562 if (tunnel == NULL) {
1563 error = -EBADF;
1564 goto end;
1565 }
1566
1567 /* Right now, because we don't have a way to push the incoming skb's
1568 * straight through the UDP layer, the only header we need to worry
1569 * about is the L2TP header. This size is different depending on
1570 * whether sequence numbers are enabled for the data channel.
1571 */
1572 po->chan.hdrlen = PPPOL2TP_L2TP_HDR_SIZE_NOSEQ;
1573
1574 po->chan.private = sk;
1575 po->chan.ops = &pppol2tp_chan_ops;
1576 po->chan.mtu = session->mtu;
1577
1578 error = ppp_register_channel(&po->chan);
1579 if (error)
1580 goto end;
1581
1582 /* This is how we get the session context from the socket. */
1583 sk->sk_user_data = session;
1584
1585 /* Add session to the tunnel's hash list */
1586 write_lock(&tunnel->hlist_lock);
1587 hlist_add_head(&session->hlist,
1588 pppol2tp_session_id_hash(tunnel,
1589 session->tunnel_addr.s_session));
1590 write_unlock(&tunnel->hlist_lock);
1591
1592 atomic_inc(&pppol2tp_session_count);
1593
1594out_no_ppp:
1595 pppol2tp_tunnel_inc_refcount(tunnel);
1596 sk->sk_state = PPPOX_CONNECTED;
1597 PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
1598 "%s: created\n", session->name);
1599
1600end:
1601 release_sock(sk);
1602
1603 if (error != 0)
1604 PRINTK(session ? session->debug : -1, PPPOL2TP_MSG_CONTROL, KERN_WARNING,
1605 "%s: connect failed: %d\n", session->name, error);
1606
1607 return error;
1608}
1609
1610/* getname() support.
1611 */
1612static int pppol2tp_getname(struct socket *sock, struct sockaddr *uaddr,
1613 int *usockaddr_len, int peer)
1614{
1615 int len = sizeof(struct sockaddr_pppol2tp);
1616 struct sockaddr_pppol2tp sp;
1617 int error = 0;
1618 struct pppol2tp_session *session;
1619
1620 error = -ENOTCONN;
1621 if (sock->sk->sk_state != PPPOX_CONNECTED)
1622 goto end;
1623
1624 session = pppol2tp_sock_to_session(sock->sk);
1625 if (session == NULL) {
1626 error = -EBADF;
1627 goto end;
1628 }
1629
1630 sp.sa_family = AF_PPPOX;
1631 sp.sa_protocol = PX_PROTO_OL2TP;
1632 memcpy(&sp.pppol2tp, &session->tunnel_addr,
1633 sizeof(struct pppol2tp_addr));
1634
1635 memcpy(uaddr, &sp, len);
1636
1637 *usockaddr_len = len;
1638
1639 error = 0;
1640
1641end:
1642 return error;
1643}
1644
1645/****************************************************************************
1646 * ioctl() handlers.
1647 *
1648 * The PPPoX socket is created for L2TP sessions: tunnels have their own UDP
1649 * sockets. However, in order to control kernel tunnel features, we allow
1650 * userspace to create a special "tunnel" PPPoX socket which is used for
1651 * control only. Tunnel PPPoX sockets have session_id == 0 and simply allow
1652 * the user application to issue L2TP setsockopt(), getsockopt() and ioctl()
1653 * calls.
1654 ****************************************************************************/
1655
1656/* Session ioctl helper.
1657 */
1658static int pppol2tp_session_ioctl(struct pppol2tp_session *session,
1659 unsigned int cmd, unsigned long arg)
1660{
1661 struct ifreq ifr;
1662 int err = 0;
1663 struct sock *sk = session->sock;
1664 int val = (int) arg;
1665
1666 PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_DEBUG,
1667 "%s: pppol2tp_session_ioctl(cmd=%#x, arg=%#lx)\n",
1668 session->name, cmd, arg);
1669
1670 sock_hold(sk);
1671
1672 switch (cmd) {
1673 case SIOCGIFMTU:
1674 err = -ENXIO;
1675 if (!(sk->sk_state & PPPOX_CONNECTED))
1676 break;
1677
1678 err = -EFAULT;
1679 if (copy_from_user(&ifr, (void __user *) arg, sizeof(struct ifreq)))
1680 break;
1681 ifr.ifr_mtu = session->mtu;
1682 if (copy_to_user((void __user *) arg, &ifr, sizeof(struct ifreq)))
1683 break;
1684
1685 PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
1686 "%s: get mtu=%d\n", session->name, session->mtu);
1687 err = 0;
1688 break;
1689
1690 case SIOCSIFMTU:
1691 err = -ENXIO;
1692 if (!(sk->sk_state & PPPOX_CONNECTED))
1693 break;
1694
1695 err = -EFAULT;
1696 if (copy_from_user(&ifr, (void __user *) arg, sizeof(struct ifreq)))
1697 break;
1698
1699 session->mtu = ifr.ifr_mtu;
1700
1701 PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
1702 "%s: set mtu=%d\n", session->name, session->mtu);
1703 err = 0;
1704 break;
1705
1706 case PPPIOCGMRU:
1707 err = -ENXIO;
1708 if (!(sk->sk_state & PPPOX_CONNECTED))
1709 break;
1710
1711 err = -EFAULT;
1712 if (put_user(session->mru, (int __user *) arg))
1713 break;
1714
1715 PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
1716 "%s: get mru=%d\n", session->name, session->mru);
1717 err = 0;
1718 break;
1719
1720 case PPPIOCSMRU:
1721 err = -ENXIO;
1722 if (!(sk->sk_state & PPPOX_CONNECTED))
1723 break;
1724
1725 err = -EFAULT;
1726 if (get_user(val,(int __user *) arg))
1727 break;
1728
1729 session->mru = val;
1730 PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
1731 "%s: set mru=%d\n", session->name, session->mru);
1732 err = 0;
1733 break;
1734
1735 case PPPIOCGFLAGS:
1736 err = -EFAULT;
1737 if (put_user(session->flags, (int __user *) arg))
1738 break;
1739
1740 PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
1741 "%s: get flags=%d\n", session->name, session->flags);
1742 err = 0;
1743 break;
1744
1745 case PPPIOCSFLAGS:
1746 err = -EFAULT;
1747 if (get_user(val, (int __user *) arg))
1748 break;
1749 session->flags = val;
1750 PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
1751 "%s: set flags=%d\n", session->name, session->flags);
1752 err = 0;
1753 break;
1754
1755 case PPPIOCGL2TPSTATS:
1756 err = -ENXIO;
1757 if (!(sk->sk_state & PPPOX_CONNECTED))
1758 break;
1759
1760 if (copy_to_user((void __user *) arg, &session->stats,
1761 sizeof(session->stats)))
1762 break;
1763 PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
1764 "%s: get L2TP stats\n", session->name);
1765 err = 0;
1766 break;
1767
1768 default:
1769 err = -ENOSYS;
1770 break;
1771 }
1772
1773 sock_put(sk);
1774
1775 return err;
1776}
1777
1778/* Tunnel ioctl helper.
1779 *
1780 * Note the special handling for PPPIOCGL2TPSTATS below. If the ioctl data
1781 * specifies a session_id, the session ioctl handler is called. This allows an
1782 * application to retrieve session stats via a tunnel socket.
1783 */
1784static int pppol2tp_tunnel_ioctl(struct pppol2tp_tunnel *tunnel,
1785 unsigned int cmd, unsigned long arg)
1786{
1787 int err = 0;
1788 struct sock *sk = tunnel->sock;
1789 struct pppol2tp_ioc_stats stats_req;
1790
1791 PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_DEBUG,
1792 "%s: pppol2tp_tunnel_ioctl(cmd=%#x, arg=%#lx)\n", tunnel->name,
1793 cmd, arg);
1794
1795 sock_hold(sk);
1796
1797 switch (cmd) {
1798 case PPPIOCGL2TPSTATS:
1799 err = -ENXIO;
1800 if (!(sk->sk_state & PPPOX_CONNECTED))
1801 break;
1802
1803 if (copy_from_user(&stats_req, (void __user *) arg,
1804 sizeof(stats_req))) {
1805 err = -EFAULT;
1806 break;
1807 }
1808 if (stats_req.session_id != 0) {
1809 /* resend to session ioctl handler */
1810 struct pppol2tp_session *session =
1811 pppol2tp_session_find(tunnel, stats_req.session_id);
1812 if (session != NULL)
1813 err = pppol2tp_session_ioctl(session, cmd, arg);
1814 else
1815 err = -EBADR;
1816 break;
1817 }
1818#ifdef CONFIG_XFRM
1819 tunnel->stats.using_ipsec = (sk->sk_policy[0] || sk->sk_policy[1]) ? 1 : 0;
1820#endif
1821 if (copy_to_user((void __user *) arg, &tunnel->stats,
1822 sizeof(tunnel->stats))) {
1823 err = -EFAULT;
1824 break;
1825 }
1826 PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
1827 "%s: get L2TP stats\n", tunnel->name);
1828 err = 0;
1829 break;
1830
1831 default:
1832 err = -ENOSYS;
1833 break;
1834 }
1835
1836 sock_put(sk);
1837
1838 return err;
1839}
1840
1841/* Main ioctl() handler.
1842 * Dispatch to tunnel or session helpers depending on the socket.
1843 */
1844static int pppol2tp_ioctl(struct socket *sock, unsigned int cmd,
1845 unsigned long arg)
1846{
1847 struct sock *sk = sock->sk;
1848 struct pppol2tp_session *session;
1849 struct pppol2tp_tunnel *tunnel;
1850 int err;
1851
1852 if (!sk)
1853 return 0;
1854
1855 err = -EBADF;
1856 if (sock_flag(sk, SOCK_DEAD) != 0)
1857 goto end;
1858
1859 err = -ENOTCONN;
1860 if ((sk->sk_user_data == NULL) ||
1861 (!(sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND))))
1862 goto end;
1863
1864 /* Get session context from the socket */
1865 err = -EBADF;
1866 session = pppol2tp_sock_to_session(sk);
1867 if (session == NULL)
1868 goto end;
1869
1870 /* Special case: if session's session_id is zero, treat ioctl as a
1871 * tunnel ioctl
1872 */
1873 if ((session->tunnel_addr.s_session == 0) &&
1874 (session->tunnel_addr.d_session == 0)) {
1875 err = -EBADF;
1876 tunnel = pppol2tp_sock_to_tunnel(session->tunnel_sock);
1877 if (tunnel == NULL)
1878 goto end;
1879
1880 err = pppol2tp_tunnel_ioctl(tunnel, cmd, arg);
1881 goto end;
1882 }
1883
1884 err = pppol2tp_session_ioctl(session, cmd, arg);
1885
1886end:
1887 return err;
1888}
1889
1890/*****************************************************************************
1891 * setsockopt() / getsockopt() support.
1892 *
1893 * The PPPoX socket is created for L2TP sessions: tunnels have their own UDP
1894 * sockets. In order to control kernel tunnel features, we allow userspace to
1895 * create a special "tunnel" PPPoX socket which is used for control only.
1896 * Tunnel PPPoX sockets have session_id == 0 and simply allow the user
1897 * application to issue L2TP setsockopt(), getsockopt() and ioctl() calls.
1898 *****************************************************************************/
1899
1900/* Tunnel setsockopt() helper.
1901 */
1902static int pppol2tp_tunnel_setsockopt(struct sock *sk,
1903 struct pppol2tp_tunnel *tunnel,
1904 int optname, int val)
1905{
1906 int err = 0;
1907
1908 switch (optname) {
1909 case PPPOL2TP_SO_DEBUG:
1910 tunnel->debug = val;
1911 PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
1912 "%s: set debug=%x\n", tunnel->name, tunnel->debug);
1913 break;
1914
1915 default:
1916 err = -ENOPROTOOPT;
1917 break;
1918 }
1919
1920 return err;
1921}
1922
/* Session setsockopt helper.
 *
 * Handles the per-session PPPOL2TP_SO_* options. Boolean options accept
 * only 0 or 1 and are stored internally as 0 / -1 (all-bits-set) flags.
 * Returns 0 on success, -EINVAL for out-of-range values, -ENOPROTOOPT
 * for unknown options.
 */
static int pppol2tp_session_setsockopt(struct sock *sk,
				       struct pppol2tp_session *session,
				       int optname, int val)
{
	int err = 0;

	switch (optname) {
	case PPPOL2TP_SO_RECVSEQ:
		/* Require sequence numbers in received data frames. */
		if ((val != 0) && (val != 1)) {
			err = -EINVAL;
			break;
		}
		session->recv_seq = val ? -1 : 0;
		PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
		       "%s: set recv_seq=%d\n", session->name,
		       session->recv_seq);
		break;

	case PPPOL2TP_SO_SENDSEQ:
		/* Send sequence numbers in transmitted data frames. */
		if ((val != 0) && (val != 1)) {
			err = -EINVAL;
			break;
		}
		session->send_seq = val ? -1 : 0;
		{
			/* The PPP channel header length must track whether
			 * the L2TP header carries sequence numbers.
			 */
			struct sock *ssk = session->sock;
			struct pppox_sock *po = pppox_sk(ssk);
			po->chan.hdrlen = val ? PPPOL2TP_L2TP_HDR_SIZE_SEQ :
				PPPOL2TP_L2TP_HDR_SIZE_NOSEQ;
		}
		PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
		       "%s: set send_seq=%d\n", session->name, session->send_seq);
		break;

	case PPPOL2TP_SO_LNSMODE:
		/* LNS mode: the peer (LAC) controls sequence numbering. */
		if ((val != 0) && (val != 1)) {
			err = -EINVAL;
			break;
		}
		session->lns_mode = val ? -1 : 0;
		PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
		       "%s: set lns_mode=%d\n", session->name,
		       session->lns_mode);
		break;

	case PPPOL2TP_SO_DEBUG:
		/* Per-session debug message mask. */
		session->debug = val;
		PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
		       "%s: set debug=%x\n", session->name, session->debug);
		break;

	case PPPOL2TP_SO_REORDERTO:
		/* Reorder timeout, supplied in ms, stored in jiffies. */
		session->reorder_timeout = msecs_to_jiffies(val);
		PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
		       "%s: set reorder_timeout=%d\n", session->name,
		       session->reorder_timeout);
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	return err;
}
1990
1991/* Main setsockopt() entry point.
1992 * Does API checks, then calls either the tunnel or session setsockopt
1993 * handler, according to whether the PPPoL2TP socket is a for a regular
1994 * session or the special tunnel type.
1995 */
1996static int pppol2tp_setsockopt(struct socket *sock, int level, int optname,
1997 char __user *optval, int optlen)
1998{
1999 struct sock *sk = sock->sk;
2000 struct pppol2tp_session *session = sk->sk_user_data;
2001 struct pppol2tp_tunnel *tunnel;
2002 int val;
2003 int err;
2004
2005 if (level != SOL_PPPOL2TP)
2006 return udp_prot.setsockopt(sk, level, optname, optval, optlen);
2007
2008 if (optlen < sizeof(int))
2009 return -EINVAL;
2010
2011 if (get_user(val, (int __user *)optval))
2012 return -EFAULT;
2013
2014 err = -ENOTCONN;
2015 if (sk->sk_user_data == NULL)
2016 goto end;
2017
2018 /* Get session context from the socket */
2019 err = -EBADF;
2020 session = pppol2tp_sock_to_session(sk);
2021 if (session == NULL)
2022 goto end;
2023
2024 /* Special case: if session_id == 0x0000, treat as operation on tunnel
2025 */
2026 if ((session->tunnel_addr.s_session == 0) &&
2027 (session->tunnel_addr.d_session == 0)) {
2028 err = -EBADF;
2029 tunnel = pppol2tp_sock_to_tunnel(session->tunnel_sock);
2030 if (tunnel == NULL)
2031 goto end;
2032
2033 err = pppol2tp_tunnel_setsockopt(sk, tunnel, optname, val);
2034 } else
2035 err = pppol2tp_session_setsockopt(sk, session, optname, val);
2036
2037 err = 0;
2038
2039end:
2040 return err;
2041}
2042
2043/* Tunnel getsockopt helper. Called with sock locked.
2044 */
2045static int pppol2tp_tunnel_getsockopt(struct sock *sk,
2046 struct pppol2tp_tunnel *tunnel,
2047 int optname, int __user *val)
2048{
2049 int err = 0;
2050
2051 switch (optname) {
2052 case PPPOL2TP_SO_DEBUG:
2053 *val = tunnel->debug;
2054 PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
2055 "%s: get debug=%x\n", tunnel->name, tunnel->debug);
2056 break;
2057
2058 default:
2059 err = -ENOPROTOOPT;
2060 break;
2061 }
2062
2063 return err;
2064}
2065
2066/* Session getsockopt helper. Called with sock locked.
2067 */
2068static int pppol2tp_session_getsockopt(struct sock *sk,
2069 struct pppol2tp_session *session,
2070 int optname, int __user *val)
2071{
2072 int err = 0;
2073
2074 switch (optname) {
2075 case PPPOL2TP_SO_RECVSEQ:
2076 *val = session->recv_seq;
2077 PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
2078 "%s: get recv_seq=%d\n", session->name, *val);
2079 break;
2080
2081 case PPPOL2TP_SO_SENDSEQ:
2082 *val = session->send_seq;
2083 PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
2084 "%s: get send_seq=%d\n", session->name, *val);
2085 break;
2086
2087 case PPPOL2TP_SO_LNSMODE:
2088 *val = session->lns_mode;
2089 PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
2090 "%s: get lns_mode=%d\n", session->name, *val);
2091 break;
2092
2093 case PPPOL2TP_SO_DEBUG:
2094 *val = session->debug;
2095 PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
2096 "%s: get debug=%d\n", session->name, *val);
2097 break;
2098
2099 case PPPOL2TP_SO_REORDERTO:
2100 *val = (int) jiffies_to_msecs(session->reorder_timeout);
2101 PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
2102 "%s: get reorder_timeout=%d\n", session->name, *val);
2103 break;
2104
2105 default:
2106 err = -ENOPROTOOPT;
2107 }
2108
2109 return err;
2110}
2111
2112/* Main getsockopt() entry point.
2113 * Does API checks, then calls either the tunnel or session getsockopt
2114 * handler, according to whether the PPPoX socket is a for a regular session
2115 * or the special tunnel type.
2116 */
2117static int pppol2tp_getsockopt(struct socket *sock, int level,
2118 int optname, char __user *optval, int __user *optlen)
2119{
2120 struct sock *sk = sock->sk;
2121 struct pppol2tp_session *session = sk->sk_user_data;
2122 struct pppol2tp_tunnel *tunnel;
2123 int val, len;
2124 int err;
2125
2126 if (level != SOL_PPPOL2TP)
2127 return udp_prot.getsockopt(sk, level, optname, optval, optlen);
2128
2129 if (get_user(len, (int __user *) optlen))
2130 return -EFAULT;
2131
2132 len = min_t(unsigned int, len, sizeof(int));
2133
2134 if (len < 0)
2135 return -EINVAL;
2136
2137 err = -ENOTCONN;
2138 if (sk->sk_user_data == NULL)
2139 goto end;
2140
2141 /* Get the session context */
2142 err = -EBADF;
2143 session = pppol2tp_sock_to_session(sk);
2144 if (session == NULL)
2145 goto end;
2146
2147 /* Special case: if session_id == 0x0000, treat as operation on tunnel */
2148 if ((session->tunnel_addr.s_session == 0) &&
2149 (session->tunnel_addr.d_session == 0)) {
2150 err = -EBADF;
2151 tunnel = pppol2tp_sock_to_tunnel(session->tunnel_sock);
2152 if (tunnel == NULL)
2153 goto end;
2154
2155 err = pppol2tp_tunnel_getsockopt(sk, tunnel, optname, &val);
2156 } else
2157 err = pppol2tp_session_getsockopt(sk, session, optname, &val);
2158
2159 err = -EFAULT;
2160 if (put_user(len, (int __user *) optlen))
2161 goto end;
2162
2163 if (copy_to_user((void __user *) optval, &val, len))
2164 goto end;
2165
2166 err = 0;
2167end:
2168 return err;
2169}
2170
2171/*****************************************************************************
2172 * /proc filesystem for debug
2173 *****************************************************************************/
2174
2175#ifdef CONFIG_PROC_FS
2176
2177#include <linux/seq_file.h>
2178
/* Per-open iterator state for the /proc/net/pppol2tp seq_file. One line
 * of output is produced per tunnel and per session.
 */
struct pppol2tp_seq_data {
	struct pppol2tp_tunnel *tunnel;		/* current tunnel */
	struct pppol2tp_session *session;	/* NULL means get first session in tunnel */
};
2183
/* Return the session that follows @curr in @tunnel's session hash table,
 * the first session if @curr is NULL, or NULL when @curr was the last.
 *
 * Walks all hash buckets in order under tunnel->hlist_lock.
 * NOTE(review): the returned pointer is used after the read lock is
 * dropped — presumably a reference elsewhere keeps the session alive
 * across /proc reads; verify against the session free path.
 */
static struct pppol2tp_session *next_session(struct pppol2tp_tunnel *tunnel, struct pppol2tp_session *curr)
{
	struct pppol2tp_session *session = NULL;
	struct hlist_node *walk;
	int found = 0;
	int next = 0;
	int i;

	read_lock(&tunnel->hlist_lock);
	for (i = 0; i < PPPOL2TP_HASH_SIZE; i++) {
		hlist_for_each_entry(session, walk, &tunnel->session_hlist[i], hlist) {
			/* No current session: the first one found wins. */
			if (curr == NULL) {
				found = 1;
				goto out;
			}
			/* Found @curr: the next entry visited is the answer. */
			if (session == curr) {
				next = 1;
				continue;
			}
			if (next) {
				found = 1;
				goto out;
			}
		}
	}
out:
	read_unlock(&tunnel->hlist_lock);
	/* If the walk fell off the end, session holds a stale cursor. */
	if (!found)
		session = NULL;

	return session;
}
2216
2217static struct pppol2tp_tunnel *next_tunnel(struct pppol2tp_tunnel *curr)
2218{
2219 struct pppol2tp_tunnel *tunnel = NULL;
2220
2221 read_lock(&pppol2tp_tunnel_list_lock);
2222 if (list_is_last(&curr->list, &pppol2tp_tunnel_list)) {
2223 goto out;
2224 }
2225 tunnel = list_entry(curr->list.next, struct pppol2tp_tunnel, list);
2226out:
2227 read_unlock(&pppol2tp_tunnel_list_lock);
2228
2229 return tunnel;
2230}
2231
/* seq_file ->start() handler. All iteration is done here (->next() only
 * bumps the position): position 0 emits the header (SEQ_START_TOKEN);
 * each later call advances the cursor in m->private by one session, or
 * to the next tunnel when the current tunnel's sessions are exhausted.
 * Returns NULL at end of output.
 */
static void *pppol2tp_seq_start(struct seq_file *m, loff_t *offs)
{
	struct pppol2tp_seq_data *pd = SEQ_START_TOKEN;
	loff_t pos = *offs;

	/* First call: emit the header line via SEQ_START_TOKEN. */
	if (!pos)
		goto out;

	BUG_ON(m->private == NULL);
	pd = m->private;

	if (pd->tunnel == NULL) {
		/* No cursor yet: start at the first tunnel, if any. */
		if (!list_empty(&pppol2tp_tunnel_list))
			pd->tunnel = list_entry(pppol2tp_tunnel_list.next, struct pppol2tp_tunnel, list);
	} else {
		/* Advance within the tunnel; fall through to the next
		 * tunnel when this one's sessions are exhausted.
		 */
		pd->session = next_session(pd->tunnel, pd->session);
		if (pd->session == NULL) {
			pd->tunnel = next_tunnel(pd->tunnel);
		}
	}

	/* NULL tunnel and session indicates end of list */
	if ((pd->tunnel == NULL) && (pd->session == NULL))
		pd = NULL;

out:
	return pd;
}
2260
2261static void *pppol2tp_seq_next(struct seq_file *m, void *v, loff_t *pos)
2262{
2263 (*pos)++;
2264 return NULL;
2265}
2266
/* seq_file ->stop() handler. No locks are held across iteration, so
 * there is nothing to release here.
 */
static void pppol2tp_seq_stop(struct seq_file *p, void *v)
{
}
2271
2272static void pppol2tp_seq_tunnel_show(struct seq_file *m, void *v)
2273{
2274 struct pppol2tp_tunnel *tunnel = v;
2275
2276 seq_printf(m, "\nTUNNEL '%s', %c %d\n",
2277 tunnel->name,
2278 (tunnel == tunnel->sock->sk_user_data) ? 'Y':'N',
2279 atomic_read(&tunnel->ref_count) - 1);
2280 seq_printf(m, " %08x %llu/%llu/%llu %llu/%llu/%llu\n",
2281 tunnel->debug,
2282 tunnel->stats.tx_packets, tunnel->stats.tx_bytes,
2283 tunnel->stats.tx_errors,
2284 tunnel->stats.rx_packets, tunnel->stats.rx_bytes,
2285 tunnel->stats.rx_errors);
2286}
2287
/* Emit one session's three /proc lines: addressing, mode/config flags,
 * and sequence-number state plus tx/rx counters. The field layout must
 * match the header printed by pppol2tp_seq_show().
 */
static void pppol2tp_seq_session_show(struct seq_file *m, void *v)
{
	struct pppol2tp_session *session = v;

	/* Line 1: peer address/port, source and destination tunnel/session
	 * IDs, socket state, and whether sk_user_data still points at us.
	 */
	seq_printf(m, "  SESSION '%s' %08X/%d %04X/%04X -> "
		   "%04X/%04X %d %c\n",
		   session->name,
		   ntohl(session->tunnel_addr.addr.sin_addr.s_addr),
		   ntohs(session->tunnel_addr.addr.sin_port),
		   session->tunnel_addr.s_tunnel,
		   session->tunnel_addr.s_session,
		   session->tunnel_addr.d_tunnel,
		   session->tunnel_addr.d_session,
		   session->sock->sk_state,
		   (session == session->sock->sk_user_data) ?
		   'Y' : 'N');
	/* Line 2: MTU/MRU, recv/send-seq flags, LNS/LAC mode, debug mask,
	 * reorder timeout (ms).
	 */
	seq_printf(m, "   %d/%d/%c/%c/%s %08x %u\n",
		   session->mtu, session->mru,
		   session->recv_seq ? 'R' : '-',
		   session->send_seq ? 'S' : '-',
		   session->lns_mode ? "LNS" : "LAC",
		   session->debug,
		   jiffies_to_msecs(session->reorder_timeout));
	/* Line 3: next expected / next send sequence numbers and counters. */
	seq_printf(m, "   %hu/%hu %llu/%llu/%llu %llu/%llu/%llu\n",
		   session->nr, session->ns,
		   session->stats.tx_packets,
		   session->stats.tx_bytes,
		   session->stats.tx_errors,
		   session->stats.rx_packets,
		   session->stats.rx_bytes,
		   session->stats.rx_errors);
}
2320
2321static int pppol2tp_seq_show(struct seq_file *m, void *v)
2322{
2323 struct pppol2tp_seq_data *pd = v;
2324
2325 /* display header on line 1 */
2326 if (v == SEQ_START_TOKEN) {
2327 seq_puts(m, "PPPoL2TP driver info, " PPPOL2TP_DRV_VERSION "\n");
2328 seq_puts(m, "TUNNEL name, user-data-ok session-count\n");
2329 seq_puts(m, " debug tx-pkts/bytes/errs rx-pkts/bytes/errs\n");
2330 seq_puts(m, " SESSION name, addr/port src-tid/sid "
2331 "dest-tid/sid state user-data-ok\n");
2332 seq_puts(m, " mtu/mru/rcvseq/sendseq/lns debug reorderto\n");
2333 seq_puts(m, " nr/ns tx-pkts/bytes/errs rx-pkts/bytes/errs\n");
2334 goto out;
2335 }
2336
2337 /* Show the tunnel or session context.
2338 */
2339 if (pd->session == NULL)
2340 pppol2tp_seq_tunnel_show(m, pd->tunnel);
2341 else
2342 pppol2tp_seq_session_show(m, pd->session);
2343
2344out:
2345 return 0;
2346}
2347
/* seq_file operations for /proc/net/pppol2tp. All iteration logic is in
 * ->start(); ->next() merely advances the position.
 */
static struct seq_operations pppol2tp_seq_ops = {
	.start		= pppol2tp_seq_start,
	.next		= pppol2tp_seq_next,
	.stop		= pppol2tp_seq_stop,
	.show		= pppol2tp_seq_show,
};
2354
2355/* Called when our /proc file is opened. We allocate data for use when
2356 * iterating our tunnel / session contexts and store it in the private
2357 * data of the seq_file.
2358 */
2359static int pppol2tp_proc_open(struct inode *inode, struct file *file)
2360{
2361 struct seq_file *m;
2362 struct pppol2tp_seq_data *pd;
2363 int ret = 0;
2364
2365 ret = seq_open(file, &pppol2tp_seq_ops);
2366 if (ret < 0)
2367 goto out;
2368
2369 m = file->private_data;
2370
2371 /* Allocate and fill our proc_data for access later */
2372 ret = -ENOMEM;
2373 m->private = kzalloc(sizeof(struct pppol2tp_seq_data), GFP_KERNEL);
2374 if (m->private == NULL)
2375 goto out;
2376
2377 pd = m->private;
2378 ret = 0;
2379
2380out:
2381 return ret;
2382}
2383
2384/* Called when /proc file access completes.
2385 */
2386static int pppol2tp_proc_release(struct inode *inode, struct file *file)
2387{
2388 struct seq_file *m = (struct seq_file *)file->private_data;
2389
2390 kfree(m->private);
2391 m->private = NULL;
2392
2393 return seq_release(inode, file);
2394}
2395
/* file_operations for /proc/net/pppol2tp; reads are served through the
 * seq_file machinery above.
 */
static struct file_operations pppol2tp_proc_fops = {
	.owner		= THIS_MODULE,
	.open		= pppol2tp_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= pppol2tp_proc_release,
};
2403
/* /proc/net/pppol2tp entry, created in pppol2tp_init(). */
static struct proc_dir_entry *pppol2tp_proc;
2405
2406#endif /* CONFIG_PROC_FS */
2407
2408/*****************************************************************************
2409 * Init and cleanup
2410 *****************************************************************************/
2411
/* Socket operations for PPPoL2TP sockets. Data path and connection setup
 * are implemented by this driver; operations that make no sense on a
 * connected datagram PPPoX socket are stubbed with the sock_no_* helpers.
 */
static struct proto_ops pppol2tp_ops = {
	.family		= AF_PPPOX,
	.owner		= THIS_MODULE,
	.release	= pppol2tp_release,
	.bind		= sock_no_bind,
	.connect	= pppol2tp_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.getname	= pppol2tp_getname,
	.poll		= datagram_poll,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= pppol2tp_setsockopt,
	.getsockopt	= pppol2tp_getsockopt,
	.sendmsg	= pppol2tp_sendmsg,
	.recvmsg	= pppol2tp_recvmsg,
	.mmap		= sock_no_mmap,
	.ioctl		= pppox_ioctl,
};
2431
/* Registration with the PPPoX layer for PX_PROTO_OL2TP: socket creation
 * plus the ioctl dispatcher defined above.
 */
static struct pppox_proto pppol2tp_proto = {
	.create		= pppol2tp_create,
	.ioctl		= pppol2tp_ioctl
};
2436
/* Module init: register the sk proto, hook into the PPPoX layer, and
 * create /proc/net/pppol2tp. Unwinds in reverse order on failure.
 *
 * NOTE(review): the error labels are misleadingly named — the label
 * "out_unregister_pppol2tp_proto" actually undoes proto_register(), and
 * "out_unregister_pppox_proto" undoes register_pppox_proto().
 */
static int __init pppol2tp_init(void)
{
	int err;

	err = proto_register(&pppol2tp_sk_proto, 0);
	if (err)
		goto out;
	err = register_pppox_proto(PX_PROTO_OL2TP, &pppol2tp_proto);
	if (err)
		goto out_unregister_pppol2tp_proto;

#ifdef CONFIG_PROC_FS
	/* NOTE(review): proc_fops is assigned after create_proc_entry(),
	 * leaving a tiny window where the entry exists with default fops —
	 * consider confirming whether this matters for this kernel version.
	 */
	pppol2tp_proc = create_proc_entry("pppol2tp", 0, proc_net);
	if (!pppol2tp_proc) {
		err = -ENOMEM;
		goto out_unregister_pppox_proto;
	}
	pppol2tp_proc->proc_fops = &pppol2tp_proc_fops;
#endif /* CONFIG_PROC_FS */
	printk(KERN_INFO "PPPoL2TP kernel driver, %s\n",
	       PPPOL2TP_DRV_VERSION);

out:
	return err;

out_unregister_pppox_proto:
	unregister_pppox_proto(PX_PROTO_OL2TP);
out_unregister_pppol2tp_proto:
	proto_unregister(&pppol2tp_sk_proto);
	goto out;
}
2468
/* Module exit: tear down in reverse of pppol2tp_init() — detach from the
 * PPPoX layer first so no new sockets appear, then remove the /proc
 * entry, then unregister the sk proto.
 */
static void __exit pppol2tp_exit(void)
{
	unregister_pppox_proto(PX_PROTO_OL2TP);

#ifdef CONFIG_PROC_FS
	remove_proc_entry("pppol2tp", proc_net);
#endif
	proto_unregister(&pppol2tp_sk_proto);
}
2478
2479module_init(pppol2tp_init);
2480module_exit(pppol2tp_exit);
2481
2482MODULE_AUTHOR("Martijn van Oosterhout <kleptog@svana.org>,"
2483 "James Chapman <jchapman@katalix.com>");
2484MODULE_DESCRIPTION("PPP over L2TP over UDP");
2485MODULE_LICENSE("GPL");
2486MODULE_VERSION(PPPOL2TP_DRV_VERSION);
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index 7d549355815a..afef6c0c59fe 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -1137,7 +1137,7 @@ static int init_nic(struct s2io_nic *nic)
1137 * SXE-008 TRANSMIT DMA ARBITRATION ISSUE. 1137 * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
1138 */ 1138 */
1139 if ((nic->device_type == XFRAME_I_DEVICE) && 1139 if ((nic->device_type == XFRAME_I_DEVICE) &&
1140 (get_xena_rev_id(nic->pdev) < 4)) 1140 (nic->pdev->revision < 4))
1141 writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable); 1141 writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);
1142 1142
1143 val64 = readq(&bar0->tx_fifo_partition_0); 1143 val64 = readq(&bar0->tx_fifo_partition_0);
@@ -1875,7 +1875,7 @@ static int verify_pcc_quiescent(struct s2io_nic *sp, int flag)
1875 herc = (sp->device_type == XFRAME_II_DEVICE); 1875 herc = (sp->device_type == XFRAME_II_DEVICE);
1876 1876
1877 if (flag == FALSE) { 1877 if (flag == FALSE) {
1878 if ((!herc && (get_xena_rev_id(sp->pdev) >= 4)) || herc) { 1878 if ((!herc && (sp->pdev->revision >= 4)) || herc) {
1879 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE)) 1879 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE))
1880 ret = 1; 1880 ret = 1;
1881 } else { 1881 } else {
@@ -1883,7 +1883,7 @@ static int verify_pcc_quiescent(struct s2io_nic *sp, int flag)
1883 ret = 1; 1883 ret = 1;
1884 } 1884 }
1885 } else { 1885 } else {
1886 if ((!herc && (get_xena_rev_id(sp->pdev) >= 4)) || herc) { 1886 if ((!herc && (sp->pdev->revision >= 4)) || herc) {
1887 if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) == 1887 if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
1888 ADAPTER_STATUS_RMAC_PCC_IDLE)) 1888 ADAPTER_STATUS_RMAC_PCC_IDLE))
1889 ret = 1; 1889 ret = 1;
@@ -7078,23 +7078,6 @@ static void s2io_link(struct s2io_nic * sp, int link)
7078} 7078}
7079 7079
7080/** 7080/**
7081 * get_xena_rev_id - to identify revision ID of xena.
7082 * @pdev : PCI Dev structure
7083 * Description:
7084 * Function to identify the Revision ID of xena.
7085 * Return value:
7086 * returns the revision ID of the device.
7087 */
7088
7089static int get_xena_rev_id(struct pci_dev *pdev)
7090{
7091 u8 id = 0;
7092 int ret;
7093 ret = pci_read_config_byte(pdev, PCI_REVISION_ID, (u8 *) & id);
7094 return id;
7095}
7096
7097/**
7098 * s2io_init_pci -Initialization of PCI and PCI-X configuration registers . 7081 * s2io_init_pci -Initialization of PCI and PCI-X configuration registers .
7099 * @sp : private member of the device structure, which is a pointer to the 7082 * @sp : private member of the device structure, which is a pointer to the
7100 * s2io_nic structure. 7083 * s2io_nic structure.
@@ -7552,7 +7535,7 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
7552 s2io_vpd_read(sp); 7535 s2io_vpd_read(sp);
7553 DBG_PRINT(ERR_DBG, "Copyright(c) 2002-2007 Neterion Inc.\n"); 7536 DBG_PRINT(ERR_DBG, "Copyright(c) 2002-2007 Neterion Inc.\n");
7554 DBG_PRINT(ERR_DBG, "%s: Neterion %s (rev %d)\n",dev->name, 7537 DBG_PRINT(ERR_DBG, "%s: Neterion %s (rev %d)\n",dev->name,
7555 sp->product_name, get_xena_rev_id(sp->pdev)); 7538 sp->product_name, pdev->revision);
7556 DBG_PRINT(ERR_DBG, "%s: Driver version %s\n", dev->name, 7539 DBG_PRINT(ERR_DBG, "%s: Driver version %s\n", dev->name,
7557 s2io_driver_version); 7540 s2io_driver_version);
7558 DBG_PRINT(ERR_DBG, "%s: MAC ADDR: " 7541 DBG_PRINT(ERR_DBG, "%s: MAC ADDR: "
diff --git a/drivers/net/s2io.h b/drivers/net/s2io.h
index 58592780f519..3887fe63a908 100644
--- a/drivers/net/s2io.h
+++ b/drivers/net/s2io.h
@@ -1033,7 +1033,6 @@ static void s2io_set_link(struct work_struct *work);
1033static int s2io_set_swapper(struct s2io_nic * sp); 1033static int s2io_set_swapper(struct s2io_nic * sp);
1034static void s2io_card_down(struct s2io_nic *nic); 1034static void s2io_card_down(struct s2io_nic *nic);
1035static int s2io_card_up(struct s2io_nic *nic); 1035static int s2io_card_up(struct s2io_nic *nic);
1036static int get_xena_rev_id(struct pci_dev *pdev);
1037static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit, 1036static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit,
1038 int bit_state); 1037 int bit_state);
1039static int s2io_add_isr(struct s2io_nic * sp); 1038static int s2io_add_isr(struct s2io_nic * sp);
diff --git a/drivers/net/saa9730.c b/drivers/net/saa9730.c
index ad94358ece89..451486b32f23 100644
--- a/drivers/net/saa9730.c
+++ b/drivers/net/saa9730.c
@@ -690,9 +690,9 @@ static int lan_saa9730_rx(struct net_device *dev)
690 lp->stats.rx_packets++; 690 lp->stats.rx_packets++;
691 skb_reserve(skb, 2); /* 16 byte align */ 691 skb_reserve(skb, 2); /* 16 byte align */
692 skb_put(skb, len); /* make room */ 692 skb_put(skb, len); /* make room */
693 eth_copy_and_sum(skb, 693 skb_copy_to_linear_data(skb,
694 (unsigned char *) pData, 694 (unsigned char *) pData,
695 len, 0); 695 len);
696 skb->protocol = eth_type_trans(skb, dev); 696 skb->protocol = eth_type_trans(skb, dev);
697 netif_rx(skb); 697 netif_rx(skb);
698 dev->last_rx = jiffies; 698 dev->last_rx = jiffies;
diff --git a/drivers/net/sgiseeq.c b/drivers/net/sgiseeq.c
index 2106becf6990..384b4685e977 100644
--- a/drivers/net/sgiseeq.c
+++ b/drivers/net/sgiseeq.c
@@ -320,7 +320,7 @@ static inline void sgiseeq_rx(struct net_device *dev, struct sgiseeq_private *sp
320 skb_put(skb, len); 320 skb_put(skb, len);
321 321
322 /* Copy out of kseg1 to avoid silly cache flush. */ 322 /* Copy out of kseg1 to avoid silly cache flush. */
323 eth_copy_and_sum(skb, pkt_pointer + 2, len, 0); 323 skb_copy_to_linear_data(skb, pkt_pointer + 2, len);
324 skb->protocol = eth_type_trans(skb, dev); 324 skb->protocol = eth_type_trans(skb, dev);
325 325
326 /* We don't want to receive our own packets */ 326 /* We don't want to receive our own packets */
diff --git a/drivers/net/sis190.c b/drivers/net/sis190.c
index bc8de48da313..ec2ad9f0efa2 100644
--- a/drivers/net/sis190.c
+++ b/drivers/net/sis190.c
@@ -548,7 +548,7 @@ static inline int sis190_try_rx_copy(struct sk_buff **sk_buff, int pkt_size,
548 skb = dev_alloc_skb(pkt_size + NET_IP_ALIGN); 548 skb = dev_alloc_skb(pkt_size + NET_IP_ALIGN);
549 if (skb) { 549 if (skb) {
550 skb_reserve(skb, NET_IP_ALIGN); 550 skb_reserve(skb, NET_IP_ALIGN);
551 eth_copy_and_sum(skb, sk_buff[0]->data, pkt_size, 0); 551 skb_copy_to_linear_data(skb, sk_buff[0]->data, pkt_size);
552 *sk_buff = skb; 552 *sk_buff = skb;
553 sis190_give_to_asic(desc, rx_buf_sz); 553 sis190_give_to_asic(desc, rx_buf_sz);
554 ret = 0; 554 ret = 0;
diff --git a/drivers/net/starfire.c b/drivers/net/starfire.c
index 786d4b9c07ec..8b6478663a56 100644
--- a/drivers/net/starfire.c
+++ b/drivers/net/starfire.c
@@ -740,7 +740,7 @@ static int __devinit starfire_init_one(struct pci_dev *pdev,
740 pci_set_master(pdev); 740 pci_set_master(pdev);
741 741
742 /* enable MWI -- it vastly improves Rx performance on sparc64 */ 742 /* enable MWI -- it vastly improves Rx performance on sparc64 */
743 pci_set_mwi(pdev); 743 pci_try_set_mwi(pdev);
744 744
745#ifdef ZEROCOPY 745#ifdef ZEROCOPY
746 /* Starfire can do TCP/UDP checksumming */ 746 /* Starfire can do TCP/UDP checksumming */
@@ -1456,7 +1456,7 @@ static int __netdev_rx(struct net_device *dev, int *quota)
1456 pci_dma_sync_single_for_cpu(np->pci_dev, 1456 pci_dma_sync_single_for_cpu(np->pci_dev,
1457 np->rx_info[entry].mapping, 1457 np->rx_info[entry].mapping,
1458 pkt_len, PCI_DMA_FROMDEVICE); 1458 pkt_len, PCI_DMA_FROMDEVICE);
1459 eth_copy_and_sum(skb, np->rx_info[entry].skb->data, pkt_len, 0); 1459 skb_copy_to_linear_data(skb, np->rx_info[entry].skb->data, pkt_len);
1460 pci_dma_sync_single_for_device(np->pci_dev, 1460 pci_dma_sync_single_for_device(np->pci_dev,
1461 np->rx_info[entry].mapping, 1461 np->rx_info[entry].mapping,
1462 pkt_len, PCI_DMA_FROMDEVICE); 1462 pkt_len, PCI_DMA_FROMDEVICE);
diff --git a/drivers/net/sun3_82586.c b/drivers/net/sun3_82586.c
index a123ea87893b..b77ab6e8fd35 100644
--- a/drivers/net/sun3_82586.c
+++ b/drivers/net/sun3_82586.c
@@ -777,7 +777,7 @@ static void sun3_82586_rcv_int(struct net_device *dev)
777 { 777 {
778 skb_reserve(skb,2); 778 skb_reserve(skb,2);
779 skb_put(skb,totlen); 779 skb_put(skb,totlen);
780 eth_copy_and_sum(skb,(char *) p->base+swab32((unsigned long) rbd->buffer),totlen,0); 780 skb_copy_to_linear_data(skb,(char *) p->base+swab32((unsigned long) rbd->buffer),totlen);
781 skb->protocol=eth_type_trans(skb,dev); 781 skb->protocol=eth_type_trans(skb,dev);
782 netif_rx(skb); 782 netif_rx(skb);
783 p->stats.rx_packets++; 783 p->stats.rx_packets++;
diff --git a/drivers/net/sun3lance.c b/drivers/net/sun3lance.c
index 791e081fdc15..f1548c033327 100644
--- a/drivers/net/sun3lance.c
+++ b/drivers/net/sun3lance.c
@@ -853,10 +853,9 @@ static int lance_rx( struct net_device *dev )
853 853
854 skb_reserve( skb, 2 ); /* 16 byte align */ 854 skb_reserve( skb, 2 ); /* 16 byte align */
855 skb_put( skb, pkt_len ); /* Make room */ 855 skb_put( skb, pkt_len ); /* Make room */
856// skb_copy_to_linear_data(skb, PKTBUF_ADDR(head), pkt_len); 856 skb_copy_to_linear_data(skb,
857 eth_copy_and_sum(skb,
858 PKTBUF_ADDR(head), 857 PKTBUF_ADDR(head),
859 pkt_len, 0); 858 pkt_len);
860 859
861 skb->protocol = eth_type_trans( skb, dev ); 860 skb->protocol = eth_type_trans( skb, dev );
862 netif_rx( skb ); 861 netif_rx( skb );
diff --git a/drivers/net/sunbmac.c b/drivers/net/sunbmac.c
index 2ad8d58dee3b..b3e0158def4f 100644
--- a/drivers/net/sunbmac.c
+++ b/drivers/net/sunbmac.c
@@ -860,7 +860,7 @@ static void bigmac_rx(struct bigmac *bp)
860 sbus_dma_sync_single_for_cpu(bp->bigmac_sdev, 860 sbus_dma_sync_single_for_cpu(bp->bigmac_sdev,
861 this->rx_addr, len, 861 this->rx_addr, len,
862 SBUS_DMA_FROMDEVICE); 862 SBUS_DMA_FROMDEVICE);
863 eth_copy_and_sum(copy_skb, (unsigned char *)skb->data, len, 0); 863 skb_copy_to_linear_data(copy_skb, (unsigned char *)skb->data, len);
864 sbus_dma_sync_single_for_device(bp->bigmac_sdev, 864 sbus_dma_sync_single_for_device(bp->bigmac_sdev,
865 this->rx_addr, len, 865 this->rx_addr, len,
866 SBUS_DMA_FROMDEVICE); 866 SBUS_DMA_FROMDEVICE);
diff --git a/drivers/net/sundance.c b/drivers/net/sundance.c
index e1f912d04043..af0c9831074c 100644
--- a/drivers/net/sundance.c
+++ b/drivers/net/sundance.c
@@ -397,7 +397,6 @@ struct netdev_private {
397 unsigned char phys[MII_CNT]; /* MII device addresses, only first one used. */ 397 unsigned char phys[MII_CNT]; /* MII device addresses, only first one used. */
398 struct pci_dev *pci_dev; 398 struct pci_dev *pci_dev;
399 void __iomem *base; 399 void __iomem *base;
400 unsigned char pci_rev_id;
401}; 400};
402 401
403/* The station address location in the EEPROM. */ 402/* The station address location in the EEPROM. */
@@ -544,8 +543,6 @@ static int __devinit sundance_probe1 (struct pci_dev *pdev,
544 dev->change_mtu = &change_mtu; 543 dev->change_mtu = &change_mtu;
545 pci_set_drvdata(pdev, dev); 544 pci_set_drvdata(pdev, dev);
546 545
547 pci_read_config_byte(pdev, PCI_REVISION_ID, &np->pci_rev_id);
548
549 i = register_netdev(dev); 546 i = register_netdev(dev);
550 if (i) 547 if (i)
551 goto err_out_unmap_rx; 548 goto err_out_unmap_rx;
@@ -828,7 +825,7 @@ static int netdev_open(struct net_device *dev)
828 iowrite8(100, ioaddr + RxDMAPollPeriod); 825 iowrite8(100, ioaddr + RxDMAPollPeriod);
829 iowrite8(127, ioaddr + TxDMAPollPeriod); 826 iowrite8(127, ioaddr + TxDMAPollPeriod);
830 /* Fix DFE-580TX packet drop issue */ 827 /* Fix DFE-580TX packet drop issue */
831 if (np->pci_rev_id >= 0x14) 828 if (np->pci_dev->revision >= 0x14)
832 iowrite8(0x01, ioaddr + DebugCtrl1); 829 iowrite8(0x01, ioaddr + DebugCtrl1);
833 netif_start_queue(dev); 830 netif_start_queue(dev);
834 831
@@ -1194,7 +1191,7 @@ static irqreturn_t intr_handler(int irq, void *dev_instance)
1194 hw_frame_id = ioread8(ioaddr + TxFrameId); 1191 hw_frame_id = ioread8(ioaddr + TxFrameId);
1195 } 1192 }
1196 1193
1197 if (np->pci_rev_id >= 0x14) { 1194 if (np->pci_dev->revision >= 0x14) {
1198 spin_lock(&np->lock); 1195 spin_lock(&np->lock);
1199 for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) { 1196 for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
1200 int entry = np->dirty_tx % TX_RING_SIZE; 1197 int entry = np->dirty_tx % TX_RING_SIZE;
@@ -1313,7 +1310,7 @@ static void rx_poll(unsigned long data)
1313 np->rx_buf_sz, 1310 np->rx_buf_sz,
1314 PCI_DMA_FROMDEVICE); 1311 PCI_DMA_FROMDEVICE);
1315 1312
1316 eth_copy_and_sum(skb, np->rx_skbuff[entry]->data, pkt_len, 0); 1313 skb_copy_to_linear_data(skb, np->rx_skbuff[entry]->data, pkt_len);
1317 pci_dma_sync_single_for_device(np->pci_dev, 1314 pci_dma_sync_single_for_device(np->pci_dev,
1318 desc->frag[0].addr, 1315 desc->frag[0].addr,
1319 np->rx_buf_sz, 1316 np->rx_buf_sz,
diff --git a/drivers/net/sunhme.c b/drivers/net/sunhme.c
index 15146a119230..8b35f13318ea 100644
--- a/drivers/net/sunhme.c
+++ b/drivers/net/sunhme.c
@@ -3095,12 +3095,8 @@ static int __devinit happy_meal_pci_probe(struct pci_dev *pdev,
3095 3095
3096#ifdef CONFIG_SPARC 3096#ifdef CONFIG_SPARC
3097 hp->hm_revision = of_getintprop_default(dp, "hm-rev", 0xff); 3097 hp->hm_revision = of_getintprop_default(dp, "hm-rev", 0xff);
3098 if (hp->hm_revision == 0xff) { 3098 if (hp->hm_revision == 0xff)
3099 unsigned char prev; 3099 hp->hm_revision = 0xc0 | (pdev->revision & 0x0f);
3100
3101 pci_read_config_byte(pdev, PCI_REVISION_ID, &prev);
3102 hp->hm_revision = 0xc0 | (prev & 0x0f);
3103 }
3104#else 3100#else
3105 /* works with this on non-sparc hosts */ 3101 /* works with this on non-sparc hosts */
3106 hp->hm_revision = 0x20; 3102 hp->hm_revision = 0x20;
diff --git a/drivers/net/sunlance.c b/drivers/net/sunlance.c
index 42722530ab24..053b7cb0d944 100644
--- a/drivers/net/sunlance.c
+++ b/drivers/net/sunlance.c
@@ -549,9 +549,9 @@ static void lance_rx_dvma(struct net_device *dev)
549 549
550 skb_reserve(skb, 2); /* 16 byte align */ 550 skb_reserve(skb, 2); /* 16 byte align */
551 skb_put(skb, len); /* make room */ 551 skb_put(skb, len); /* make room */
552 eth_copy_and_sum(skb, 552 skb_copy_to_linear_data(skb,
553 (unsigned char *)&(ib->rx_buf [entry][0]), 553 (unsigned char *)&(ib->rx_buf [entry][0]),
554 len, 0); 554 len);
555 skb->protocol = eth_type_trans(skb, dev); 555 skb->protocol = eth_type_trans(skb, dev);
556 netif_rx(skb); 556 netif_rx(skb);
557 dev->last_rx = jiffies; 557 dev->last_rx = jiffies;
diff --git a/drivers/net/sunqe.c b/drivers/net/sunqe.c
index fa70e0b78af7..1b65ae8a1c7c 100644
--- a/drivers/net/sunqe.c
+++ b/drivers/net/sunqe.c
@@ -439,8 +439,8 @@ static void qe_rx(struct sunqe *qep)
439 } else { 439 } else {
440 skb_reserve(skb, 2); 440 skb_reserve(skb, 2);
441 skb_put(skb, len); 441 skb_put(skb, len);
442 eth_copy_and_sum(skb, (unsigned char *) this_qbuf, 442 skb_copy_to_linear_data(skb, (unsigned char *) this_qbuf,
443 len, 0); 443 len);
444 skb->protocol = eth_type_trans(skb, qep->dev); 444 skb->protocol = eth_type_trans(skb, qep->dev);
445 netif_rx(skb); 445 netif_rx(skb);
446 qep->dev->last_rx = jiffies; 446 qep->dev->last_rx = jiffies;
diff --git a/drivers/net/sunvnet.c b/drivers/net/sunvnet.c
new file mode 100644
index 000000000000..8a667c13faef
--- /dev/null
+++ b/drivers/net/sunvnet.c
@@ -0,0 +1,1164 @@
1/* sunvnet.c: Sun LDOM Virtual Network Driver.
2 *
3 * Copyright (C) 2007 David S. Miller <davem@davemloft.net>
4 */
5
6#include <linux/module.h>
7#include <linux/kernel.h>
8#include <linux/types.h>
9#include <linux/slab.h>
10#include <linux/delay.h>
11#include <linux/init.h>
12#include <linux/netdevice.h>
13#include <linux/ethtool.h>
14#include <linux/etherdevice.h>
15
16#include <asm/vio.h>
17#include <asm/ldc.h>
18
19#include "sunvnet.h"
20
21#define DRV_MODULE_NAME "sunvnet"
22#define PFX DRV_MODULE_NAME ": "
23#define DRV_MODULE_VERSION "1.0"
24#define DRV_MODULE_RELDATE "June 25, 2007"
25
26static char version[] __devinitdata =
27 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
28MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
29MODULE_DESCRIPTION("Sun LDOM virtual network driver");
30MODULE_LICENSE("GPL");
31MODULE_VERSION(DRV_MODULE_VERSION);
32
33/* Ordered from largest major to lowest */
34static struct vio_version vnet_versions[] = {
35 { .major = 1, .minor = 0 },
36};
37
38static inline u32 vnet_tx_dring_avail(struct vio_dring_state *dr)
39{
40 return vio_dring_avail(dr, VNET_TX_RING_SIZE);
41}
42
43static int vnet_handle_unknown(struct vnet_port *port, void *arg)
44{
45 struct vio_msg_tag *pkt = arg;
46
47 printk(KERN_ERR PFX "Received unknown msg [%02x:%02x:%04x:%08x]\n",
48 pkt->type, pkt->stype, pkt->stype_env, pkt->sid);
49 printk(KERN_ERR PFX "Resetting connection.\n");
50
51 ldc_disconnect(port->vio.lp);
52
53 return -ECONNRESET;
54}
55
56static int vnet_send_attr(struct vio_driver_state *vio)
57{
58 struct vnet_port *port = to_vnet_port(vio);
59 struct net_device *dev = port->vp->dev;
60 struct vio_net_attr_info pkt;
61 int i;
62
63 memset(&pkt, 0, sizeof(pkt));
64 pkt.tag.type = VIO_TYPE_CTRL;
65 pkt.tag.stype = VIO_SUBTYPE_INFO;
66 pkt.tag.stype_env = VIO_ATTR_INFO;
67 pkt.tag.sid = vio_send_sid(vio);
68 pkt.xfer_mode = VIO_DRING_MODE;
69 pkt.addr_type = VNET_ADDR_ETHERMAC;
70 pkt.ack_freq = 0;
71 for (i = 0; i < 6; i++)
72 pkt.addr |= (u64)dev->dev_addr[i] << ((5 - i) * 8);
73 pkt.mtu = ETH_FRAME_LEN;
74
75 viodbg(HS, "SEND NET ATTR xmode[0x%x] atype[0x%x] addr[%llx] "
76 "ackfreq[%u] mtu[%llu]\n",
77 pkt.xfer_mode, pkt.addr_type,
78 (unsigned long long) pkt.addr,
79 pkt.ack_freq,
80 (unsigned long long) pkt.mtu);
81
82 return vio_ldc_send(vio, &pkt, sizeof(pkt));
83}
84
85static int handle_attr_info(struct vio_driver_state *vio,
86 struct vio_net_attr_info *pkt)
87{
88 viodbg(HS, "GOT NET ATTR INFO xmode[0x%x] atype[0x%x] addr[%llx] "
89 "ackfreq[%u] mtu[%llu]\n",
90 pkt->xfer_mode, pkt->addr_type,
91 (unsigned long long) pkt->addr,
92 pkt->ack_freq,
93 (unsigned long long) pkt->mtu);
94
95 pkt->tag.sid = vio_send_sid(vio);
96
97 if (pkt->xfer_mode != VIO_DRING_MODE ||
98 pkt->addr_type != VNET_ADDR_ETHERMAC ||
99 pkt->mtu != ETH_FRAME_LEN) {
100 viodbg(HS, "SEND NET ATTR NACK\n");
101
102 pkt->tag.stype = VIO_SUBTYPE_NACK;
103
104 (void) vio_ldc_send(vio, pkt, sizeof(*pkt));
105
106 return -ECONNRESET;
107 } else {
108 viodbg(HS, "SEND NET ATTR ACK\n");
109
110 pkt->tag.stype = VIO_SUBTYPE_ACK;
111
112 return vio_ldc_send(vio, pkt, sizeof(*pkt));
113 }
114
115}
116
117static int handle_attr_ack(struct vio_driver_state *vio,
118 struct vio_net_attr_info *pkt)
119{
120 viodbg(HS, "GOT NET ATTR ACK\n");
121
122 return 0;
123}
124
125static int handle_attr_nack(struct vio_driver_state *vio,
126 struct vio_net_attr_info *pkt)
127{
128 viodbg(HS, "GOT NET ATTR NACK\n");
129
130 return -ECONNRESET;
131}
132
133static int vnet_handle_attr(struct vio_driver_state *vio, void *arg)
134{
135 struct vio_net_attr_info *pkt = arg;
136
137 switch (pkt->tag.stype) {
138 case VIO_SUBTYPE_INFO:
139 return handle_attr_info(vio, pkt);
140
141 case VIO_SUBTYPE_ACK:
142 return handle_attr_ack(vio, pkt);
143
144 case VIO_SUBTYPE_NACK:
145 return handle_attr_nack(vio, pkt);
146
147 default:
148 return -ECONNRESET;
149 }
150}
151
152static void vnet_handshake_complete(struct vio_driver_state *vio)
153{
154 struct vio_dring_state *dr;
155
156 dr = &vio->drings[VIO_DRIVER_RX_RING];
157 dr->snd_nxt = dr->rcv_nxt = 1;
158
159 dr = &vio->drings[VIO_DRIVER_TX_RING];
160 dr->snd_nxt = dr->rcv_nxt = 1;
161}
162
163/* The hypervisor interface that implements copying to/from imported
164 * memory from another domain requires that copies are done to 8-byte
165 * aligned buffers, and that the lengths of such copies are also 8-byte
166 * multiples.
167 *
168 * So we align skb->data to an 8-byte multiple and pad-out the data
169 * area so we can round the copy length up to the next multiple of
170 * 8 for the copy.
171 *
172 * The transmitter puts the actual start of the packet 6 bytes into
173 * the buffer it sends over, so that the IP headers after the ethernet
174 * header are aligned properly. These 6 bytes are not in the descriptor
175 * length, they are simply implied. This offset is represented using
176 * the VNET_PACKET_SKIP macro.
177 */
178static struct sk_buff *alloc_and_align_skb(struct net_device *dev,
179 unsigned int len)
180{
181 struct sk_buff *skb = netdev_alloc_skb(dev, len+VNET_PACKET_SKIP+8+8);
182 unsigned long addr, off;
183
184 if (unlikely(!skb))
185 return NULL;
186
187 addr = (unsigned long) skb->data;
188 off = ((addr + 7UL) & ~7UL) - addr;
189 if (off)
190 skb_reserve(skb, off);
191
192 return skb;
193}
194
195static int vnet_rx_one(struct vnet_port *port, unsigned int len,
196 struct ldc_trans_cookie *cookies, int ncookies)
197{
198 struct net_device *dev = port->vp->dev;
199 unsigned int copy_len;
200 struct sk_buff *skb;
201 int err;
202
203 err = -EMSGSIZE;
204 if (unlikely(len < ETH_ZLEN || len > ETH_FRAME_LEN)) {
205 dev->stats.rx_length_errors++;
206 goto out_dropped;
207 }
208
209 skb = alloc_and_align_skb(dev, len);
210 err = -ENOMEM;
211 if (unlikely(!skb)) {
212 dev->stats.rx_missed_errors++;
213 goto out_dropped;
214 }
215
216 copy_len = (len + VNET_PACKET_SKIP + 7U) & ~7U;
217 skb_put(skb, copy_len);
218 err = ldc_copy(port->vio.lp, LDC_COPY_IN,
219 skb->data, copy_len, 0,
220 cookies, ncookies);
221 if (unlikely(err < 0)) {
222 dev->stats.rx_frame_errors++;
223 goto out_free_skb;
224 }
225
226 skb_pull(skb, VNET_PACKET_SKIP);
227 skb_trim(skb, len);
228 skb->protocol = eth_type_trans(skb, dev);
229
230 dev->stats.rx_packets++;
231 dev->stats.rx_bytes += len;
232
233 netif_rx(skb);
234
235 return 0;
236
237out_free_skb:
238 kfree_skb(skb);
239
240out_dropped:
241 dev->stats.rx_dropped++;
242 return err;
243}
244
245static int vnet_send_ack(struct vnet_port *port, struct vio_dring_state *dr,
246 u32 start, u32 end, u8 vio_dring_state)
247{
248 struct vio_dring_data hdr = {
249 .tag = {
250 .type = VIO_TYPE_DATA,
251 .stype = VIO_SUBTYPE_ACK,
252 .stype_env = VIO_DRING_DATA,
253 .sid = vio_send_sid(&port->vio),
254 },
255 .dring_ident = dr->ident,
256 .start_idx = start,
257 .end_idx = end,
258 .state = vio_dring_state,
259 };
260 int err, delay;
261
262 hdr.seq = dr->snd_nxt;
263 delay = 1;
264 do {
265 err = vio_ldc_send(&port->vio, &hdr, sizeof(hdr));
266 if (err > 0) {
267 dr->snd_nxt++;
268 break;
269 }
270 udelay(delay);
271 if ((delay <<= 1) > 128)
272 delay = 128;
273 } while (err == -EAGAIN);
274
275 return err;
276}
277
278static u32 next_idx(u32 idx, struct vio_dring_state *dr)
279{
280 if (++idx == dr->num_entries)
281 idx = 0;
282 return idx;
283}
284
285static u32 prev_idx(u32 idx, struct vio_dring_state *dr)
286{
287 if (idx == 0)
288 idx = dr->num_entries - 1;
289 else
290 idx--;
291
292 return idx;
293}
294
295static struct vio_net_desc *get_rx_desc(struct vnet_port *port,
296 struct vio_dring_state *dr,
297 u32 index)
298{
299 struct vio_net_desc *desc = port->vio.desc_buf;
300 int err;
301
302 err = ldc_get_dring_entry(port->vio.lp, desc, dr->entry_size,
303 (index * dr->entry_size),
304 dr->cookies, dr->ncookies);
305 if (err < 0)
306 return ERR_PTR(err);
307
308 return desc;
309}
310
311static int put_rx_desc(struct vnet_port *port,
312 struct vio_dring_state *dr,
313 struct vio_net_desc *desc,
314 u32 index)
315{
316 int err;
317
318 err = ldc_put_dring_entry(port->vio.lp, desc, dr->entry_size,
319 (index * dr->entry_size),
320 dr->cookies, dr->ncookies);
321 if (err < 0)
322 return err;
323
324 return 0;
325}
326
327static int vnet_walk_rx_one(struct vnet_port *port,
328 struct vio_dring_state *dr,
329 u32 index, int *needs_ack)
330{
331 struct vio_net_desc *desc = get_rx_desc(port, dr, index);
332 struct vio_driver_state *vio = &port->vio;
333 int err;
334
335 if (IS_ERR(desc))
336 return PTR_ERR(desc);
337
338 viodbg(DATA, "vio_walk_rx_one desc[%02x:%02x:%08x:%08x:%lx:%lx]\n",
339 desc->hdr.state, desc->hdr.ack,
340 desc->size, desc->ncookies,
341 desc->cookies[0].cookie_addr,
342 desc->cookies[0].cookie_size);
343
344 if (desc->hdr.state != VIO_DESC_READY)
345 return 1;
346 err = vnet_rx_one(port, desc->size, desc->cookies, desc->ncookies);
347 if (err == -ECONNRESET)
348 return err;
349 desc->hdr.state = VIO_DESC_DONE;
350 err = put_rx_desc(port, dr, desc, index);
351 if (err < 0)
352 return err;
353 *needs_ack = desc->hdr.ack;
354 return 0;
355}
356
357static int vnet_walk_rx(struct vnet_port *port, struct vio_dring_state *dr,
358 u32 start, u32 end)
359{
360 struct vio_driver_state *vio = &port->vio;
361 int ack_start = -1, ack_end = -1;
362
363 end = (end == (u32) -1) ? prev_idx(start, dr) : next_idx(end, dr);
364
365 viodbg(DATA, "vnet_walk_rx start[%08x] end[%08x]\n", start, end);
366
367 while (start != end) {
368 int ack = 0, err = vnet_walk_rx_one(port, dr, start, &ack);
369 if (err == -ECONNRESET)
370 return err;
371 if (err != 0)
372 break;
373 if (ack_start == -1)
374 ack_start = start;
375 ack_end = start;
376 start = next_idx(start, dr);
377 if (ack && start != end) {
378 err = vnet_send_ack(port, dr, ack_start, ack_end,
379 VIO_DRING_ACTIVE);
380 if (err == -ECONNRESET)
381 return err;
382 ack_start = -1;
383 }
384 }
385 if (unlikely(ack_start == -1))
386 ack_start = ack_end = prev_idx(start, dr);
387 return vnet_send_ack(port, dr, ack_start, ack_end, VIO_DRING_STOPPED);
388}
389
390static int vnet_rx(struct vnet_port *port, void *msgbuf)
391{
392 struct vio_dring_data *pkt = msgbuf;
393 struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_RX_RING];
394 struct vio_driver_state *vio = &port->vio;
395
396 viodbg(DATA, "vnet_rx stype_env[%04x] seq[%016lx] rcv_nxt[%016lx]\n",
397 pkt->tag.stype_env, pkt->seq, dr->rcv_nxt);
398
399 if (unlikely(pkt->tag.stype_env != VIO_DRING_DATA))
400 return 0;
401 if (unlikely(pkt->seq != dr->rcv_nxt)) {
402 printk(KERN_ERR PFX "RX out of sequence seq[0x%lx] "
403 "rcv_nxt[0x%lx]\n", pkt->seq, dr->rcv_nxt);
404 return 0;
405 }
406
407 dr->rcv_nxt++;
408
409 /* XXX Validate pkt->start_idx and pkt->end_idx XXX */
410
411 return vnet_walk_rx(port, dr, pkt->start_idx, pkt->end_idx);
412}
413
414static int idx_is_pending(struct vio_dring_state *dr, u32 end)
415{
416 u32 idx = dr->cons;
417 int found = 0;
418
419 while (idx != dr->prod) {
420 if (idx == end) {
421 found = 1;
422 break;
423 }
424 idx = next_idx(idx, dr);
425 }
426 return found;
427}
428
429static int vnet_ack(struct vnet_port *port, void *msgbuf)
430{
431 struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
432 struct vio_dring_data *pkt = msgbuf;
433 struct net_device *dev;
434 struct vnet *vp;
435 u32 end;
436
437 if (unlikely(pkt->tag.stype_env != VIO_DRING_DATA))
438 return 0;
439
440 end = pkt->end_idx;
441 if (unlikely(!idx_is_pending(dr, end)))
442 return 0;
443
444 dr->cons = next_idx(end, dr);
445
446 vp = port->vp;
447 dev = vp->dev;
448 if (unlikely(netif_queue_stopped(dev) &&
449 vnet_tx_dring_avail(dr) >= VNET_TX_WAKEUP_THRESH(dr)))
450 return 1;
451
452 return 0;
453}
454
/* Peer NACK'ed one of our data messages.
 * XXX just reset or similar XXX
 */
static int vnet_nack(struct vnet_port *port, void *msgbuf)
{
	return 0;
}
460
461static void maybe_tx_wakeup(struct vnet *vp)
462{
463 struct net_device *dev = vp->dev;
464
465 netif_tx_lock(dev);
466 if (likely(netif_queue_stopped(dev))) {
467 struct vnet_port *port;
468 int wake = 1;
469
470 list_for_each_entry(port, &vp->port_list, list) {
471 struct vio_dring_state *dr;
472
473 dr = &port->vio.drings[VIO_DRIVER_TX_RING];
474 if (vnet_tx_dring_avail(dr) <
475 VNET_TX_WAKEUP_THRESH(dr)) {
476 wake = 0;
477 break;
478 }
479 }
480 if (wake)
481 netif_wake_queue(dev);
482 }
483 netif_tx_unlock(dev);
484}
485
486static void vnet_event(void *arg, int event)
487{
488 struct vnet_port *port = arg;
489 struct vio_driver_state *vio = &port->vio;
490 unsigned long flags;
491 int tx_wakeup, err;
492
493 spin_lock_irqsave(&vio->lock, flags);
494
495 if (unlikely(event == LDC_EVENT_RESET ||
496 event == LDC_EVENT_UP)) {
497 vio_link_state_change(vio, event);
498 spin_unlock_irqrestore(&vio->lock, flags);
499
500 return;
501 }
502
503 if (unlikely(event != LDC_EVENT_DATA_READY)) {
504 printk(KERN_WARNING PFX "Unexpected LDC event %d\n", event);
505 spin_unlock_irqrestore(&vio->lock, flags);
506 return;
507 }
508
509 tx_wakeup = err = 0;
510 while (1) {
511 union {
512 struct vio_msg_tag tag;
513 u64 raw[8];
514 } msgbuf;
515
516 err = ldc_read(vio->lp, &msgbuf, sizeof(msgbuf));
517 if (unlikely(err < 0)) {
518 if (err == -ECONNRESET)
519 vio_conn_reset(vio);
520 break;
521 }
522 if (err == 0)
523 break;
524 viodbg(DATA, "TAG [%02x:%02x:%04x:%08x]\n",
525 msgbuf.tag.type,
526 msgbuf.tag.stype,
527 msgbuf.tag.stype_env,
528 msgbuf.tag.sid);
529 err = vio_validate_sid(vio, &msgbuf.tag);
530 if (err < 0)
531 break;
532
533 if (likely(msgbuf.tag.type == VIO_TYPE_DATA)) {
534 if (msgbuf.tag.stype == VIO_SUBTYPE_INFO) {
535 err = vnet_rx(port, &msgbuf);
536 } else if (msgbuf.tag.stype == VIO_SUBTYPE_ACK) {
537 err = vnet_ack(port, &msgbuf);
538 if (err > 0)
539 tx_wakeup |= err;
540 } else if (msgbuf.tag.stype == VIO_SUBTYPE_NACK) {
541 err = vnet_nack(port, &msgbuf);
542 }
543 } else if (msgbuf.tag.type == VIO_TYPE_CTRL) {
544 err = vio_control_pkt_engine(vio, &msgbuf);
545 if (err)
546 break;
547 } else {
548 err = vnet_handle_unknown(port, &msgbuf);
549 }
550 if (err == -ECONNRESET)
551 break;
552 }
553 spin_unlock(&vio->lock);
554 if (unlikely(tx_wakeup && err != -ECONNRESET))
555 maybe_tx_wakeup(port->vp);
556 local_irq_restore(flags);
557}
558
559static int __vnet_tx_trigger(struct vnet_port *port)
560{
561 struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
562 struct vio_dring_data hdr = {
563 .tag = {
564 .type = VIO_TYPE_DATA,
565 .stype = VIO_SUBTYPE_INFO,
566 .stype_env = VIO_DRING_DATA,
567 .sid = vio_send_sid(&port->vio),
568 },
569 .dring_ident = dr->ident,
570 .start_idx = dr->prod,
571 .end_idx = (u32) -1,
572 };
573 int err, delay;
574
575 hdr.seq = dr->snd_nxt;
576 delay = 1;
577 do {
578 err = vio_ldc_send(&port->vio, &hdr, sizeof(hdr));
579 if (err > 0) {
580 dr->snd_nxt++;
581 break;
582 }
583 udelay(delay);
584 if ((delay <<= 1) > 128)
585 delay = 128;
586 } while (err == -EAGAIN);
587
588 return err;
589}
590
591struct vnet_port *__tx_port_find(struct vnet *vp, struct sk_buff *skb)
592{
593 unsigned int hash = vnet_hashfn(skb->data);
594 struct hlist_head *hp = &vp->port_hash[hash];
595 struct hlist_node *n;
596 struct vnet_port *port;
597
598 hlist_for_each_entry(port, n, hp, hash) {
599 if (!compare_ether_addr(port->raddr, skb->data))
600 return port;
601 }
602 port = NULL;
603 if (!list_empty(&vp->port_list))
604 port = list_entry(vp->port_list.next, struct vnet_port, list);
605
606 return port;
607}
608
609struct vnet_port *tx_port_find(struct vnet *vp, struct sk_buff *skb)
610{
611 struct vnet_port *ret;
612 unsigned long flags;
613
614 spin_lock_irqsave(&vp->lock, flags);
615 ret = __tx_port_find(vp, skb);
616 spin_unlock_irqrestore(&vp->lock, flags);
617
618 return ret;
619}
620
621static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
622{
623 struct vnet *vp = netdev_priv(dev);
624 struct vnet_port *port = tx_port_find(vp, skb);
625 struct vio_dring_state *dr;
626 struct vio_net_desc *d;
627 unsigned long flags;
628 unsigned int len;
629 void *tx_buf;
630 int i, err;
631
632 if (unlikely(!port))
633 goto out_dropped;
634
635 spin_lock_irqsave(&port->vio.lock, flags);
636
637 dr = &port->vio.drings[VIO_DRIVER_TX_RING];
638 if (unlikely(vnet_tx_dring_avail(dr) < 2)) {
639 if (!netif_queue_stopped(dev)) {
640 netif_stop_queue(dev);
641
642 /* This is a hard error, log it. */
643 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
644 "queue awake!\n", dev->name);
645 dev->stats.tx_errors++;
646 }
647 spin_unlock_irqrestore(&port->vio.lock, flags);
648 return NETDEV_TX_BUSY;
649 }
650
651 d = vio_dring_cur(dr);
652
653 tx_buf = port->tx_bufs[dr->prod].buf;
654 skb_copy_from_linear_data(skb, tx_buf + VNET_PACKET_SKIP, skb->len);
655
656 len = skb->len;
657 if (len < ETH_ZLEN) {
658 len = ETH_ZLEN;
659 memset(tx_buf+VNET_PACKET_SKIP+skb->len, 0, len - skb->len);
660 }
661
662 d->hdr.ack = VIO_ACK_ENABLE;
663 d->size = len;
664 d->ncookies = port->tx_bufs[dr->prod].ncookies;
665 for (i = 0; i < d->ncookies; i++)
666 d->cookies[i] = port->tx_bufs[dr->prod].cookies[i];
667
668 /* This has to be a non-SMP write barrier because we are writing
669 * to memory which is shared with the peer LDOM.
670 */
671 wmb();
672
673 d->hdr.state = VIO_DESC_READY;
674
675 err = __vnet_tx_trigger(port);
676 if (unlikely(err < 0)) {
677 printk(KERN_INFO PFX "%s: TX trigger error %d\n",
678 dev->name, err);
679 d->hdr.state = VIO_DESC_FREE;
680 dev->stats.tx_carrier_errors++;
681 goto out_dropped_unlock;
682 }
683
684 dev->stats.tx_packets++;
685 dev->stats.tx_bytes += skb->len;
686
687 dr->prod = (dr->prod + 1) & (VNET_TX_RING_SIZE - 1);
688 if (unlikely(vnet_tx_dring_avail(dr) < 2)) {
689 netif_stop_queue(dev);
690 if (vnet_tx_dring_avail(dr) > VNET_TX_WAKEUP_THRESH(dr))
691 netif_wake_queue(dev);
692 }
693
694 spin_unlock_irqrestore(&port->vio.lock, flags);
695
696 dev_kfree_skb(skb);
697
698 dev->trans_start = jiffies;
699 return NETDEV_TX_OK;
700
701out_dropped_unlock:
702 spin_unlock_irqrestore(&port->vio.lock, flags);
703
704out_dropped:
705 dev_kfree_skb(skb);
706 dev->stats.tx_dropped++;
707 return NETDEV_TX_OK;
708}
709
/* Watchdog hook.  XXX Implement me XXX */
static void vnet_tx_timeout(struct net_device *dev)
{
}
714
/* net_device open: the LDC handshake is started at port probe time
 * (vio_port_up), so all that remains here is enabling the queue and
 * reporting carrier.
 */
static int vnet_open(struct net_device *dev)
{
	netif_carrier_on(dev);
	netif_start_queue(dev);

	return 0;
}
722
/* net_device stop: mirror of vnet_open(). */
static int vnet_close(struct net_device *dev)
{
	netif_stop_queue(dev);
	netif_carrier_off(dev);

	return 0;
}
730
/* XXX Implement multicast support XXX */
static void vnet_set_rx_mode(struct net_device *dev)
{
}
735
736static int vnet_change_mtu(struct net_device *dev, int new_mtu)
737{
738 if (new_mtu != ETH_DATA_LEN)
739 return -EINVAL;
740
741 dev->mtu = new_mtu;
742 return 0;
743}
744
745static int vnet_set_mac_addr(struct net_device *dev, void *p)
746{
747 return -EINVAL;
748}
749
750static void vnet_get_drvinfo(struct net_device *dev,
751 struct ethtool_drvinfo *info)
752{
753 strcpy(info->driver, DRV_MODULE_NAME);
754 strcpy(info->version, DRV_MODULE_VERSION);
755}
756
757static u32 vnet_get_msglevel(struct net_device *dev)
758{
759 struct vnet *vp = netdev_priv(dev);
760 return vp->msg_enable;
761}
762
763static void vnet_set_msglevel(struct net_device *dev, u32 value)
764{
765 struct vnet *vp = netdev_priv(dev);
766 vp->msg_enable = value;
767}
768
769static const struct ethtool_ops vnet_ethtool_ops = {
770 .get_drvinfo = vnet_get_drvinfo,
771 .get_msglevel = vnet_get_msglevel,
772 .set_msglevel = vnet_set_msglevel,
773 .get_link = ethtool_op_get_link,
774 .get_perm_addr = ethtool_op_get_perm_addr,
775};
776
777static void vnet_port_free_tx_bufs(struct vnet_port *port)
778{
779 struct vio_dring_state *dr;
780 int i;
781
782 dr = &port->vio.drings[VIO_DRIVER_TX_RING];
783 if (dr->base) {
784 ldc_free_exp_dring(port->vio.lp, dr->base,
785 (dr->entry_size * dr->num_entries),
786 dr->cookies, dr->ncookies);
787 dr->base = NULL;
788 dr->entry_size = 0;
789 dr->num_entries = 0;
790 dr->pending = 0;
791 dr->ncookies = 0;
792 }
793
794 for (i = 0; i < VNET_TX_RING_SIZE; i++) {
795 void *buf = port->tx_bufs[i].buf;
796
797 if (!buf)
798 continue;
799
800 ldc_unmap(port->vio.lp,
801 port->tx_bufs[i].cookies,
802 port->tx_bufs[i].ncookies);
803
804 kfree(buf);
805 port->tx_bufs[i].buf = NULL;
806 }
807}
808
809static int __devinit vnet_port_alloc_tx_bufs(struct vnet_port *port)
810{
811 struct vio_dring_state *dr;
812 unsigned long len;
813 int i, err, ncookies;
814 void *dring;
815
816 for (i = 0; i < VNET_TX_RING_SIZE; i++) {
817 void *buf = kzalloc(ETH_FRAME_LEN + 8, GFP_KERNEL);
818 int map_len = (ETH_FRAME_LEN + 7) & ~7;
819
820 err = -ENOMEM;
821 if (!buf) {
822 printk(KERN_ERR "TX buffer allocation failure\n");
823 goto err_out;
824 }
825 err = -EFAULT;
826 if ((unsigned long)buf & (8UL - 1)) {
827 printk(KERN_ERR "TX buffer misaligned\n");
828 kfree(buf);
829 goto err_out;
830 }
831
832 err = ldc_map_single(port->vio.lp, buf, map_len,
833 port->tx_bufs[i].cookies, 2,
834 (LDC_MAP_SHADOW |
835 LDC_MAP_DIRECT |
836 LDC_MAP_RW));
837 if (err < 0) {
838 kfree(buf);
839 goto err_out;
840 }
841 port->tx_bufs[i].buf = buf;
842 port->tx_bufs[i].ncookies = err;
843 }
844
845 dr = &port->vio.drings[VIO_DRIVER_TX_RING];
846
847 len = (VNET_TX_RING_SIZE *
848 (sizeof(struct vio_net_desc) +
849 (sizeof(struct ldc_trans_cookie) * 2)));
850
851 ncookies = VIO_MAX_RING_COOKIES;
852 dring = ldc_alloc_exp_dring(port->vio.lp, len,
853 dr->cookies, &ncookies,
854 (LDC_MAP_SHADOW |
855 LDC_MAP_DIRECT |
856 LDC_MAP_RW));
857 if (IS_ERR(dring)) {
858 err = PTR_ERR(dring);
859 goto err_out;
860 }
861
862 dr->base = dring;
863 dr->entry_size = (sizeof(struct vio_net_desc) +
864 (sizeof(struct ldc_trans_cookie) * 2));
865 dr->num_entries = VNET_TX_RING_SIZE;
866 dr->prod = dr->cons = 0;
867 dr->pending = VNET_TX_RING_SIZE;
868 dr->ncookies = ncookies;
869
870 return 0;
871
872err_out:
873 vnet_port_free_tx_bufs(port);
874
875 return err;
876}
877
878static struct ldc_channel_config vnet_ldc_cfg = {
879 .event = vnet_event,
880 .mtu = 64,
881 .mode = LDC_MODE_UNRELIABLE,
882};
883
884static struct vio_driver_ops vnet_vio_ops = {
885 .send_attr = vnet_send_attr,
886 .handle_attr = vnet_handle_attr,
887 .handshake_complete = vnet_handshake_complete,
888};
889
890const char *remote_macaddr_prop = "remote-mac-address";
891
892static int __devinit vnet_port_probe(struct vio_dev *vdev,
893 const struct vio_device_id *id)
894{
895 struct mdesc_handle *hp;
896 struct vnet_port *port;
897 unsigned long flags;
898 struct vnet *vp;
899 const u64 *rmac;
900 int len, i, err, switch_port;
901
902 vp = dev_get_drvdata(vdev->dev.parent);
903 if (!vp) {
904 printk(KERN_ERR PFX "Cannot find port parent vnet.\n");
905 return -ENODEV;
906 }
907
908 hp = mdesc_grab();
909
910 rmac = mdesc_get_property(hp, vdev->mp, remote_macaddr_prop, &len);
911 err = -ENODEV;
912 if (!rmac) {
913 printk(KERN_ERR PFX "Port lacks %s property.\n",
914 remote_macaddr_prop);
915 goto err_out_put_mdesc;
916 }
917
918 port = kzalloc(sizeof(*port), GFP_KERNEL);
919 err = -ENOMEM;
920 if (!port) {
921 printk(KERN_ERR PFX "Cannot allocate vnet_port.\n");
922 goto err_out_put_mdesc;
923 }
924
925 for (i = 0; i < ETH_ALEN; i++)
926 port->raddr[i] = (*rmac >> (5 - i) * 8) & 0xff;
927
928 port->vp = vp;
929
930 err = vio_driver_init(&port->vio, vdev, VDEV_NETWORK,
931 vnet_versions, ARRAY_SIZE(vnet_versions),
932 &vnet_vio_ops, vp->dev->name);
933 if (err)
934 goto err_out_free_port;
935
936 err = vio_ldc_alloc(&port->vio, &vnet_ldc_cfg, port);
937 if (err)
938 goto err_out_free_port;
939
940 err = vnet_port_alloc_tx_bufs(port);
941 if (err)
942 goto err_out_free_ldc;
943
944 INIT_HLIST_NODE(&port->hash);
945 INIT_LIST_HEAD(&port->list);
946
947 switch_port = 0;
948 if (mdesc_get_property(hp, vdev->mp, "switch-port", NULL) != NULL)
949 switch_port = 1;
950
951 spin_lock_irqsave(&vp->lock, flags);
952 if (switch_port)
953 list_add(&port->list, &vp->port_list);
954 else
955 list_add_tail(&port->list, &vp->port_list);
956 hlist_add_head(&port->hash, &vp->port_hash[vnet_hashfn(port->raddr)]);
957 spin_unlock_irqrestore(&vp->lock, flags);
958
959 dev_set_drvdata(&vdev->dev, port);
960
961 printk(KERN_INFO "%s: PORT ( remote-mac ", vp->dev->name);
962 for (i = 0; i < 6; i++)
963 printk("%2.2x%c", port->raddr[i], i == 5 ? ' ' : ':');
964 if (switch_port)
965 printk("switch-port ");
966 printk(")\n");
967
968 vio_port_up(&port->vio);
969
970 mdesc_release(hp);
971
972 return 0;
973
974err_out_free_ldc:
975 vio_ldc_free(&port->vio);
976
977err_out_free_port:
978 kfree(port);
979
980err_out_put_mdesc:
981 mdesc_release(hp);
982 return err;
983}
984
985static int vnet_port_remove(struct vio_dev *vdev)
986{
987 struct vnet_port *port = dev_get_drvdata(&vdev->dev);
988
989 if (port) {
990 struct vnet *vp = port->vp;
991 unsigned long flags;
992
993 del_timer_sync(&port->vio.timer);
994
995 spin_lock_irqsave(&vp->lock, flags);
996 list_del(&port->list);
997 hlist_del(&port->hash);
998 spin_unlock_irqrestore(&vp->lock, flags);
999
1000 vnet_port_free_tx_bufs(port);
1001 vio_ldc_free(&port->vio);
1002
1003 dev_set_drvdata(&vdev->dev, NULL);
1004
1005 kfree(port);
1006 }
1007 return 0;
1008}
1009
1010static struct vio_device_id vnet_port_match[] = {
1011 {
1012 .type = "vnet-port",
1013 },
1014 {},
1015};
1016MODULE_DEVICE_TABLE(vio, vnet_match);
1017
1018static struct vio_driver vnet_port_driver = {
1019 .id_table = vnet_port_match,
1020 .probe = vnet_port_probe,
1021 .remove = vnet_port_remove,
1022 .driver = {
1023 .name = "vnet_port",
1024 .owner = THIS_MODULE,
1025 }
1026};
1027
1028const char *local_mac_prop = "local-mac-address";
1029
1030static int __devinit vnet_probe(struct vio_dev *vdev,
1031 const struct vio_device_id *id)
1032{
1033 static int vnet_version_printed;
1034 struct mdesc_handle *hp;
1035 struct net_device *dev;
1036 struct vnet *vp;
1037 const u64 *mac;
1038 int err, i, len;
1039
1040 if (vnet_version_printed++ == 0)
1041 printk(KERN_INFO "%s", version);
1042
1043 hp = mdesc_grab();
1044
1045 mac = mdesc_get_property(hp, vdev->mp, local_mac_prop, &len);
1046 if (!mac) {
1047 printk(KERN_ERR PFX "vnet lacks %s property.\n",
1048 local_mac_prop);
1049 err = -ENODEV;
1050 goto err_out;
1051 }
1052
1053 dev = alloc_etherdev(sizeof(*vp));
1054 if (!dev) {
1055 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
1056 err = -ENOMEM;
1057 goto err_out;
1058 }
1059
1060 for (i = 0; i < ETH_ALEN; i++)
1061 dev->dev_addr[i] = (*mac >> (5 - i) * 8) & 0xff;
1062
1063 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
1064
1065 SET_NETDEV_DEV(dev, &vdev->dev);
1066
1067 vp = netdev_priv(dev);
1068
1069 spin_lock_init(&vp->lock);
1070 vp->dev = dev;
1071 vp->vdev = vdev;
1072
1073 INIT_LIST_HEAD(&vp->port_list);
1074 for (i = 0; i < VNET_PORT_HASH_SIZE; i++)
1075 INIT_HLIST_HEAD(&vp->port_hash[i]);
1076
1077 dev->open = vnet_open;
1078 dev->stop = vnet_close;
1079 dev->set_multicast_list = vnet_set_rx_mode;
1080 dev->set_mac_address = vnet_set_mac_addr;
1081 dev->tx_timeout = vnet_tx_timeout;
1082 dev->ethtool_ops = &vnet_ethtool_ops;
1083 dev->watchdog_timeo = VNET_TX_TIMEOUT;
1084 dev->change_mtu = vnet_change_mtu;
1085 dev->hard_start_xmit = vnet_start_xmit;
1086
1087 err = register_netdev(dev);
1088 if (err) {
1089 printk(KERN_ERR PFX "Cannot register net device, "
1090 "aborting.\n");
1091 goto err_out_free_dev;
1092 }
1093
1094 printk(KERN_INFO "%s: Sun LDOM vnet ", dev->name);
1095
1096 for (i = 0; i < 6; i++)
1097 printk("%2.2x%c", dev->dev_addr[i], i == 5 ? '\n' : ':');
1098
1099 dev_set_drvdata(&vdev->dev, vp);
1100
1101 mdesc_release(hp);
1102
1103 return 0;
1104
1105err_out_free_dev:
1106 free_netdev(dev);
1107
1108err_out:
1109 mdesc_release(hp);
1110 return err;
1111}
1112
1113static int vnet_remove(struct vio_dev *vdev)
1114{
1115
1116 struct vnet *vp = dev_get_drvdata(&vdev->dev);
1117
1118 if (vp) {
1119 /* XXX unregister port, or at least check XXX */
1120 unregister_netdevice(vp->dev);
1121 dev_set_drvdata(&vdev->dev, NULL);
1122 }
1123 return 0;
1124}
1125
1126static struct vio_device_id vnet_match[] = {
1127 {
1128 .type = "network",
1129 },
1130 {},
1131};
1132MODULE_DEVICE_TABLE(vio, vnet_match);
1133
1134static struct vio_driver vnet_driver = {
1135 .id_table = vnet_match,
1136 .probe = vnet_probe,
1137 .remove = vnet_remove,
1138 .driver = {
1139 .name = "vnet",
1140 .owner = THIS_MODULE,
1141 }
1142};
1143
1144static int __init vnet_init(void)
1145{
1146 int err = vio_register_driver(&vnet_driver);
1147
1148 if (!err) {
1149 err = vio_register_driver(&vnet_port_driver);
1150 if (err)
1151 vio_unregister_driver(&vnet_driver);
1152 }
1153
1154 return err;
1155}
1156
1157static void __exit vnet_exit(void)
1158{
1159 vio_unregister_driver(&vnet_port_driver);
1160 vio_unregister_driver(&vnet_driver);
1161}
1162
1163module_init(vnet_init);
1164module_exit(vnet_exit);
diff --git a/drivers/net/sunvnet.h b/drivers/net/sunvnet.h
new file mode 100644
index 000000000000..1c887302d46d
--- /dev/null
+++ b/drivers/net/sunvnet.h
@@ -0,0 +1,70 @@
1#ifndef _SUNVNET_H
2#define _SUNVNET_H
3
4#define DESC_NCOOKIES(entry_size) \
5 ((entry_size) - sizeof(struct vio_net_desc))
6
7/* length of time before we decide the hardware is borked,
8 * and dev->tx_timeout() should be called to fix the problem
9 */
10#define VNET_TX_TIMEOUT (5 * HZ)
11
12#define VNET_TX_RING_SIZE 512
13#define VNET_TX_WAKEUP_THRESH(dr) ((dr)->pending / 4)
14
15/* VNET packets are sent in buffers with the first 6 bytes skipped
16 * so that after the ethernet header the IPv4/IPv6 headers are aligned
17 * properly.
18 */
19#define VNET_PACKET_SKIP 6
20
21struct vnet_tx_entry {
22 void *buf;
23 unsigned int ncookies;
24 struct ldc_trans_cookie cookies[2];
25};
26
27struct vnet;
28struct vnet_port {
29 struct vio_driver_state vio;
30
31 struct hlist_node hash;
32 u8 raddr[ETH_ALEN];
33
34 struct vnet *vp;
35
36 struct vnet_tx_entry tx_bufs[VNET_TX_RING_SIZE];
37
38 struct list_head list;
39};
40
41static inline struct vnet_port *to_vnet_port(struct vio_driver_state *vio)
42{
43 return container_of(vio, struct vnet_port, vio);
44}
45
46#define VNET_PORT_HASH_SIZE 16
47#define VNET_PORT_HASH_MASK (VNET_PORT_HASH_SIZE - 1)
48
49static inline unsigned int vnet_hashfn(u8 *mac)
50{
51 unsigned int val = mac[4] ^ mac[5];
52
53 return val & (VNET_PORT_HASH_MASK);
54}
55
56struct vnet {
57 /* Protects port_list and port_hash. */
58 spinlock_t lock;
59
60 struct net_device *dev;
61
62 u32 msg_enable;
63 struct vio_dev *vdev;
64
65 struct list_head port_list;
66
67 struct hlist_head port_hash[VNET_PORT_HASH_SIZE];
68};
69
70#endif /* _SUNVNET_H */
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 2f3184184ad9..5ee14764fd74 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -64,8 +64,8 @@
64 64
65#define DRV_MODULE_NAME "tg3" 65#define DRV_MODULE_NAME "tg3"
66#define PFX DRV_MODULE_NAME ": " 66#define PFX DRV_MODULE_NAME ": "
67#define DRV_MODULE_VERSION "3.77" 67#define DRV_MODULE_VERSION "3.78"
68#define DRV_MODULE_RELDATE "May 31, 2007" 68#define DRV_MODULE_RELDATE "July 11, 2007"
69 69
70#define TG3_DEF_MAC_MODE 0 70#define TG3_DEF_MAC_MODE 0
71#define TG3_DEF_RX_MODE 0 71#define TG3_DEF_RX_MODE 0
@@ -721,6 +721,44 @@ static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
721 return ret; 721 return ret;
722} 722}
723 723
724static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
725{
726 u32 phy;
727
728 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
729 (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
730 return;
731
732 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
733 u32 ephy;
734
735 if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &ephy)) {
736 tg3_writephy(tp, MII_TG3_EPHY_TEST,
737 ephy | MII_TG3_EPHY_SHADOW_EN);
738 if (!tg3_readphy(tp, MII_TG3_EPHYTST_MISCCTRL, &phy)) {
739 if (enable)
740 phy |= MII_TG3_EPHYTST_MISCCTRL_MDIX;
741 else
742 phy &= ~MII_TG3_EPHYTST_MISCCTRL_MDIX;
743 tg3_writephy(tp, MII_TG3_EPHYTST_MISCCTRL, phy);
744 }
745 tg3_writephy(tp, MII_TG3_EPHY_TEST, ephy);
746 }
747 } else {
748 phy = MII_TG3_AUXCTL_MISC_RDSEL_MISC |
749 MII_TG3_AUXCTL_SHDWSEL_MISC;
750 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, phy) &&
751 !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy)) {
752 if (enable)
753 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
754 else
755 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
756 phy |= MII_TG3_AUXCTL_MISC_WREN;
757 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
758 }
759 }
760}
761
724static void tg3_phy_set_wirespeed(struct tg3 *tp) 762static void tg3_phy_set_wirespeed(struct tg3 *tp)
725{ 763{
726 u32 val; 764 u32 val;
@@ -1045,23 +1083,11 @@ out:
1045 } 1083 }
1046 1084
1047 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { 1085 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1048 u32 phy_reg;
1049
1050 /* adjust output voltage */ 1086 /* adjust output voltage */
1051 tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x12); 1087 tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x12);
1052
1053 if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &phy_reg)) {
1054 u32 phy_reg2;
1055
1056 tg3_writephy(tp, MII_TG3_EPHY_TEST,
1057 phy_reg | MII_TG3_EPHY_SHADOW_EN);
1058 /* Enable auto-MDIX */
1059 if (!tg3_readphy(tp, 0x10, &phy_reg2))
1060 tg3_writephy(tp, 0x10, phy_reg2 | 0x4000);
1061 tg3_writephy(tp, MII_TG3_EPHY_TEST, phy_reg);
1062 }
1063 } 1088 }
1064 1089
1090 tg3_phy_toggle_automdix(tp, 1);
1065 tg3_phy_set_wirespeed(tp); 1091 tg3_phy_set_wirespeed(tp);
1066 return 0; 1092 return 0;
1067} 1093}
@@ -1162,6 +1188,19 @@ static void tg3_frob_aux_power(struct tg3 *tp)
1162 } 1188 }
1163} 1189}
1164 1190
1191static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
1192{
1193 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
1194 return 1;
1195 else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411) {
1196 if (speed != SPEED_10)
1197 return 1;
1198 } else if (speed == SPEED_10)
1199 return 1;
1200
1201 return 0;
1202}
1203
1165static int tg3_setup_phy(struct tg3 *, int); 1204static int tg3_setup_phy(struct tg3 *, int);
1166 1205
1167#define RESET_KIND_SHUTDOWN 0 1206#define RESET_KIND_SHUTDOWN 0
@@ -1320,9 +1359,17 @@ static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
1320 else 1359 else
1321 mac_mode = MAC_MODE_PORT_MODE_MII; 1360 mac_mode = MAC_MODE_PORT_MODE_MII;
1322 1361
1323 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 || 1362 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
1324 !(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)) 1363 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
1325 mac_mode |= MAC_MODE_LINK_POLARITY; 1364 ASIC_REV_5700) {
1365 u32 speed = (tp->tg3_flags &
1366 TG3_FLAG_WOL_SPEED_100MB) ?
1367 SPEED_100 : SPEED_10;
1368 if (tg3_5700_link_polarity(tp, speed))
1369 mac_mode |= MAC_MODE_LINK_POLARITY;
1370 else
1371 mac_mode &= ~MAC_MODE_LINK_POLARITY;
1372 }
1326 } else { 1373 } else {
1327 mac_mode = MAC_MODE_PORT_MODE_TBI; 1374 mac_mode = MAC_MODE_PORT_MODE_TBI;
1328 } 1375 }
@@ -1990,15 +2037,12 @@ relink:
1990 if (tp->link_config.active_duplex == DUPLEX_HALF) 2037 if (tp->link_config.active_duplex == DUPLEX_HALF)
1991 tp->mac_mode |= MAC_MODE_HALF_DUPLEX; 2038 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
1992 2039
1993 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
1994 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) { 2040 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
1995 if ((tp->led_ctrl == LED_CTRL_MODE_PHY_2) || 2041 if (current_link_up == 1 &&
1996 (current_link_up == 1 && 2042 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
1997 tp->link_config.active_speed == SPEED_10))
1998 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1999 } else {
2000 if (current_link_up == 1)
2001 tp->mac_mode |= MAC_MODE_LINK_POLARITY; 2043 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
2044 else
2045 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
2002 } 2046 }
2003 2047
2004 /* ??? Without this setting Netgear GA302T PHY does not 2048 /* ??? Without this setting Netgear GA302T PHY does not
@@ -2639,6 +2683,9 @@ static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
2639 2683
2640 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS)); 2684 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
2641 udelay(40); 2685 udelay(40);
2686
2687 tw32_f(MAC_MODE, tp->mac_mode);
2688 udelay(40);
2642 } 2689 }
2643 2690
2644out: 2691out:
@@ -2698,10 +2745,6 @@ static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
2698 else 2745 else
2699 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status); 2746 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
2700 2747
2701 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
2702 tw32_f(MAC_MODE, tp->mac_mode);
2703 udelay(40);
2704
2705 tp->hw_status->status = 2748 tp->hw_status->status =
2706 (SD_STATUS_UPDATED | 2749 (SD_STATUS_UPDATED |
2707 (tp->hw_status->status & ~SD_STATUS_LINK_CHG)); 2750 (tp->hw_status->status & ~SD_STATUS_LINK_CHG));
@@ -3512,9 +3555,9 @@ static inline int tg3_irq_sync(struct tg3 *tp)
3512 */ 3555 */
3513static inline void tg3_full_lock(struct tg3 *tp, int irq_sync) 3556static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
3514{ 3557{
3558 spin_lock_bh(&tp->lock);
3515 if (irq_sync) 3559 if (irq_sync)
3516 tg3_irq_quiesce(tp); 3560 tg3_irq_quiesce(tp);
3517 spin_lock_bh(&tp->lock);
3518} 3561}
3519 3562
3520static inline void tg3_full_unlock(struct tg3 *tp) 3563static inline void tg3_full_unlock(struct tg3 *tp)
@@ -6444,6 +6487,10 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
6444 6487
6445 tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE | 6488 tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
6446 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE; 6489 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
6490 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
6491 !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
6492 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
6493 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
6447 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR); 6494 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
6448 udelay(40); 6495 udelay(40);
6449 6496
@@ -8271,7 +8318,7 @@ static int tg3_set_tx_csum(struct net_device *dev, u32 data)
8271 8318
8272 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || 8319 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8273 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) 8320 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8274 ethtool_op_set_tx_hw_csum(dev, data); 8321 ethtool_op_set_tx_ipv6_csum(dev, data);
8275 else 8322 else
8276 ethtool_op_set_tx_csum(dev, data); 8323 ethtool_op_set_tx_csum(dev, data);
8277 8324
@@ -8805,7 +8852,9 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
8805 return 0; 8852 return 0;
8806 8853
8807 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) | 8854 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
8808 MAC_MODE_PORT_INT_LPBACK | MAC_MODE_LINK_POLARITY; 8855 MAC_MODE_PORT_INT_LPBACK;
8856 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
8857 mac_mode |= MAC_MODE_LINK_POLARITY;
8809 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY) 8858 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
8810 mac_mode |= MAC_MODE_PORT_MODE_MII; 8859 mac_mode |= MAC_MODE_PORT_MODE_MII;
8811 else 8860 else
@@ -8824,19 +8873,18 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
8824 phytest | MII_TG3_EPHY_SHADOW_EN); 8873 phytest | MII_TG3_EPHY_SHADOW_EN);
8825 if (!tg3_readphy(tp, 0x1b, &phy)) 8874 if (!tg3_readphy(tp, 0x1b, &phy))
8826 tg3_writephy(tp, 0x1b, phy & ~0x20); 8875 tg3_writephy(tp, 0x1b, phy & ~0x20);
8827 if (!tg3_readphy(tp, 0x10, &phy))
8828 tg3_writephy(tp, 0x10, phy & ~0x4000);
8829 tg3_writephy(tp, MII_TG3_EPHY_TEST, phytest); 8876 tg3_writephy(tp, MII_TG3_EPHY_TEST, phytest);
8830 } 8877 }
8831 val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100; 8878 val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
8832 } else 8879 } else
8833 val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000; 8880 val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;
8834 8881
8882 tg3_phy_toggle_automdix(tp, 0);
8883
8835 tg3_writephy(tp, MII_BMCR, val); 8884 tg3_writephy(tp, MII_BMCR, val);
8836 udelay(40); 8885 udelay(40);
8837 8886
8838 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) | 8887 mac_mode = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
8839 MAC_MODE_LINK_POLARITY;
8840 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { 8888 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
8841 tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x1800); 8889 tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x1800);
8842 mac_mode |= MAC_MODE_PORT_MODE_MII; 8890 mac_mode |= MAC_MODE_PORT_MODE_MII;
@@ -8849,8 +8897,11 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
8849 udelay(10); 8897 udelay(10);
8850 tw32_f(MAC_RX_MODE, tp->rx_mode); 8898 tw32_f(MAC_RX_MODE, tp->rx_mode);
8851 } 8899 }
8852 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) { 8900 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
8853 mac_mode &= ~MAC_MODE_LINK_POLARITY; 8901 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)
8902 mac_mode &= ~MAC_MODE_LINK_POLARITY;
8903 else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411)
8904 mac_mode |= MAC_MODE_LINK_POLARITY;
8854 tg3_writephy(tp, MII_TG3_EXT_CTRL, 8905 tg3_writephy(tp, MII_TG3_EXT_CTRL,
8855 MII_TG3_EXT_CTRL_LNK3_LED_MODE); 8906 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
8856 } 8907 }
@@ -9116,10 +9167,10 @@ static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
9116 /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */ 9167 /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
9117 __tg3_set_rx_mode(dev); 9168 __tg3_set_rx_mode(dev);
9118 9169
9119 tg3_full_unlock(tp);
9120
9121 if (netif_running(dev)) 9170 if (netif_running(dev))
9122 tg3_netif_start(tp); 9171 tg3_netif_start(tp);
9172
9173 tg3_full_unlock(tp);
9123} 9174}
9124#endif 9175#endif
9125 9176
@@ -9410,11 +9461,13 @@ static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
9410 case FLASH_5755VENDOR_ATMEL_FLASH_1: 9461 case FLASH_5755VENDOR_ATMEL_FLASH_1:
9411 case FLASH_5755VENDOR_ATMEL_FLASH_2: 9462 case FLASH_5755VENDOR_ATMEL_FLASH_2:
9412 case FLASH_5755VENDOR_ATMEL_FLASH_3: 9463 case FLASH_5755VENDOR_ATMEL_FLASH_3:
9464 case FLASH_5755VENDOR_ATMEL_FLASH_5:
9413 tp->nvram_jedecnum = JEDEC_ATMEL; 9465 tp->nvram_jedecnum = JEDEC_ATMEL;
9414 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; 9466 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9415 tp->tg3_flags2 |= TG3_FLG2_FLASH; 9467 tp->tg3_flags2 |= TG3_FLG2_FLASH;
9416 tp->nvram_pagesize = 264; 9468 tp->nvram_pagesize = 264;
9417 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1) 9469 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
9470 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
9418 tp->nvram_size = (protect ? 0x3e200 : 0x80000); 9471 tp->nvram_size = (protect ? 0x3e200 : 0x80000);
9419 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2) 9472 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
9420 tp->nvram_size = (protect ? 0x1f200 : 0x40000); 9473 tp->nvram_size = (protect ? 0x1f200 : 0x40000);
@@ -10498,11 +10551,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
10498 continue; 10551 continue;
10499 } 10552 }
10500 if (pci_id->rev != PCI_ANY_ID) { 10553 if (pci_id->rev != PCI_ANY_ID) {
10501 u8 rev; 10554 if (bridge->revision > pci_id->rev)
10502
10503 pci_read_config_byte(bridge, PCI_REVISION_ID,
10504 &rev);
10505 if (rev > pci_id->rev)
10506 continue; 10555 continue;
10507 } 10556 }
10508 if (bridge->subordinate && 10557 if (bridge->subordinate &&
@@ -11944,12 +11993,11 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
11944 * checksumming. 11993 * checksumming.
11945 */ 11994 */
11946 if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) { 11995 if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
11996 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
11947 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || 11997 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
11948 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) 11998 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
11949 dev->features |= NETIF_F_HW_CSUM; 11999 dev->features |= NETIF_F_IPV6_CSUM;
11950 else 12000
11951 dev->features |= NETIF_F_IP_CSUM;
11952 dev->features |= NETIF_F_SG;
11953 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS; 12001 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
11954 } else 12002 } else
11955 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS; 12003 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index bd9f4f428e5b..d84e75e7365d 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -1467,6 +1467,7 @@
1467#define FLASH_5755VENDOR_ATMEL_FLASH_2 0x03400002 1467#define FLASH_5755VENDOR_ATMEL_FLASH_2 0x03400002
1468#define FLASH_5755VENDOR_ATMEL_FLASH_3 0x03400000 1468#define FLASH_5755VENDOR_ATMEL_FLASH_3 0x03400000
1469#define FLASH_5755VENDOR_ATMEL_FLASH_4 0x00000003 1469#define FLASH_5755VENDOR_ATMEL_FLASH_4 0x00000003
1470#define FLASH_5755VENDOR_ATMEL_FLASH_5 0x02000003
1470#define FLASH_5755VENDOR_ATMEL_EEPROM_64KHZ 0x03c00003 1471#define FLASH_5755VENDOR_ATMEL_EEPROM_64KHZ 0x03c00003
1471#define FLASH_5755VENDOR_ATMEL_EEPROM_376KHZ 0x03c00002 1472#define FLASH_5755VENDOR_ATMEL_EEPROM_376KHZ 0x03c00002
1472#define FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ 0x03000003 1473#define FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ 0x03000003
@@ -1642,6 +1643,11 @@
1642 1643
1643#define MII_TG3_AUX_CTRL 0x18 /* auxilliary control register */ 1644#define MII_TG3_AUX_CTRL 0x18 /* auxilliary control register */
1644 1645
1646#define MII_TG3_AUXCTL_MISC_WREN 0x8000
1647#define MII_TG3_AUXCTL_MISC_FORCE_AMDIX 0x0200
1648#define MII_TG3_AUXCTL_MISC_RDSEL_MISC 0x7000
1649#define MII_TG3_AUXCTL_SHDWSEL_MISC 0x0007
1650
1645#define MII_TG3_AUX_STAT 0x19 /* auxilliary status register */ 1651#define MII_TG3_AUX_STAT 0x19 /* auxilliary status register */
1646#define MII_TG3_AUX_STAT_LPASS 0x0004 1652#define MII_TG3_AUX_STAT_LPASS 0x0004
1647#define MII_TG3_AUX_STAT_SPDMASK 0x0700 1653#define MII_TG3_AUX_STAT_SPDMASK 0x0700
@@ -1667,6 +1673,9 @@
1667#define MII_TG3_EPHY_TEST 0x1f /* 5906 PHY register */ 1673#define MII_TG3_EPHY_TEST 0x1f /* 5906 PHY register */
1668#define MII_TG3_EPHY_SHADOW_EN 0x80 1674#define MII_TG3_EPHY_SHADOW_EN 0x80
1669 1675
1676#define MII_TG3_EPHYTST_MISCCTRL 0x10 /* 5906 EPHY misc ctrl shadow register */
1677#define MII_TG3_EPHYTST_MISCCTRL_MDIX 0x4000
1678
1670#define MII_TG3_TEST1 0x1e 1679#define MII_TG3_TEST1 0x1e
1671#define MII_TG3_TEST1_TRIM_EN 0x0010 1680#define MII_TG3_TEST1_TRIM_EN 0x0010
1672#define MII_TG3_TEST1_CRC_EN 0x8000 1681#define MII_TG3_TEST1_CRC_EN 0x8000
diff --git a/drivers/net/tlan.c b/drivers/net/tlan.c
index 106dc1ef0acb..74eb12107e68 100644
--- a/drivers/net/tlan.c
+++ b/drivers/net/tlan.c
@@ -533,7 +533,6 @@ static int __devinit TLan_probe1(struct pci_dev *pdev,
533 533
534 struct net_device *dev; 534 struct net_device *dev;
535 TLanPrivateInfo *priv; 535 TLanPrivateInfo *priv;
536 u8 pci_rev;
537 u16 device_id; 536 u16 device_id;
538 int reg, rc = -ENODEV; 537 int reg, rc = -ENODEV;
539 538
@@ -577,8 +576,6 @@ static int __devinit TLan_probe1(struct pci_dev *pdev,
577 goto err_out_free_dev; 576 goto err_out_free_dev;
578 } 577 }
579 578
580 pci_read_config_byte ( pdev, PCI_REVISION_ID, &pci_rev);
581
582 for ( reg= 0; reg <= 5; reg ++ ) { 579 for ( reg= 0; reg <= 5; reg ++ ) {
583 if (pci_resource_flags(pdev, reg) & IORESOURCE_IO) { 580 if (pci_resource_flags(pdev, reg) & IORESOURCE_IO) {
584 pci_io_base = pci_resource_start(pdev, reg); 581 pci_io_base = pci_resource_start(pdev, reg);
@@ -595,7 +592,7 @@ static int __devinit TLan_probe1(struct pci_dev *pdev,
595 592
596 dev->base_addr = pci_io_base; 593 dev->base_addr = pci_io_base;
597 dev->irq = pdev->irq; 594 dev->irq = pdev->irq;
598 priv->adapterRev = pci_rev; 595 priv->adapterRev = pdev->revision;
599 pci_set_master(pdev); 596 pci_set_master(pdev);
600 pci_set_drvdata(pdev, dev); 597 pci_set_drvdata(pdev, dev);
601 598
diff --git a/drivers/net/tulip/de4x5.c b/drivers/net/tulip/de4x5.c
index 42fca26afc50..09902891a6e6 100644
--- a/drivers/net/tulip/de4x5.c
+++ b/drivers/net/tulip/de4x5.c
@@ -2134,7 +2134,7 @@ srom_search(struct net_device *dev, struct pci_dev *pdev)
2134 u_short vendor, status; 2134 u_short vendor, status;
2135 u_int irq = 0, device; 2135 u_int irq = 0, device;
2136 u_long iobase = 0; /* Clear upper 32 bits in Alphas */ 2136 u_long iobase = 0; /* Clear upper 32 bits in Alphas */
2137 int i, j, cfrv; 2137 int i, j;
2138 struct de4x5_private *lp = netdev_priv(dev); 2138 struct de4x5_private *lp = netdev_priv(dev);
2139 struct list_head *walk; 2139 struct list_head *walk;
2140 2140
@@ -2150,7 +2150,6 @@ srom_search(struct net_device *dev, struct pci_dev *pdev)
2150 2150
2151 /* Get the chip configuration revision register */ 2151 /* Get the chip configuration revision register */
2152 pb = this_dev->bus->number; 2152 pb = this_dev->bus->number;
2153 pci_read_config_dword(this_dev, PCI_REVISION_ID, &cfrv);
2154 2153
2155 /* Set the device number information */ 2154 /* Set the device number information */
2156 lp->device = PCI_SLOT(this_dev->devfn); 2155 lp->device = PCI_SLOT(this_dev->devfn);
@@ -2158,7 +2157,8 @@ srom_search(struct net_device *dev, struct pci_dev *pdev)
2158 2157
2159 /* Set the chipset information */ 2158 /* Set the chipset information */
2160 if (is_DC2114x) { 2159 if (is_DC2114x) {
2161 device = ((cfrv & CFRV_RN) < DC2114x_BRK ? DC21142 : DC21143); 2160 device = ((this_dev->revision & CFRV_RN) < DC2114x_BRK
2161 ? DC21142 : DC21143);
2162 } 2162 }
2163 lp->chipset = device; 2163 lp->chipset = device;
2164 2164
@@ -2254,7 +2254,7 @@ static int __devinit de4x5_pci_probe (struct pci_dev *pdev,
2254 } 2254 }
2255 2255
2256 /* Get the chip configuration revision register */ 2256 /* Get the chip configuration revision register */
2257 pci_read_config_dword(pdev, PCI_REVISION_ID, &lp->cfrv); 2257 lp->cfrv = pdev->revision;
2258 2258
2259 /* Set the device number information */ 2259 /* Set the device number information */
2260 lp->device = dev_num; 2260 lp->device = dev_num;
diff --git a/drivers/net/tulip/dmfe.c b/drivers/net/tulip/dmfe.c
index 4ed67ff0e81e..dab74feb44bc 100644
--- a/drivers/net/tulip/dmfe.c
+++ b/drivers/net/tulip/dmfe.c
@@ -181,11 +181,12 @@
181 udelay(5); 181 udelay(5);
182 182
183#define __CHK_IO_SIZE(pci_id, dev_rev) \ 183#define __CHK_IO_SIZE(pci_id, dev_rev) \
184 (( ((pci_id)==PCI_DM9132_ID) || ((dev_rev) >= 0x02000030) ) ? \ 184 (( ((pci_id)==PCI_DM9132_ID) || ((dev_rev) >= 0x30) ) ? \
185 DM9102A_IO_SIZE: DM9102_IO_SIZE) 185 DM9102A_IO_SIZE: DM9102_IO_SIZE)
186 186
187#define CHK_IO_SIZE(pci_dev, dev_rev) \ 187#define CHK_IO_SIZE(pci_dev) \
188 (__CHK_IO_SIZE(((pci_dev)->device << 16) | (pci_dev)->vendor, dev_rev)) 188 (__CHK_IO_SIZE(((pci_dev)->device << 16) | (pci_dev)->vendor, \
189 (pci_dev)->revision))
189 190
190/* Sten Check */ 191/* Sten Check */
191#define DEVICE net_device 192#define DEVICE net_device
@@ -205,7 +206,7 @@ struct rx_desc {
205 206
206struct dmfe_board_info { 207struct dmfe_board_info {
207 u32 chip_id; /* Chip vendor/Device ID */ 208 u32 chip_id; /* Chip vendor/Device ID */
208 u32 chip_revision; /* Chip revision */ 209 u8 chip_revision; /* Chip revision */
209 struct DEVICE *next_dev; /* next device */ 210 struct DEVICE *next_dev; /* next device */
210 struct pci_dev *pdev; /* PCI device */ 211 struct pci_dev *pdev; /* PCI device */
211 spinlock_t lock; 212 spinlock_t lock;
@@ -359,7 +360,7 @@ static int __devinit dmfe_init_one (struct pci_dev *pdev,
359{ 360{
360 struct dmfe_board_info *db; /* board information structure */ 361 struct dmfe_board_info *db; /* board information structure */
361 struct net_device *dev; 362 struct net_device *dev;
362 u32 dev_rev, pci_pmr; 363 u32 pci_pmr;
363 int i, err; 364 int i, err;
364 365
365 DMFE_DBUG(0, "dmfe_init_one()", 0); 366 DMFE_DBUG(0, "dmfe_init_one()", 0);
@@ -392,10 +393,7 @@ static int __devinit dmfe_init_one (struct pci_dev *pdev,
392 goto err_out_disable; 393 goto err_out_disable;
393 } 394 }
394 395
395 /* Read Chip revision */ 396 if (pci_resource_len(pdev, 0) < (CHK_IO_SIZE(pdev)) ) {
396 pci_read_config_dword(pdev, PCI_REVISION_ID, &dev_rev);
397
398 if (pci_resource_len(pdev, 0) < (CHK_IO_SIZE(pdev, dev_rev)) ) {
399 printk(KERN_ERR DRV_NAME ": Allocated I/O size too small\n"); 397 printk(KERN_ERR DRV_NAME ": Allocated I/O size too small\n");
400 err = -ENODEV; 398 err = -ENODEV;
401 goto err_out_disable; 399 goto err_out_disable;
@@ -433,7 +431,7 @@ static int __devinit dmfe_init_one (struct pci_dev *pdev,
433 431
434 db->chip_id = ent->driver_data; 432 db->chip_id = ent->driver_data;
435 db->ioaddr = pci_resource_start(pdev, 0); 433 db->ioaddr = pci_resource_start(pdev, 0);
436 db->chip_revision = dev_rev; 434 db->chip_revision = pdev->revision;
437 db->wol_mode = 0; 435 db->wol_mode = 0;
438 436
439 db->pdev = pdev; 437 db->pdev = pdev;
@@ -455,7 +453,7 @@ static int __devinit dmfe_init_one (struct pci_dev *pdev,
455 453
456 pci_read_config_dword(pdev, 0x50, &pci_pmr); 454 pci_read_config_dword(pdev, 0x50, &pci_pmr);
457 pci_pmr &= 0x70000; 455 pci_pmr &= 0x70000;
458 if ( (pci_pmr == 0x10000) && (dev_rev == 0x02000031) ) 456 if ( (pci_pmr == 0x10000) && (db->chip_revision == 0x31) )
459 db->chip_type = 1; /* DM9102A E3 */ 457 db->chip_type = 1; /* DM9102A E3 */
460 else 458 else
461 db->chip_type = 0; 459 db->chip_type = 0;
@@ -553,7 +551,7 @@ static int dmfe_open(struct DEVICE *dev)
553 551
554 /* CR6 operation mode decision */ 552 /* CR6 operation mode decision */
555 if ( !chkmode || (db->chip_id == PCI_DM9132_ID) || 553 if ( !chkmode || (db->chip_id == PCI_DM9132_ID) ||
556 (db->chip_revision >= 0x02000030) ) { 554 (db->chip_revision >= 0x30) ) {
557 db->cr6_data |= DMFE_TXTH_256; 555 db->cr6_data |= DMFE_TXTH_256;
558 db->cr0_data = CR0_DEFAULT; 556 db->cr0_data = CR0_DEFAULT;
559 db->dm910x_chk_mode=4; /* Enter the normal mode */ 557 db->dm910x_chk_mode=4; /* Enter the normal mode */
@@ -1199,9 +1197,9 @@ static void dmfe_timer(unsigned long data)
1199 tmp_cr12 = inb(db->ioaddr + DCR12); /* DM9102/DM9102A */ 1197 tmp_cr12 = inb(db->ioaddr + DCR12); /* DM9102/DM9102A */
1200 1198
1201 if ( ((db->chip_id == PCI_DM9102_ID) && 1199 if ( ((db->chip_id == PCI_DM9102_ID) &&
1202 (db->chip_revision == 0x02000030)) || 1200 (db->chip_revision == 0x30)) ||
1203 ((db->chip_id == PCI_DM9132_ID) && 1201 ((db->chip_id == PCI_DM9132_ID) &&
1204 (db->chip_revision == 0x02000010)) ) { 1202 (db->chip_revision == 0x10)) ) {
1205 /* DM9102A Chip */ 1203 /* DM9102A Chip */
1206 if (tmp_cr12 & 2) 1204 if (tmp_cr12 & 2)
1207 link_ok = 0; 1205 link_ok = 0;
diff --git a/drivers/net/tulip/interrupt.c b/drivers/net/tulip/interrupt.c
index ea896777bcaf..53efd6694e75 100644
--- a/drivers/net/tulip/interrupt.c
+++ b/drivers/net/tulip/interrupt.c
@@ -197,8 +197,8 @@ int tulip_poll(struct net_device *dev, int *budget)
197 tp->rx_buffers[entry].mapping, 197 tp->rx_buffers[entry].mapping,
198 pkt_len, PCI_DMA_FROMDEVICE); 198 pkt_len, PCI_DMA_FROMDEVICE);
199#if ! defined(__alpha__) 199#if ! defined(__alpha__)
200 eth_copy_and_sum(skb, tp->rx_buffers[entry].skb->data, 200 skb_copy_to_linear_data(skb, tp->rx_buffers[entry].skb->data,
201 pkt_len, 0); 201 pkt_len);
202 skb_put(skb, pkt_len); 202 skb_put(skb, pkt_len);
203#else 203#else
204 memcpy(skb_put(skb, pkt_len), 204 memcpy(skb_put(skb, pkt_len),
@@ -420,8 +420,8 @@ static int tulip_rx(struct net_device *dev)
420 tp->rx_buffers[entry].mapping, 420 tp->rx_buffers[entry].mapping,
421 pkt_len, PCI_DMA_FROMDEVICE); 421 pkt_len, PCI_DMA_FROMDEVICE);
422#if ! defined(__alpha__) 422#if ! defined(__alpha__)
423 eth_copy_and_sum(skb, tp->rx_buffers[entry].skb->data, 423 skb_copy_to_linear_data(skb, tp->rx_buffers[entry].skb->data,
424 pkt_len, 0); 424 pkt_len);
425 skb_put(skb, pkt_len); 425 skb_put(skb, pkt_len);
426#else 426#else
427 memcpy(skb_put(skb, pkt_len), 427 memcpy(skb_put(skb, pkt_len),
diff --git a/drivers/net/tulip/tulip_core.c b/drivers/net/tulip/tulip_core.c
index 041af63f2811..f87d76981ab7 100644
--- a/drivers/net/tulip/tulip_core.c
+++ b/drivers/net/tulip/tulip_core.c
@@ -1155,7 +1155,7 @@ static void __devinit tulip_mwi_config (struct pci_dev *pdev,
1155 /* set or disable MWI in the standard PCI command bit. 1155 /* set or disable MWI in the standard PCI command bit.
1156 * Check for the case where mwi is desired but not available 1156 * Check for the case where mwi is desired but not available
1157 */ 1157 */
1158 if (csr0 & MWI) pci_set_mwi(pdev); 1158 if (csr0 & MWI) pci_try_set_mwi(pdev);
1159 else pci_clear_mwi(pdev); 1159 else pci_clear_mwi(pdev);
1160 1160
1161 /* read result from hardware (in case bit refused to enable) */ 1161 /* read result from hardware (in case bit refused to enable) */
@@ -1238,7 +1238,6 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
1238 }; 1238 };
1239 static int last_irq; 1239 static int last_irq;
1240 static int multiport_cnt; /* For four-port boards w/one EEPROM */ 1240 static int multiport_cnt; /* For four-port boards w/one EEPROM */
1241 u8 chip_rev;
1242 int i, irq; 1241 int i, irq;
1243 unsigned short sum; 1242 unsigned short sum;
1244 unsigned char *ee_data; 1243 unsigned char *ee_data;
@@ -1274,10 +1273,8 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
1274 1273
1275 if (pdev->vendor == 0x1282 && pdev->device == 0x9100) 1274 if (pdev->vendor == 0x1282 && pdev->device == 0x9100)
1276 { 1275 {
1277 u32 dev_rev;
1278 /* Read Chip revision */ 1276 /* Read Chip revision */
1279 pci_read_config_dword(pdev, PCI_REVISION_ID, &dev_rev); 1277 if (pdev->revision < 0x30)
1280 if(dev_rev < 0x02000030)
1281 { 1278 {
1282 printk(KERN_ERR PFX "skipping early DM9100 with Crc bug (use dmfe)\n"); 1279 printk(KERN_ERR PFX "skipping early DM9100 with Crc bug (use dmfe)\n");
1283 return -ENODEV; 1280 return -ENODEV;
@@ -1360,8 +1357,6 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
1360 if (!ioaddr) 1357 if (!ioaddr)
1361 goto err_out_free_res; 1358 goto err_out_free_res;
1362 1359
1363 pci_read_config_byte (pdev, PCI_REVISION_ID, &chip_rev);
1364
1365 /* 1360 /*
1366 * initialize private data structure 'tp' 1361 * initialize private data structure 'tp'
1367 * it is zeroed and aligned in alloc_etherdev 1362 * it is zeroed and aligned in alloc_etherdev
@@ -1382,7 +1377,7 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
1382 tp->flags = tulip_tbl[chip_idx].flags; 1377 tp->flags = tulip_tbl[chip_idx].flags;
1383 tp->pdev = pdev; 1378 tp->pdev = pdev;
1384 tp->base_addr = ioaddr; 1379 tp->base_addr = ioaddr;
1385 tp->revision = chip_rev; 1380 tp->revision = pdev->revision;
1386 tp->csr0 = csr0; 1381 tp->csr0 = csr0;
1387 spin_lock_init(&tp->lock); 1382 spin_lock_init(&tp->lock);
1388 spin_lock_init(&tp->mii_lock); 1383 spin_lock_init(&tp->mii_lock);
@@ -1399,7 +1394,7 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
1399 tulip_mwi_config (pdev, dev); 1394 tulip_mwi_config (pdev, dev);
1400#else 1395#else
1401 /* MWI is broken for DC21143 rev 65... */ 1396 /* MWI is broken for DC21143 rev 65... */
1402 if (chip_idx == DC21143 && chip_rev == 65) 1397 if (chip_idx == DC21143 && pdev->revision == 65)
1403 tp->csr0 &= ~MWI; 1398 tp->csr0 &= ~MWI;
1404#endif 1399#endif
1405 1400
@@ -1640,7 +1635,7 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
1640#else 1635#else
1641 "Port" 1636 "Port"
1642#endif 1637#endif
1643 " %#llx,", dev->name, chip_name, chip_rev, 1638 " %#llx,", dev->name, chip_name, pdev->revision,
1644 (unsigned long long) pci_resource_start(pdev, TULIP_BAR)); 1639 (unsigned long long) pci_resource_start(pdev, TULIP_BAR));
1645 pci_set_drvdata(pdev, dev); 1640 pci_set_drvdata(pdev, dev);
1646 1641
diff --git a/drivers/net/tulip/winbond-840.c b/drivers/net/tulip/winbond-840.c
index 38f3b99716b8..5824f6a35495 100644
--- a/drivers/net/tulip/winbond-840.c
+++ b/drivers/net/tulip/winbond-840.c
@@ -1232,7 +1232,7 @@ static int netdev_rx(struct net_device *dev)
1232 pci_dma_sync_single_for_cpu(np->pci_dev,np->rx_addr[entry], 1232 pci_dma_sync_single_for_cpu(np->pci_dev,np->rx_addr[entry],
1233 np->rx_skbuff[entry]->len, 1233 np->rx_skbuff[entry]->len,
1234 PCI_DMA_FROMDEVICE); 1234 PCI_DMA_FROMDEVICE);
1235 eth_copy_and_sum(skb, np->rx_skbuff[entry]->data, pkt_len, 0); 1235 skb_copy_to_linear_data(skb, np->rx_skbuff[entry]->data, pkt_len);
1236 skb_put(skb, pkt_len); 1236 skb_put(skb, pkt_len);
1237 pci_dma_sync_single_for_device(np->pci_dev,np->rx_addr[entry], 1237 pci_dma_sync_single_for_device(np->pci_dev,np->rx_addr[entry],
1238 np->rx_skbuff[entry]->len, 1238 np->rx_skbuff[entry]->len,
diff --git a/drivers/net/tulip/xircom_cb.c b/drivers/net/tulip/xircom_cb.c
index 2470b1ee33c0..16a54e6b8d4f 100644
--- a/drivers/net/tulip/xircom_cb.c
+++ b/drivers/net/tulip/xircom_cb.c
@@ -205,7 +205,6 @@ static int __devinit xircom_probe(struct pci_dev *pdev, const struct pci_device_
205{ 205{
206 struct net_device *dev = NULL; 206 struct net_device *dev = NULL;
207 struct xircom_private *private; 207 struct xircom_private *private;
208 unsigned char chip_rev;
209 unsigned long flags; 208 unsigned long flags;
210 unsigned short tmp16; 209 unsigned short tmp16;
211 enter("xircom_probe"); 210 enter("xircom_probe");
@@ -224,8 +223,6 @@ static int __devinit xircom_probe(struct pci_dev *pdev, const struct pci_device_
224 pci_read_config_word (pdev,PCI_STATUS, &tmp16); 223 pci_read_config_word (pdev,PCI_STATUS, &tmp16);
225 pci_write_config_word (pdev, PCI_STATUS,tmp16); 224 pci_write_config_word (pdev, PCI_STATUS,tmp16);
226 225
227 pci_read_config_byte(pdev, PCI_REVISION_ID, &chip_rev);
228
229 if (!request_region(pci_resource_start(pdev, 0), 128, "xircom_cb")) { 226 if (!request_region(pci_resource_start(pdev, 0), 128, "xircom_cb")) {
230 printk(KERN_ERR "xircom_probe: failed to allocate io-region\n"); 227 printk(KERN_ERR "xircom_probe: failed to allocate io-region\n");
231 return -ENODEV; 228 return -ENODEV;
@@ -286,7 +283,7 @@ static int __devinit xircom_probe(struct pci_dev *pdev, const struct pci_device_
286 goto reg_fail; 283 goto reg_fail;
287 } 284 }
288 285
289 printk(KERN_INFO "%s: Xircom cardbus revision %i at irq %i \n", dev->name, chip_rev, pdev->irq); 286 printk(KERN_INFO "%s: Xircom cardbus revision %i at irq %i \n", dev->name, pdev->revision, pdev->irq);
290 /* start the transmitter to get a heartbeat */ 287 /* start the transmitter to get a heartbeat */
291 /* TODO: send 2 dummy packets here */ 288 /* TODO: send 2 dummy packets here */
292 transceiver_voodoo(private); 289 transceiver_voodoo(private);
@@ -1208,7 +1205,7 @@ static void investigate_read_descriptor(struct net_device *dev,struct xircom_pri
1208 goto out; 1205 goto out;
1209 } 1206 }
1210 skb_reserve(skb, 2); 1207 skb_reserve(skb, 2);
1211 eth_copy_and_sum(skb, (unsigned char*)&card->rx_buffer[bufferoffset / 4], pkt_len, 0); 1208 skb_copy_to_linear_data(skb, (unsigned char*)&card->rx_buffer[bufferoffset / 4], pkt_len);
1212 skb_put(skb, pkt_len); 1209 skb_put(skb, pkt_len);
1213 skb->protocol = eth_type_trans(skb, dev); 1210 skb->protocol = eth_type_trans(skb, dev);
1214 netif_rx(skb); 1211 netif_rx(skb);
diff --git a/drivers/net/tulip/xircom_tulip_cb.c b/drivers/net/tulip/xircom_tulip_cb.c
index f64172927377..fc439f333350 100644
--- a/drivers/net/tulip/xircom_tulip_cb.c
+++ b/drivers/net/tulip/xircom_tulip_cb.c
@@ -524,7 +524,6 @@ static int __devinit xircom_init_one(struct pci_dev *pdev, const struct pci_devi
524 int chip_idx = id->driver_data; 524 int chip_idx = id->driver_data;
525 long ioaddr; 525 long ioaddr;
526 int i; 526 int i;
527 u8 chip_rev;
528 527
529/* when built into the kernel, we only print version if device is found */ 528/* when built into the kernel, we only print version if device is found */
530#ifndef MODULE 529#ifndef MODULE
@@ -620,9 +619,8 @@ static int __devinit xircom_init_one(struct pci_dev *pdev, const struct pci_devi
620 if (register_netdev(dev)) 619 if (register_netdev(dev))
621 goto err_out_cleardev; 620 goto err_out_cleardev;
622 621
623 pci_read_config_byte(pdev, PCI_REVISION_ID, &chip_rev);
624 printk(KERN_INFO "%s: %s rev %d at %#3lx,", 622 printk(KERN_INFO "%s: %s rev %d at %#3lx,",
625 dev->name, xircom_tbl[chip_idx].chip_name, chip_rev, ioaddr); 623 dev->name, xircom_tbl[chip_idx].chip_name, pdev->revision, ioaddr);
626 for (i = 0; i < 6; i++) 624 for (i = 0; i < 6; i++)
627 printk("%c%2.2X", i ? ':' : ' ', dev->dev_addr[i]); 625 printk("%c%2.2X", i ? ':' : ' ', dev->dev_addr[i]);
628 printk(", IRQ %d.\n", dev->irq); 626 printk(", IRQ %d.\n", dev->irq);
@@ -1242,8 +1240,8 @@ xircom_rx(struct net_device *dev)
1242 && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) { 1240 && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
1243 skb_reserve(skb, 2); /* 16 byte align the IP header */ 1241 skb_reserve(skb, 2); /* 16 byte align the IP header */
1244#if ! defined(__alpha__) 1242#if ! defined(__alpha__)
1245 eth_copy_and_sum(skb, bus_to_virt(tp->rx_ring[entry].buffer1), 1243 skb_copy_to_linear_data(skb, bus_to_virt(tp->rx_ring[entry].buffer1),
1246 pkt_len, 0); 1244 pkt_len);
1247 skb_put(skb, pkt_len); 1245 skb_put(skb, pkt_len);
1248#else 1246#else
1249 memcpy(skb_put(skb, pkt_len), 1247 memcpy(skb_put(skb, pkt_len),
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index a2c6caaaae93..62b2b3005019 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -432,6 +432,7 @@ static void tun_setup(struct net_device *dev)
432 init_waitqueue_head(&tun->read_wait); 432 init_waitqueue_head(&tun->read_wait);
433 433
434 tun->owner = -1; 434 tun->owner = -1;
435 tun->group = -1;
435 436
436 SET_MODULE_OWNER(dev); 437 SET_MODULE_OWNER(dev);
437 dev->open = tun_net_open; 438 dev->open = tun_net_open;
@@ -467,8 +468,11 @@ static int tun_set_iff(struct file *file, struct ifreq *ifr)
467 return -EBUSY; 468 return -EBUSY;
468 469
469 /* Check permissions */ 470 /* Check permissions */
470 if (tun->owner != -1 && 471 if (((tun->owner != -1 &&
471 current->euid != tun->owner && !capable(CAP_NET_ADMIN)) 472 current->euid != tun->owner) ||
473 (tun->group != -1 &&
474 current->egid != tun->group)) &&
475 !capable(CAP_NET_ADMIN))
472 return -EPERM; 476 return -EPERM;
473 } 477 }
474 else if (__dev_get_by_name(ifr->ifr_name)) 478 else if (__dev_get_by_name(ifr->ifr_name))
@@ -610,6 +614,13 @@ static int tun_chr_ioctl(struct inode *inode, struct file *file,
610 DBG(KERN_INFO "%s: owner set to %d\n", tun->dev->name, tun->owner); 614 DBG(KERN_INFO "%s: owner set to %d\n", tun->dev->name, tun->owner);
611 break; 615 break;
612 616
617 case TUNSETGROUP:
618 /* Set group of the device */
619 tun->group= (gid_t) arg;
620
621 DBG(KERN_INFO "%s: group set to %d\n", tun->dev->name, tun->group);
622 break;
623
613 case TUNSETLINK: 624 case TUNSETLINK:
614 /* Only allow setting the type when the interface is down */ 625 /* Only allow setting the type when the interface is down */
615 if (tun->dev->flags & IFF_UP) { 626 if (tun->dev->flags & IFF_UP) {
diff --git a/drivers/net/typhoon.c b/drivers/net/typhoon.c
index 15b2fb8aa492..03587205546e 100644
--- a/drivers/net/typhoon.c
+++ b/drivers/net/typhoon.c
@@ -1703,7 +1703,7 @@ typhoon_rx(struct typhoon *tp, struct basic_ring *rxRing, volatile u32 * ready,
1703 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, 1703 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr,
1704 PKT_BUF_SZ, 1704 PKT_BUF_SZ,
1705 PCI_DMA_FROMDEVICE); 1705 PCI_DMA_FROMDEVICE);
1706 eth_copy_and_sum(new_skb, skb->data, pkt_len, 0); 1706 skb_copy_to_linear_data(new_skb, skb->data, pkt_len);
1707 pci_dma_sync_single_for_device(tp->pdev, dma_addr, 1707 pci_dma_sync_single_for_device(tp->pdev, dma_addr,
1708 PKT_BUF_SZ, 1708 PKT_BUF_SZ,
1709 PCI_DMA_FROMDEVICE); 1709 PCI_DMA_FROMDEVICE);
@@ -2267,12 +2267,6 @@ need_resume:
2267 typhoon_resume(pdev); 2267 typhoon_resume(pdev);
2268 return -EBUSY; 2268 return -EBUSY;
2269} 2269}
2270
2271static int
2272typhoon_enable_wake(struct pci_dev *pdev, pci_power_t state, int enable)
2273{
2274 return pci_enable_wake(pdev, state, enable);
2275}
2276#endif 2270#endif
2277 2271
2278static int __devinit 2272static int __devinit
@@ -2636,7 +2630,6 @@ static struct pci_driver typhoon_driver = {
2636#ifdef CONFIG_PM 2630#ifdef CONFIG_PM
2637 .suspend = typhoon_suspend, 2631 .suspend = typhoon_suspend,
2638 .resume = typhoon_resume, 2632 .resume = typhoon_resume,
2639 .enable_wake = typhoon_enable_wake,
2640#endif 2633#endif
2641}; 2634};
2642 2635
diff --git a/drivers/net/usb/catc.c b/drivers/net/usb/catc.c
index 86e90c59d551..76752d84a30f 100644
--- a/drivers/net/usb/catc.c
+++ b/drivers/net/usb/catc.c
@@ -255,7 +255,7 @@ static void catc_rx_done(struct urb *urb)
255 if (!(skb = dev_alloc_skb(pkt_len))) 255 if (!(skb = dev_alloc_skb(pkt_len)))
256 return; 256 return;
257 257
258 eth_copy_and_sum(skb, pkt_start + pkt_offset, pkt_len, 0); 258 skb_copy_to_linear_data(skb, pkt_start + pkt_offset, pkt_len);
259 skb_put(skb, pkt_len); 259 skb_put(skb, pkt_len);
260 260
261 skb->protocol = eth_type_trans(skb, catc->netdev); 261 skb->protocol = eth_type_trans(skb, catc->netdev);
diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c
index 60d29440f316..524dc5f5e46d 100644
--- a/drivers/net/usb/kaweth.c
+++ b/drivers/net/usb/kaweth.c
@@ -635,7 +635,7 @@ static void kaweth_usb_receive(struct urb *urb)
635 635
636 skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */ 636 skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
637 637
638 eth_copy_and_sum(skb, kaweth->rx_buf + 2, pkt_len, 0); 638 skb_copy_to_linear_data(skb, kaweth->rx_buf + 2, pkt_len);
639 639
640 skb_put(skb, pkt_len); 640 skb_put(skb, pkt_len);
641 641
diff --git a/drivers/net/via-rhine.c b/drivers/net/via-rhine.c
index adea290a9d5e..f51c2c138f10 100644
--- a/drivers/net/via-rhine.c
+++ b/drivers/net/via-rhine.c
@@ -622,7 +622,6 @@ static int __devinit rhine_init_one(struct pci_dev *pdev,
622 struct net_device *dev; 622 struct net_device *dev;
623 struct rhine_private *rp; 623 struct rhine_private *rp;
624 int i, rc; 624 int i, rc;
625 u8 pci_rev;
626 u32 quirks; 625 u32 quirks;
627 long pioaddr; 626 long pioaddr;
628 long memaddr; 627 long memaddr;
@@ -642,27 +641,25 @@ static int __devinit rhine_init_one(struct pci_dev *pdev,
642 printk(version); 641 printk(version);
643#endif 642#endif
644 643
645 pci_read_config_byte(pdev, PCI_REVISION_ID, &pci_rev);
646
647 io_size = 256; 644 io_size = 256;
648 phy_id = 0; 645 phy_id = 0;
649 quirks = 0; 646 quirks = 0;
650 name = "Rhine"; 647 name = "Rhine";
651 if (pci_rev < VTunknown0) { 648 if (pdev->revision < VTunknown0) {
652 quirks = rqRhineI; 649 quirks = rqRhineI;
653 io_size = 128; 650 io_size = 128;
654 } 651 }
655 else if (pci_rev >= VT6102) { 652 else if (pdev->revision >= VT6102) {
656 quirks = rqWOL | rqForceReset; 653 quirks = rqWOL | rqForceReset;
657 if (pci_rev < VT6105) { 654 if (pdev->revision < VT6105) {
658 name = "Rhine II"; 655 name = "Rhine II";
659 quirks |= rqStatusWBRace; /* Rhine-II exclusive */ 656 quirks |= rqStatusWBRace; /* Rhine-II exclusive */
660 } 657 }
661 else { 658 else {
662 phy_id = 1; /* Integrated PHY, phy_id fixed to 1 */ 659 phy_id = 1; /* Integrated PHY, phy_id fixed to 1 */
663 if (pci_rev >= VT6105_B0) 660 if (pdev->revision >= VT6105_B0)
664 quirks |= rq6patterns; 661 quirks |= rq6patterns;
665 if (pci_rev < VT6105M) 662 if (pdev->revision < VT6105M)
666 name = "Rhine III"; 663 name = "Rhine III";
667 else 664 else
668 name = "Rhine III (Management Adapter)"; 665 name = "Rhine III (Management Adapter)";
@@ -1492,9 +1489,9 @@ static int rhine_rx(struct net_device *dev, int limit)
1492 rp->rx_buf_sz, 1489 rp->rx_buf_sz,
1493 PCI_DMA_FROMDEVICE); 1490 PCI_DMA_FROMDEVICE);
1494 1491
1495 eth_copy_and_sum(skb, 1492 skb_copy_to_linear_data(skb,
1496 rp->rx_skbuff[entry]->data, 1493 rp->rx_skbuff[entry]->data,
1497 pkt_len, 0); 1494 pkt_len);
1498 skb_put(skb, pkt_len); 1495 skb_put(skb, pkt_len);
1499 pci_dma_sync_single_for_device(rp->pdev, 1496 pci_dma_sync_single_for_device(rp->pdev,
1500 rp->rx_skbuff_dma[entry], 1497 rp->rx_skbuff_dma[entry],
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
index b670b97bcfde..f331843d1102 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -890,8 +890,7 @@ static void __devinit velocity_init_info(struct pci_dev *pdev,
890 890
891static int __devinit velocity_get_pci_info(struct velocity_info *vptr, struct pci_dev *pdev) 891static int __devinit velocity_get_pci_info(struct velocity_info *vptr, struct pci_dev *pdev)
892{ 892{
893 if (pci_read_config_byte(pdev, PCI_REVISION_ID, &vptr->rev_id) < 0) 893 vptr->rev_id = pdev->revision;
894 return -EIO;
895 894
896 pci_set_master(pdev); 895 pci_set_master(pdev);
897 896
diff --git a/drivers/net/wan/Kconfig b/drivers/net/wan/Kconfig
index 4fc8681bc110..a3df09ee729f 100644
--- a/drivers/net/wan/Kconfig
+++ b/drivers/net/wan/Kconfig
@@ -61,7 +61,7 @@ config COSA
61# 61#
62config LANMEDIA 62config LANMEDIA
63 tristate "LanMedia Corp. SSI/V.35, T1/E1, HSSI, T3 boards" 63 tristate "LanMedia Corp. SSI/V.35, T1/E1, HSSI, T3 boards"
64 depends on PCI 64 depends on PCI && VIRT_TO_BUS
65 ---help--- 65 ---help---
66 Driver for the following Lan Media family of serial boards: 66 Driver for the following Lan Media family of serial boards:
67 67
diff --git a/drivers/net/wan/pc300_drv.c b/drivers/net/wan/pc300_drv.c
index 999bf71937ca..ec1c556a47ca 100644
--- a/drivers/net/wan/pc300_drv.c
+++ b/drivers/net/wan/pc300_drv.c
@@ -3439,7 +3439,6 @@ static int __devinit
3439cpc_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) 3439cpc_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3440{ 3440{
3441 static int first_time = 1; 3441 static int first_time = 1;
3442 ucchar cpc_rev_id;
3443 int err, eeprom_outdated = 0; 3442 int err, eeprom_outdated = 0;
3444 ucshort device_id; 3443 ucshort device_id;
3445 pc300_t *card; 3444 pc300_t *card;
@@ -3480,7 +3479,6 @@ cpc_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3480 card->hw.falcsize = pci_resource_len(pdev, 4); 3479 card->hw.falcsize = pci_resource_len(pdev, 4);
3481 card->hw.plxphys = pci_resource_start(pdev, 5); 3480 card->hw.plxphys = pci_resource_start(pdev, 5);
3482 card->hw.plxsize = pci_resource_len(pdev, 5); 3481 card->hw.plxsize = pci_resource_len(pdev, 5);
3483 pci_read_config_byte(pdev, PCI_REVISION_ID, &cpc_rev_id);
3484 3482
3485 switch (device_id) { 3483 switch (device_id) {
3486 case PCI_DEVICE_ID_PC300_RX_1: 3484 case PCI_DEVICE_ID_PC300_RX_1:
@@ -3498,7 +3496,7 @@ cpc_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3498 } 3496 }
3499#ifdef PC300_DEBUG_PCI 3497#ifdef PC300_DEBUG_PCI
3500 printk("cpc (bus=0x0%x,pci_id=0x%x,", pdev->bus->number, pdev->devfn); 3498 printk("cpc (bus=0x0%x,pci_id=0x%x,", pdev->bus->number, pdev->devfn);
3501 printk("rev_id=%d) IRQ%d\n", cpc_rev_id, card->hw.irq); 3499 printk("rev_id=%d) IRQ%d\n", pdev->revision, card->hw.irq);
3502 printk("cpc:found ramaddr=0x%08lx plxaddr=0x%08lx " 3500 printk("cpc:found ramaddr=0x%08lx plxaddr=0x%08lx "
3503 "ctladdr=0x%08lx falcaddr=0x%08lx\n", 3501 "ctladdr=0x%08lx falcaddr=0x%08lx\n",
3504 card->hw.ramphys, card->hw.plxphys, card->hw.scaphys, 3502 card->hw.ramphys, card->hw.plxphys, card->hw.scaphys,
diff --git a/drivers/net/wan/pc300too.c b/drivers/net/wan/pc300too.c
index aff05dba720a..dfbd3b00f03b 100644
--- a/drivers/net/wan/pc300too.c
+++ b/drivers/net/wan/pc300too.c
@@ -311,7 +311,6 @@ static int __devinit pc300_pci_init_one(struct pci_dev *pdev,
311 const struct pci_device_id *ent) 311 const struct pci_device_id *ent)
312{ 312{
313 card_t *card; 313 card_t *card;
314 u8 rev_id;
315 u32 __iomem *p; 314 u32 __iomem *p;
316 int i; 315 int i;
317 u32 ramsize; 316 u32 ramsize;
@@ -366,7 +365,6 @@ static int __devinit pc300_pci_init_one(struct pci_dev *pdev,
366 return -ENOMEM; 365 return -ENOMEM;
367 } 366 }
368 367
369 pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);
370 if (pci_resource_len(pdev, 0) != PC300_PLX_SIZE || 368 if (pci_resource_len(pdev, 0) != PC300_PLX_SIZE ||
371 pci_resource_len(pdev, 2) != PC300_SCA_SIZE || 369 pci_resource_len(pdev, 2) != PC300_SCA_SIZE ||
372 pci_resource_len(pdev, 3) < 16384) { 370 pci_resource_len(pdev, 3) < 16384) {
diff --git a/drivers/net/wan/pci200syn.c b/drivers/net/wan/pci200syn.c
index ca06a00d9d86..7f720de2e9f0 100644
--- a/drivers/net/wan/pci200syn.c
+++ b/drivers/net/wan/pci200syn.c
@@ -289,7 +289,6 @@ static int __devinit pci200_pci_init_one(struct pci_dev *pdev,
289 const struct pci_device_id *ent) 289 const struct pci_device_id *ent)
290{ 290{
291 card_t *card; 291 card_t *card;
292 u8 rev_id;
293 u32 __iomem *p; 292 u32 __iomem *p;
294 int i; 293 int i;
295 u32 ramsize; 294 u32 ramsize;
@@ -330,7 +329,6 @@ static int __devinit pci200_pci_init_one(struct pci_dev *pdev,
330 return -ENOMEM; 329 return -ENOMEM;
331 } 330 }
332 331
333 pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);
334 if (pci_resource_len(pdev, 0) != PCI200SYN_PLX_SIZE || 332 if (pci_resource_len(pdev, 0) != PCI200SYN_PLX_SIZE ||
335 pci_resource_len(pdev, 2) != PCI200SYN_SCA_SIZE || 333 pci_resource_len(pdev, 2) != PCI200SYN_SCA_SIZE ||
336 pci_resource_len(pdev, 3) < 16384) { 334 pci_resource_len(pdev, 3) < 16384) {
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_main.c b/drivers/net/wireless/bcm43xx/bcm43xx_main.c
index ef6b253a92ce..c5d6753a55ea 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_main.c
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_main.c
@@ -3741,10 +3741,8 @@ static int bcm43xx_attach_board(struct bcm43xx_private *bcm)
3741 &bcm->board_type); 3741 &bcm->board_type);
3742 if (err) 3742 if (err)
3743 goto err_iounmap; 3743 goto err_iounmap;
3744 err = bcm43xx_pci_read_config16(bcm, PCI_REVISION_ID, 3744
3745 &bcm->board_revision); 3745 bcm->board_revision = bcm->pci_dev->revision;
3746 if (err)
3747 goto err_iounmap;
3748 3746
3749 err = bcm43xx_chipset_attach(bcm); 3747 err = bcm43xx_chipset_attach(bcm);
3750 if (err) 3748 if (err)
diff --git a/drivers/net/wireless/hostap/hostap_pci.c b/drivers/net/wireless/hostap/hostap_pci.c
index 0cd48d151f5e..7da3664b8515 100644
--- a/drivers/net/wireless/hostap/hostap_pci.c
+++ b/drivers/net/wireless/hostap/hostap_pci.c
@@ -453,8 +453,6 @@ static struct pci_driver prism2_pci_drv_id = {
453 .suspend = prism2_pci_suspend, 453 .suspend = prism2_pci_suspend,
454 .resume = prism2_pci_resume, 454 .resume = prism2_pci_resume,
455#endif /* CONFIG_PM */ 455#endif /* CONFIG_PM */
456 /* Linux 2.4.6 added save_state and enable_wake that are not used here
457 */
458}; 456};
459 457
460 458
diff --git a/drivers/net/wireless/hostap/hostap_plx.c b/drivers/net/wireless/hostap/hostap_plx.c
index 0183df757b3e..040dc3e36410 100644
--- a/drivers/net/wireless/hostap/hostap_plx.c
+++ b/drivers/net/wireless/hostap/hostap_plx.c
@@ -613,9 +613,6 @@ static struct pci_driver prism2_plx_drv_id = {
613 .id_table = prism2_plx_id_table, 613 .id_table = prism2_plx_id_table,
614 .probe = prism2_plx_probe, 614 .probe = prism2_plx_probe,
615 .remove = prism2_plx_remove, 615 .remove = prism2_plx_remove,
616 .suspend = NULL,
617 .resume = NULL,
618 .enable_wake = NULL
619}; 616};
620 617
621 618
diff --git a/drivers/net/wireless/prism54/islpci_hotplug.c b/drivers/net/wireless/prism54/islpci_hotplug.c
index 3dcb13bb7d57..af2e4f2405f2 100644
--- a/drivers/net/wireless/prism54/islpci_hotplug.c
+++ b/drivers/net/wireless/prism54/islpci_hotplug.c
@@ -87,7 +87,6 @@ static struct pci_driver prism54_driver = {
87 .remove = prism54_remove, 87 .remove = prism54_remove,
88 .suspend = prism54_suspend, 88 .suspend = prism54_suspend,
89 .resume = prism54_resume, 89 .resume = prism54_resume,
90 /* .enable_wake ; we don't support this yet */
91}; 90};
92 91
93/****************************************************************************** 92/******************************************************************************
@@ -167,8 +166,7 @@ prism54_probe(struct pci_dev *pdev, const struct pci_device_id *id)
167 pci_set_master(pdev); 166 pci_set_master(pdev);
168 167
169 /* enable MWI */ 168 /* enable MWI */
170 if (!pci_set_mwi(pdev)) 169 pci_try_set_mwi(pdev);
171 printk(KERN_INFO "%s: pci_set_mwi(pdev) succeeded\n", DRV_NAME);
172 170
173 /* setup the network device interface and its structure */ 171 /* setup the network device interface and its structure */
174 if (!(ndev = islpci_setup(pdev))) { 172 if (!(ndev = islpci_setup(pdev))) {
diff --git a/drivers/net/wireless/wl3501_cs.c b/drivers/net/wireless/wl3501_cs.c
index ce9230b2f630..c8b5c2271938 100644
--- a/drivers/net/wireless/wl3501_cs.c
+++ b/drivers/net/wireless/wl3501_cs.c
@@ -1011,7 +1011,7 @@ static inline void wl3501_md_ind_interrupt(struct net_device *dev,
1011 } else { 1011 } else {
1012 skb->dev = dev; 1012 skb->dev = dev;
1013 skb_reserve(skb, 2); /* IP headers on 16 bytes boundaries */ 1013 skb_reserve(skb, 2); /* IP headers on 16 bytes boundaries */
1014 eth_copy_and_sum(skb, (unsigned char *)&sig.daddr, 12, 0); 1014 skb_copy_to_linear_data(skb, (unsigned char *)&sig.daddr, 12);
1015 wl3501_receive(this, skb->data, pkt_len); 1015 wl3501_receive(this, skb->data, pkt_len);
1016 skb_put(skb, pkt_len); 1016 skb_put(skb, pkt_len);
1017 skb->protocol = eth_type_trans(skb, dev); 1017 skb->protocol = eth_type_trans(skb, dev);
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
index a1a54748ccbd..28d41a29d7b1 100644
--- a/drivers/net/wireless/zd1211rw/zd_usb.c
+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
@@ -15,7 +15,6 @@
15 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 15 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
16 */ 16 */
17 17
18#include <asm/unaligned.h>
19#include <linux/kernel.h> 18#include <linux/kernel.h>
20#include <linux/init.h> 19#include <linux/init.h>
21#include <linux/module.h> 20#include <linux/module.h>
@@ -26,6 +25,7 @@
26#include <linux/usb.h> 25#include <linux/usb.h>
27#include <linux/workqueue.h> 26#include <linux/workqueue.h>
28#include <net/ieee80211.h> 27#include <net/ieee80211.h>
28#include <asm/unaligned.h>
29 29
30#include "zd_def.h" 30#include "zd_def.h"
31#include "zd_netdev.h" 31#include "zd_netdev.h"
diff --git a/drivers/net/yellowfin.c b/drivers/net/yellowfin.c
index f2a90a7fa2d6..870c5393c21a 100644
--- a/drivers/net/yellowfin.c
+++ b/drivers/net/yellowfin.c
@@ -1137,7 +1137,7 @@ static int yellowfin_rx(struct net_device *dev)
1137 if (skb == NULL) 1137 if (skb == NULL)
1138 break; 1138 break;
1139 skb_reserve(skb, 2); /* 16 byte align the IP header */ 1139 skb_reserve(skb, 2); /* 16 byte align the IP header */
1140 eth_copy_and_sum(skb, rx_skb->data, pkt_len, 0); 1140 skb_copy_to_linear_data(skb, rx_skb->data, pkt_len);
1141 skb_put(skb, pkt_len); 1141 skb_put(skb, pkt_len);
1142 pci_dma_sync_single_for_device(yp->pci_dev, desc->addr, 1142 pci_dma_sync_single_for_device(yp->pci_dev, desc->addr,
1143 yp->rx_buf_sz, 1143 yp->rx_buf_sz,