aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/net
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/net')
-rw-r--r--drivers/net/Kconfig6
-rw-r--r--drivers/net/Makefile2
-rw-r--r--drivers/net/cpmac.c2
-rw-r--r--drivers/net/fec.c24
-rw-r--r--drivers/net/lguest_net.c555
-rw-r--r--drivers/net/mlx4/fw.c2
-rw-r--r--drivers/net/mlx4/icm.c14
-rw-r--r--drivers/net/mv643xx_eth.c1
-rw-r--r--drivers/net/niu.c34
-rw-r--r--drivers/net/ppp_mppe.c6
-rw-r--r--drivers/net/r8169.c406
-rw-r--r--drivers/net/tg3.c95
-rw-r--r--drivers/net/tg3.h11
-rw-r--r--drivers/net/virtio_net.c435
14 files changed, 817 insertions, 776 deletions
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index eb75773a9ebb..86b8641b4664 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -3103,4 +3103,10 @@ config NETPOLL_TRAP
3103config NET_POLL_CONTROLLER 3103config NET_POLL_CONTROLLER
3104 def_bool NETPOLL 3104 def_bool NETPOLL
3105 3105
3106config VIRTIO_NET
3107 tristate "Virtio network driver (EXPERIMENTAL)"
3108 depends on EXPERIMENTAL && VIRTIO
3109 ---help---
3110 This is the virtual network driver for lguest. Say Y or M.
3111
3106endif # NETDEVICES 3112endif # NETDEVICES
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 22f78cbd126b..593262065c9b 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -183,7 +183,6 @@ obj-$(CONFIG_ZORRO8390) += zorro8390.o
183obj-$(CONFIG_HPLANCE) += hplance.o 7990.o 183obj-$(CONFIG_HPLANCE) += hplance.o 7990.o
184obj-$(CONFIG_MVME147_NET) += mvme147.o 7990.o 184obj-$(CONFIG_MVME147_NET) += mvme147.o 7990.o
185obj-$(CONFIG_EQUALIZER) += eql.o 185obj-$(CONFIG_EQUALIZER) += eql.o
186obj-$(CONFIG_LGUEST_NET) += lguest_net.o
187obj-$(CONFIG_MIPS_JAZZ_SONIC) += jazzsonic.o 186obj-$(CONFIG_MIPS_JAZZ_SONIC) += jazzsonic.o
188obj-$(CONFIG_MIPS_AU1X00_ENET) += au1000_eth.o 187obj-$(CONFIG_MIPS_AU1X00_ENET) += au1000_eth.o
189obj-$(CONFIG_MIPS_SIM_NET) += mipsnet.o 188obj-$(CONFIG_MIPS_SIM_NET) += mipsnet.o
@@ -243,3 +242,4 @@ obj-$(CONFIG_FS_ENET) += fs_enet/
243 242
244obj-$(CONFIG_NETXEN_NIC) += netxen/ 243obj-$(CONFIG_NETXEN_NIC) += netxen/
245obj-$(CONFIG_NIU) += niu.o 244obj-$(CONFIG_NIU) += niu.o
245obj-$(CONFIG_VIRTIO_NET) += virtio_net.o
diff --git a/drivers/net/cpmac.c b/drivers/net/cpmac.c
index ed53aaab4c02..ae419736158e 100644
--- a/drivers/net/cpmac.c
+++ b/drivers/net/cpmac.c
@@ -471,7 +471,7 @@ static int cpmac_start_xmit(struct sk_buff *skb, struct net_device *dev)
471 } 471 }
472 472
473 len = max(skb->len, ETH_ZLEN); 473 len = max(skb->len, ETH_ZLEN);
474 queue = skb->queue_mapping; 474 queue = skb_get_queue_mapping(skb);
475#ifdef CONFIG_NETDEVICES_MULTIQUEUE 475#ifdef CONFIG_NETDEVICES_MULTIQUEUE
476 netif_stop_subqueue(dev, queue); 476 netif_stop_subqueue(dev, queue);
477#else 477#else
diff --git a/drivers/net/fec.c b/drivers/net/fec.c
index 2b5782056dda..0fbf1bbbaee9 100644
--- a/drivers/net/fec.c
+++ b/drivers/net/fec.c
@@ -751,13 +751,11 @@ mii_queue(struct net_device *dev, int regval, void (*func)(uint, struct net_devi
751 if (mii_head) { 751 if (mii_head) {
752 mii_tail->mii_next = mip; 752 mii_tail->mii_next = mip;
753 mii_tail = mip; 753 mii_tail = mip;
754 } 754 } else {
755 else {
756 mii_head = mii_tail = mip; 755 mii_head = mii_tail = mip;
757 fep->hwp->fec_mii_data = regval; 756 fep->hwp->fec_mii_data = regval;
758 } 757 }
759 } 758 } else {
760 else {
761 retval = 1; 759 retval = 1;
762 } 760 }
763 761
@@ -768,14 +766,11 @@ mii_queue(struct net_device *dev, int regval, void (*func)(uint, struct net_devi
768 766
769static void mii_do_cmd(struct net_device *dev, const phy_cmd_t *c) 767static void mii_do_cmd(struct net_device *dev, const phy_cmd_t *c)
770{ 768{
771 int k;
772
773 if(!c) 769 if(!c)
774 return; 770 return;
775 771
776 for(k = 0; (c+k)->mii_data != mk_mii_end; k++) { 772 for (; c->mii_data != mk_mii_end; c++)
777 mii_queue(dev, (c+k)->mii_data, (c+k)->funct); 773 mii_queue(dev, c->mii_data, c->funct);
778 }
779} 774}
780 775
781static void mii_parse_sr(uint mii_reg, struct net_device *dev) 776static void mii_parse_sr(uint mii_reg, struct net_device *dev)
@@ -792,7 +787,6 @@ static void mii_parse_sr(uint mii_reg, struct net_device *dev)
792 status |= PHY_STAT_FAULT; 787 status |= PHY_STAT_FAULT;
793 if (mii_reg & 0x0020) 788 if (mii_reg & 0x0020)
794 status |= PHY_STAT_ANC; 789 status |= PHY_STAT_ANC;
795
796 *s = status; 790 *s = status;
797} 791}
798 792
@@ -1239,7 +1233,6 @@ mii_link_interrupt(int irq, void * dev_id);
1239#endif 1233#endif
1240 1234
1241#if defined(CONFIG_M5272) 1235#if defined(CONFIG_M5272)
1242
1243/* 1236/*
1244 * Code specific to Coldfire 5272 setup. 1237 * Code specific to Coldfire 5272 setup.
1245 */ 1238 */
@@ -2020,8 +2013,7 @@ static void mii_relink(struct work_struct *work)
2020 & (PHY_STAT_100FDX | PHY_STAT_10FDX)) 2013 & (PHY_STAT_100FDX | PHY_STAT_10FDX))
2021 duplex = 1; 2014 duplex = 1;
2022 fec_restart(dev, duplex); 2015 fec_restart(dev, duplex);
2023 } 2016 } else
2024 else
2025 fec_stop(dev); 2017 fec_stop(dev);
2026 2018
2027#if 0 2019#if 0
@@ -2119,8 +2111,7 @@ mii_discover_phy(uint mii_reg, struct net_device *dev)
2119 fep->phy_id = phytype << 16; 2111 fep->phy_id = phytype << 16;
2120 mii_queue(dev, mk_mii_read(MII_REG_PHYIR2), 2112 mii_queue(dev, mk_mii_read(MII_REG_PHYIR2),
2121 mii_discover_phy3); 2113 mii_discover_phy3);
2122 } 2114 } else {
2123 else {
2124 fep->phy_addr++; 2115 fep->phy_addr++;
2125 mii_queue(dev, mk_mii_read(MII_REG_PHYIR1), 2116 mii_queue(dev, mk_mii_read(MII_REG_PHYIR1),
2126 mii_discover_phy); 2117 mii_discover_phy);
@@ -2574,8 +2565,7 @@ fec_restart(struct net_device *dev, int duplex)
2574 if (duplex) { 2565 if (duplex) {
2575 fecp->fec_r_cntrl = OPT_FRAME_SIZE | 0x04;/* MII enable */ 2566 fecp->fec_r_cntrl = OPT_FRAME_SIZE | 0x04;/* MII enable */
2576 fecp->fec_x_cntrl = 0x04; /* FD enable */ 2567 fecp->fec_x_cntrl = 0x04; /* FD enable */
2577 } 2568 } else {
2578 else {
2579 /* MII enable|No Rcv on Xmit */ 2569 /* MII enable|No Rcv on Xmit */
2580 fecp->fec_r_cntrl = OPT_FRAME_SIZE | 0x06; 2570 fecp->fec_r_cntrl = OPT_FRAME_SIZE | 0x06;
2581 fecp->fec_x_cntrl = 0x00; 2571 fecp->fec_x_cntrl = 0x00;
diff --git a/drivers/net/lguest_net.c b/drivers/net/lguest_net.c
deleted file mode 100644
index abce2ee8430a..000000000000
--- a/drivers/net/lguest_net.c
+++ /dev/null
@@ -1,555 +0,0 @@
1/*D:500
2 * The Guest network driver.
3 *
4 * This is very simple a virtual network driver, and our last Guest driver.
5 * The only trick is that it can talk directly to multiple other recipients
6 * (ie. other Guests on the same network). It can also be used with only the
7 * Host on the network.
8 :*/
9
10/* Copyright 2006 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2 of the License, or
15 * (at your option) any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
25 */
26//#define DEBUG
27#include <linux/netdevice.h>
28#include <linux/etherdevice.h>
29#include <linux/module.h>
30#include <linux/mm_types.h>
31#include <linux/io.h>
32#include <linux/lguest_bus.h>
33
34#define SHARED_SIZE PAGE_SIZE
35#define MAX_LANS 4
36#define NUM_SKBS 8
37
38/*M:011 Network code master Jeff Garzik points out numerous shortcomings in
39 * this driver if it aspires to greatness.
40 *
41 * Firstly, it doesn't use "NAPI": the networking's New API, and is poorer for
42 * it. As he says "NAPI means system-wide load leveling, across multiple
43 * network interfaces. Lack of NAPI can mean competition at higher loads."
44 *
45 * He also points out that we don't implement set_mac_address, so users cannot
46 * change the devices hardware address. When I asked why one would want to:
47 * "Bonding, and situations where you /do/ want the MAC address to "leak" out
48 * of the host onto the wider net."
49 *
50 * Finally, he would like module unloading: "It is not unrealistic to think of
51 * [un|re|]loading the net support module in an lguest guest. And, adding
52 * module support makes the programmer more responsible, because they now have
53 * to learn to clean up after themselves. Any driver that cannot clean up
54 * after itself is an incomplete driver in my book."
55 :*/
56
57/*D:530 The "struct lguestnet_info" contains all the information we need to
58 * know about the network device. */
59struct lguestnet_info
60{
61 /* The mapped device page(s) (an array of "struct lguest_net"). */
62 struct lguest_net *peer;
63 /* The physical address of the device page(s) */
64 unsigned long peer_phys;
65 /* The size of the device page(s). */
66 unsigned long mapsize;
67
68 /* The lguest_device I come from */
69 struct lguest_device *lgdev;
70
71 /* My peerid (ie. my slot in the array). */
72 unsigned int me;
73
74 /* Receive queue: the network packets waiting to be filled. */
75 struct sk_buff *skb[NUM_SKBS];
76 struct lguest_dma dma[NUM_SKBS];
77};
78/*:*/
79
80/* How many bytes left in this page. */
81static unsigned int rest_of_page(void *data)
82{
83 return PAGE_SIZE - ((unsigned long)data % PAGE_SIZE);
84}
85
86/*D:570 Each peer (ie. Guest or Host) on the network binds their receive
87 * buffers to a different key: we simply use the physical address of the
88 * device's memory page plus the peer number. The Host insists that all keys
89 * be a multiple of 4, so we multiply the peer number by 4. */
90static unsigned long peer_key(struct lguestnet_info *info, unsigned peernum)
91{
92 return info->peer_phys + 4 * peernum;
93}
94
95/* This is the routine which sets up a "struct lguest_dma" to point to a
96 * network packet, similar to req_to_dma() in lguest_blk.c. The structure of a
97 * "struct sk_buff" has grown complex over the years: it consists of a "head"
98 * linear section pointed to by "skb->data", and possibly an array of
99 * "fragments" in the case of a non-linear packet.
100 *
101 * Our receive buffers don't use fragments at all but outgoing skbs might, so
102 * we handle it. */
103static void skb_to_dma(const struct sk_buff *skb, unsigned int headlen,
104 struct lguest_dma *dma)
105{
106 unsigned int i, seg;
107
108 /* First, we put the linear region into the "struct lguest_dma". Each
109 * entry can't go over a page boundary, so even though all our packets
110 * are 1514 bytes or less, we might need to use two entries here: */
111 for (i = seg = 0; i < headlen; seg++, i += rest_of_page(skb->data+i)) {
112 dma->addr[seg] = virt_to_phys(skb->data + i);
113 dma->len[seg] = min((unsigned)(headlen - i),
114 rest_of_page(skb->data + i));
115 }
116
117 /* Now we handle the fragments: at least they're guaranteed not to go
118 * over a page. skb_shinfo(skb) returns a pointer to the structure
119 * which tells us about the number of fragments and the fragment
120 * array. */
121 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++, seg++) {
122 const skb_frag_t *f = &skb_shinfo(skb)->frags[i];
123 /* Should not happen with MTU less than 64k - 2 * PAGE_SIZE. */
124 if (seg == LGUEST_MAX_DMA_SECTIONS) {
125 /* We will end up sending a truncated packet should
126 * this ever happen. Plus, a cool log message! */
127 printk("Woah dude! Megapacket!\n");
128 break;
129 }
130 dma->addr[seg] = page_to_phys(f->page) + f->page_offset;
131 dma->len[seg] = f->size;
132 }
133
134 /* If after all that we didn't use the entire "struct lguest_dma"
135 * array, we terminate it with a 0 length. */
136 if (seg < LGUEST_MAX_DMA_SECTIONS)
137 dma->len[seg] = 0;
138}
139
140/*
141 * Packet transmission.
142 *
143 * Our packet transmission is a little unusual. A real network card would just
144 * send out the packet and leave the receivers to decide if they're interested.
145 * Instead, we look through the network device memory page and see if any of
146 * the ethernet addresses match the packet destination, and if so we send it to
147 * that Guest.
148 *
149 * This is made a little more complicated in two cases. The first case is
150 * broadcast packets: for that we send the packet to all Guests on the network,
151 * one at a time. The second case is "promiscuous" mode, where a Guest wants
152 * to see all the packets on the network. We need a way for the Guest to tell
153 * us it wants to see all packets, so it sets the "multicast" bit on its
154 * published MAC address, which is never valid in a real ethernet address.
155 */
156#define PROMISC_BIT 0x01
157
158/* This is the callback which is summoned whenever the network device's
159 * multicast or promiscuous state changes. If the card is in promiscuous mode,
160 * we advertise that in our ethernet address in the device's memory. We do the
161 * same if Linux wants any or all multicast traffic. */
162static void lguestnet_set_multicast(struct net_device *dev)
163{
164 struct lguestnet_info *info = netdev_priv(dev);
165
166 if ((dev->flags & (IFF_PROMISC|IFF_ALLMULTI)) || dev->mc_count)
167 info->peer[info->me].mac[0] |= PROMISC_BIT;
168 else
169 info->peer[info->me].mac[0] &= ~PROMISC_BIT;
170}
171
172/* A simple test function to see if a peer wants to see all packets.*/
173static int promisc(struct lguestnet_info *info, unsigned int peer)
174{
175 return info->peer[peer].mac[0] & PROMISC_BIT;
176}
177
178/* Another simple function to see if a peer's advertised ethernet address
179 * matches a packet's destination ethernet address. */
180static int mac_eq(const unsigned char mac[ETH_ALEN],
181 struct lguestnet_info *info, unsigned int peer)
182{
183 /* Ignore multicast bit, which peer turns on to mean promisc. */
184 if ((info->peer[peer].mac[0] & (~PROMISC_BIT)) != mac[0])
185 return 0;
186 return memcmp(mac+1, info->peer[peer].mac+1, ETH_ALEN-1) == 0;
187}
188
189/* This is the function which actually sends a packet once we've decided a
190 * peer wants it: */
191static void transfer_packet(struct net_device *dev,
192 struct sk_buff *skb,
193 unsigned int peernum)
194{
195 struct lguestnet_info *info = netdev_priv(dev);
196 struct lguest_dma dma;
197
198 /* We use our handy "struct lguest_dma" packing function to prepare
199 * the skb for sending. */
200 skb_to_dma(skb, skb_headlen(skb), &dma);
201 pr_debug("xfer length %04x (%u)\n", htons(skb->len), skb->len);
202
203 /* This is the actual send call which copies the packet. */
204 lguest_send_dma(peer_key(info, peernum), &dma);
205
206 /* Check that the entire packet was transmitted. If not, it could mean
207 * that the other Guest registered a short receive buffer, but this
208 * driver should never do that. More likely, the peer is dead. */
209 if (dma.used_len != skb->len) {
210 dev->stats.tx_carrier_errors++;
211 pr_debug("Bad xfer to peer %i: %i of %i (dma %p/%i)\n",
212 peernum, dma.used_len, skb->len,
213 (void *)dma.addr[0], dma.len[0]);
214 } else {
215 /* On success we update the stats. */
216 dev->stats.tx_bytes += skb->len;
217 dev->stats.tx_packets++;
218 }
219}
220
221/* Another helper function to tell is if a slot in the device memory is unused.
222 * Since we always set the Local Assignment bit in the ethernet address, the
223 * first byte can never be 0. */
224static int unused_peer(const struct lguest_net peer[], unsigned int num)
225{
226 return peer[num].mac[0] == 0;
227}
228
229/* Finally, here is the routine which handles an outgoing packet. It's called
230 * "start_xmit" for traditional reasons. */
231static int lguestnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
232{
233 unsigned int i;
234 int broadcast;
235 struct lguestnet_info *info = netdev_priv(dev);
236 /* Extract the destination ethernet address from the packet. */
237 const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;
238 DECLARE_MAC_BUF(mac);
239
240 pr_debug("%s: xmit %s\n", dev->name, print_mac(mac, dest));
241
242 /* If it's a multicast packet, we broadcast to everyone. That's not
243 * very efficient, but there are very few applications which actually
244 * use multicast, which is a shame really.
245 *
246 * As etherdevice.h points out: "By definition the broadcast address is
247 * also a multicast address." So we don't have to test for broadcast
248 * packets separately. */
249 broadcast = is_multicast_ether_addr(dest);
250
251 /* Look through all the published ethernet addresses to see if we
252 * should send this packet. */
253 for (i = 0; i < info->mapsize/sizeof(struct lguest_net); i++) {
254 /* We don't send to ourselves (we actually can't SEND_DMA to
255 * ourselves anyway), and don't send to unused slots.*/
256 if (i == info->me || unused_peer(info->peer, i))
257 continue;
258
259 /* If it's broadcast we send it. If they want every packet we
260 * send it. If the destination matches their address we send
261 * it. Otherwise we go to the next peer. */
262 if (!broadcast && !promisc(info, i) && !mac_eq(dest, info, i))
263 continue;
264
265 pr_debug("lguestnet %s: sending from %i to %i\n",
266 dev->name, info->me, i);
267 /* Our routine which actually does the transfer. */
268 transfer_packet(dev, skb, i);
269 }
270
271 /* An xmit routine is expected to dispose of the packet, so we do. */
272 dev_kfree_skb(skb);
273
274 /* As per kernel convention, 0 means success. This is why I love
275 * networking: even if we never sent to anyone, that's still
276 * success! */
277 return 0;
278}
279
280/*D:560
281 * Packet receiving.
282 *
283 * First, here's a helper routine which fills one of our array of receive
284 * buffers: */
285static int fill_slot(struct net_device *dev, unsigned int slot)
286{
287 struct lguestnet_info *info = netdev_priv(dev);
288
289 /* We can receive ETH_DATA_LEN (1500) byte packets, plus a standard
290 * ethernet header of ETH_HLEN (14) bytes. */
291 info->skb[slot] = netdev_alloc_skb(dev, ETH_HLEN + ETH_DATA_LEN);
292 if (!info->skb[slot]) {
293 printk("%s: could not fill slot %i\n", dev->name, slot);
294 return -ENOMEM;
295 }
296
297 /* skb_to_dma() is a helper which sets up the "struct lguest_dma" to
298 * point to the data in the skb: we also use it for sending out a
299 * packet. */
300 skb_to_dma(info->skb[slot], ETH_HLEN + ETH_DATA_LEN, &info->dma[slot]);
301
302 /* This is a Write Memory Barrier: it ensures that the entry in the
303 * receive buffer array is written *before* we set the "used_len" entry
304 * to 0. If the Host were looking at the receive buffer array from a
305 * different CPU, it could potentially see "used_len = 0" and not see
306 * the updated receive buffer information. This would be a horribly
307 * nasty bug, so make sure the compiler and CPU know this has to happen
308 * first. */
309 wmb();
310 /* Writing 0 to "used_len" tells the Host it can use this receive
311 * buffer now. */
312 info->dma[slot].used_len = 0;
313 return 0;
314}
315
316/* This is the actual receive routine. When we receive an interrupt from the
317 * Host to tell us a packet has been delivered, we arrive here: */
318static irqreturn_t lguestnet_rcv(int irq, void *dev_id)
319{
320 struct net_device *dev = dev_id;
321 struct lguestnet_info *info = netdev_priv(dev);
322 unsigned int i, done = 0;
323
324 /* Look through our entire receive array for an entry which has data
325 * in it. */
326 for (i = 0; i < ARRAY_SIZE(info->dma); i++) {
327 unsigned int length;
328 struct sk_buff *skb;
329
330 length = info->dma[i].used_len;
331 if (length == 0)
332 continue;
333
334 /* We've found one! Remember the skb (we grabbed the length
335 * above), and immediately refill the slot we've taken it
336 * from. */
337 done++;
338 skb = info->skb[i];
339 fill_slot(dev, i);
340
341 /* This shouldn't happen: micropackets could be sent by a
342 * badly-behaved Guest on the network, but the Host will never
343 * stuff more data in the buffer than the buffer length. */
344 if (length < ETH_HLEN || length > ETH_HLEN + ETH_DATA_LEN) {
345 pr_debug(KERN_WARNING "%s: unbelievable skb len: %i\n",
346 dev->name, length);
347 dev_kfree_skb(skb);
348 continue;
349 }
350
351 /* skb_put(), what a great function! I've ranted about this
352 * function before (http://lkml.org/lkml/1999/9/26/24). You
353 * call it after you've added data to the end of an skb (in
354 * this case, it was the Host which wrote the data). */
355 skb_put(skb, length);
356
357 /* The ethernet header contains a protocol field: we use the
358 * standard helper to extract it, and place the result in
359 * skb->protocol. The helper also sets up skb->pkt_type and
360 * eats up the ethernet header from the front of the packet. */
361 skb->protocol = eth_type_trans(skb, dev);
362
363 /* If this device doesn't need checksums for sending, we also
364 * don't need to check the packets when they come in. */
365 if (dev->features & NETIF_F_NO_CSUM)
366 skb->ip_summed = CHECKSUM_UNNECESSARY;
367
368 /* As a last resort for debugging the driver or the lguest I/O
369 * subsystem, you can uncomment the "#define DEBUG" at the top
370 * of this file, which turns all the pr_debug() into printk()
371 * and floods the logs. */
372 pr_debug("Receiving skb proto 0x%04x len %i type %i\n",
373 ntohs(skb->protocol), skb->len, skb->pkt_type);
374
375 /* Update the packet and byte counts (visible from ifconfig,
376 * and good for debugging). */
377 dev->stats.rx_bytes += skb->len;
378 dev->stats.rx_packets++;
379
380 /* Hand our fresh network packet into the stack's "network
381 * interface receive" routine. That will free the packet
382 * itself when it's finished. */
383 netif_rx(skb);
384 }
385
386 /* If we found any packets, we assume the interrupt was for us. */
387 return done ? IRQ_HANDLED : IRQ_NONE;
388}
389
390/*D:550 This is where we start: when the device is brought up by dhcpd or
391 * ifconfig. At this point we advertise our MAC address to the rest of the
392 * network, and register receive buffers ready for incoming packets. */
393static int lguestnet_open(struct net_device *dev)
394{
395 int i;
396 struct lguestnet_info *info = netdev_priv(dev);
397
398 /* Copy our MAC address into the device page, so others on the network
399 * can find us. */
400 memcpy(info->peer[info->me].mac, dev->dev_addr, ETH_ALEN);
401
402 /* We might already be in promisc mode (dev->flags & IFF_PROMISC). Our
403 * set_multicast callback handles this already, so we call it now. */
404 lguestnet_set_multicast(dev);
405
406 /* Allocate packets and put them into our "struct lguest_dma" array.
407 * If we fail to allocate all the packets we could still limp along,
408 * but it's a sign of real stress so we should probably give up now. */
409 for (i = 0; i < ARRAY_SIZE(info->dma); i++) {
410 if (fill_slot(dev, i) != 0)
411 goto cleanup;
412 }
413
414 /* Finally we tell the Host where our array of "struct lguest_dma"
415 * receive buffers is, binding it to the key corresponding to the
416 * device's physical memory plus our peerid. */
417 if (lguest_bind_dma(peer_key(info,info->me), info->dma,
418 NUM_SKBS, lgdev_irq(info->lgdev)) != 0)
419 goto cleanup;
420 return 0;
421
422cleanup:
423 while (--i >= 0)
424 dev_kfree_skb(info->skb[i]);
425 return -ENOMEM;
426}
427/*:*/
428
429/* The close routine is called when the device is no longer in use: we clean up
430 * elegantly. */
431static int lguestnet_close(struct net_device *dev)
432{
433 unsigned int i;
434 struct lguestnet_info *info = netdev_priv(dev);
435
436 /* Clear all trace of our existence out of the device memory by setting
437 * the slot which held our MAC address to 0 (unused). */
438 memset(&info->peer[info->me], 0, sizeof(info->peer[info->me]));
439
440 /* Unregister our array of receive buffers */
441 lguest_unbind_dma(peer_key(info, info->me), info->dma);
442 for (i = 0; i < ARRAY_SIZE(info->dma); i++)
443 dev_kfree_skb(info->skb[i]);
444 return 0;
445}
446
447/*D:510 The network device probe function is basically a standard ethernet
448 * device setup. It reads the "struct lguest_device_desc" and sets the "struct
449 * net_device". Oh, the line-by-line excitement! Let's skip over it. :*/
450static int lguestnet_probe(struct lguest_device *lgdev)
451{
452 int err, irqf = IRQF_SHARED;
453 struct net_device *dev;
454 struct lguestnet_info *info;
455 struct lguest_device_desc *desc = &lguest_devices[lgdev->index];
456
457 pr_debug("lguest_net: probing for device %i\n", lgdev->index);
458
459 dev = alloc_etherdev(sizeof(struct lguestnet_info));
460 if (!dev)
461 return -ENOMEM;
462
463 /* Ethernet defaults with some changes */
464 ether_setup(dev);
465 dev->set_mac_address = NULL;
466
467 dev->dev_addr[0] = 0x02; /* set local assignment bit (IEEE802) */
468 dev->dev_addr[1] = 0x00;
469 memcpy(&dev->dev_addr[2], &lguest_data.guestid, 2);
470 dev->dev_addr[4] = 0x00;
471 dev->dev_addr[5] = 0x00;
472
473 dev->open = lguestnet_open;
474 dev->stop = lguestnet_close;
475 dev->hard_start_xmit = lguestnet_start_xmit;
476
477 /* We don't actually support multicast yet, but turning on/off
478 * promisc also calls dev->set_multicast_list. */
479 dev->set_multicast_list = lguestnet_set_multicast;
480 SET_NETDEV_DEV(dev, &lgdev->dev);
481
482 /* The network code complains if you have "scatter-gather" capability
483 * if you don't also handle checksums (it seem that would be
484 * "illogical"). So we use a lie of omission and don't tell it that we
485 * can handle scattered packets unless we also don't want checksums,
486 * even though to us they're completely independent. */
487 if (desc->features & LGUEST_NET_F_NOCSUM)
488 dev->features = NETIF_F_SG|NETIF_F_NO_CSUM;
489
490 info = netdev_priv(dev);
491 info->mapsize = PAGE_SIZE * desc->num_pages;
492 info->peer_phys = ((unsigned long)desc->pfn << PAGE_SHIFT);
493 info->lgdev = lgdev;
494 info->peer = lguest_map(info->peer_phys, desc->num_pages);
495 if (!info->peer) {
496 err = -ENOMEM;
497 goto free;
498 }
499
500 /* This stores our peerid (upper bits reserved for future). */
501 info->me = (desc->features & (info->mapsize-1));
502
503 err = register_netdev(dev);
504 if (err) {
505 pr_debug("lguestnet: registering device failed\n");
506 goto unmap;
507 }
508
509 if (lguest_devices[lgdev->index].features & LGUEST_DEVICE_F_RANDOMNESS)
510 irqf |= IRQF_SAMPLE_RANDOM;
511 if (request_irq(lgdev_irq(lgdev), lguestnet_rcv, irqf, "lguestnet",
512 dev) != 0) {
513 pr_debug("lguestnet: cannot get irq %i\n", lgdev_irq(lgdev));
514 goto unregister;
515 }
516
517 pr_debug("lguestnet: registered device %s\n", dev->name);
518 /* Finally, we put the "struct net_device" in the generic "struct
519 * lguest_device"s private pointer. Again, it's not necessary, but
520 * makes sure the cool kernel kids don't tease us. */
521 lgdev->private = dev;
522 return 0;
523
524unregister:
525 unregister_netdev(dev);
526unmap:
527 lguest_unmap(info->peer);
528free:
529 free_netdev(dev);
530 return err;
531}
532
533static struct lguest_driver lguestnet_drv = {
534 .name = "lguestnet",
535 .owner = THIS_MODULE,
536 .device_type = LGUEST_DEVICE_T_NET,
537 .probe = lguestnet_probe,
538};
539
540static __init int lguestnet_init(void)
541{
542 return register_lguest_driver(&lguestnet_drv);
543}
544module_init(lguestnet_init);
545
546MODULE_DESCRIPTION("Lguest network driver");
547MODULE_LICENSE("GPL");
548
549/*D:580
550 * This is the last of the Drivers, and with this we have covered the many and
551 * wonderous and fine (and boring) details of the Guest.
552 *
553 * "make Launcher" beckons, where we answer questions like "Where do Guests
554 * come from?", and "What do you do when someone asks for optimization?"
555 */
diff --git a/drivers/net/mlx4/fw.c b/drivers/net/mlx4/fw.c
index 6471d33afb7d..50648738d679 100644
--- a/drivers/net/mlx4/fw.c
+++ b/drivers/net/mlx4/fw.c
@@ -736,7 +736,7 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
736 MLX4_PUT(inbox, (u8) (PAGE_SHIFT - 12), INIT_HCA_UAR_PAGE_SZ_OFFSET); 736 MLX4_PUT(inbox, (u8) (PAGE_SHIFT - 12), INIT_HCA_UAR_PAGE_SZ_OFFSET);
737 MLX4_PUT(inbox, param->log_uar_sz, INIT_HCA_LOG_UAR_SZ_OFFSET); 737 MLX4_PUT(inbox, param->log_uar_sz, INIT_HCA_LOG_UAR_SZ_OFFSET);
738 738
739 err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_INIT_HCA, 1000); 739 err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_INIT_HCA, 10000);
740 740
741 if (err) 741 if (err)
742 mlx4_err(dev, "INIT_HCA returns %d\n", err); 742 mlx4_err(dev, "INIT_HCA returns %d\n", err);
diff --git a/drivers/net/mlx4/icm.c b/drivers/net/mlx4/icm.c
index 4b3c109d5eae..887633b207d9 100644
--- a/drivers/net/mlx4/icm.c
+++ b/drivers/net/mlx4/icm.c
@@ -60,7 +60,7 @@ static void mlx4_free_icm_pages(struct mlx4_dev *dev, struct mlx4_icm_chunk *chu
60 PCI_DMA_BIDIRECTIONAL); 60 PCI_DMA_BIDIRECTIONAL);
61 61
62 for (i = 0; i < chunk->npages; ++i) 62 for (i = 0; i < chunk->npages; ++i)
63 __free_pages(chunk->mem[i].page, 63 __free_pages(sg_page(&chunk->mem[i]),
64 get_order(chunk->mem[i].length)); 64 get_order(chunk->mem[i].length));
65} 65}
66 66
@@ -70,7 +70,7 @@ static void mlx4_free_icm_coherent(struct mlx4_dev *dev, struct mlx4_icm_chunk *
70 70
71 for (i = 0; i < chunk->npages; ++i) 71 for (i = 0; i < chunk->npages; ++i)
72 dma_free_coherent(&dev->pdev->dev, chunk->mem[i].length, 72 dma_free_coherent(&dev->pdev->dev, chunk->mem[i].length,
73 lowmem_page_address(chunk->mem[i].page), 73 lowmem_page_address(sg_page(&chunk->mem[i])),
74 sg_dma_address(&chunk->mem[i])); 74 sg_dma_address(&chunk->mem[i]));
75} 75}
76 76
@@ -95,10 +95,13 @@ void mlx4_free_icm(struct mlx4_dev *dev, struct mlx4_icm *icm, int coherent)
95 95
96static int mlx4_alloc_icm_pages(struct scatterlist *mem, int order, gfp_t gfp_mask) 96static int mlx4_alloc_icm_pages(struct scatterlist *mem, int order, gfp_t gfp_mask)
97{ 97{
98 mem->page = alloc_pages(gfp_mask, order); 98 struct page *page;
99 if (!mem->page) 99
100 page = alloc_pages(gfp_mask, order);
101 if (!page)
100 return -ENOMEM; 102 return -ENOMEM;
101 103
104 sg_set_page(mem, page);
102 mem->length = PAGE_SIZE << order; 105 mem->length = PAGE_SIZE << order;
103 mem->offset = 0; 106 mem->offset = 0;
104 return 0; 107 return 0;
@@ -145,6 +148,7 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
145 if (!chunk) 148 if (!chunk)
146 goto fail; 149 goto fail;
147 150
151 sg_init_table(chunk->mem, MLX4_ICM_CHUNK_LEN);
148 chunk->npages = 0; 152 chunk->npages = 0;
149 chunk->nsg = 0; 153 chunk->nsg = 0;
150 list_add_tail(&chunk->list, &icm->chunk_list); 154 list_add_tail(&chunk->list, &icm->chunk_list);
@@ -334,7 +338,7 @@ void *mlx4_table_find(struct mlx4_icm_table *table, int obj, dma_addr_t *dma_han
334 * been assigned to. 338 * been assigned to.
335 */ 339 */
336 if (chunk->mem[i].length > offset) { 340 if (chunk->mem[i].length > offset) {
337 page = chunk->mem[i].page; 341 page = sg_page(&chunk->mem[i]);
338 goto out; 342 goto out;
339 } 343 }
340 offset -= chunk->mem[i].length; 344 offset -= chunk->mem[i].length;
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index 5b41e8bdd6b4..651c2699d5e1 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -3274,6 +3274,7 @@ static const struct ethtool_ops mv643xx_ethtool_ops = {
3274 .get_drvinfo = mv643xx_get_drvinfo, 3274 .get_drvinfo = mv643xx_get_drvinfo,
3275 .get_link = mv643xx_eth_get_link, 3275 .get_link = mv643xx_eth_get_link,
3276 .set_sg = ethtool_op_set_sg, 3276 .set_sg = ethtool_op_set_sg,
3277 .get_sset_count = mv643xx_get_sset_count,
3277 .get_ethtool_stats = mv643xx_get_ethtool_stats, 3278 .get_ethtool_stats = mv643xx_get_ethtool_stats,
3278 .get_strings = mv643xx_get_strings, 3279 .get_strings = mv643xx_get_strings,
3279 .nway_reset = mv643xx_eth_nway_restart, 3280 .nway_reset = mv643xx_eth_nway_restart,
diff --git a/drivers/net/niu.c b/drivers/net/niu.c
index ed1f9bbb2a32..112ab079ce7d 100644
--- a/drivers/net/niu.c
+++ b/drivers/net/niu.c
@@ -3103,31 +3103,12 @@ static int niu_alloc_tx_ring_info(struct niu *np,
3103 3103
3104static void niu_size_rbr(struct niu *np, struct rx_ring_info *rp) 3104static void niu_size_rbr(struct niu *np, struct rx_ring_info *rp)
3105{ 3105{
3106 u16 bs; 3106 u16 bss;
3107 3107
3108 switch (PAGE_SIZE) { 3108 bss = min(PAGE_SHIFT, 15);
3109 case 4 * 1024:
3110 case 8 * 1024:
3111 case 16 * 1024:
3112 case 32 * 1024:
3113 rp->rbr_block_size = PAGE_SIZE;
3114 rp->rbr_blocks_per_page = 1;
3115 break;
3116 3109
3117 default: 3110 rp->rbr_block_size = 1 << bss;
3118 if (PAGE_SIZE % (32 * 1024) == 0) 3111 rp->rbr_blocks_per_page = 1 << (PAGE_SHIFT-bss);
3119 bs = 32 * 1024;
3120 else if (PAGE_SIZE % (16 * 1024) == 0)
3121 bs = 16 * 1024;
3122 else if (PAGE_SIZE % (8 * 1024) == 0)
3123 bs = 8 * 1024;
3124 else if (PAGE_SIZE % (4 * 1024) == 0)
3125 bs = 4 * 1024;
3126 else
3127 BUG();
3128 rp->rbr_block_size = bs;
3129 rp->rbr_blocks_per_page = PAGE_SIZE / bs;
3130 }
3131 3112
3132 rp->rbr_sizes[0] = 256; 3113 rp->rbr_sizes[0] = 256;
3133 rp->rbr_sizes[1] = 1024; 3114 rp->rbr_sizes[1] = 1024;
@@ -7902,12 +7883,7 @@ static int __init niu_init(void)
7902{ 7883{
7903 int err = 0; 7884 int err = 0;
7904 7885
7905 BUILD_BUG_ON((PAGE_SIZE < 4 * 1024) || 7886 BUILD_BUG_ON(PAGE_SIZE < 4 * 1024);
7906 ((PAGE_SIZE > 32 * 1024) &&
7907 ((PAGE_SIZE % (32 * 1024)) != 0 &&
7908 (PAGE_SIZE % (16 * 1024)) != 0 &&
7909 (PAGE_SIZE % (8 * 1024)) != 0 &&
7910 (PAGE_SIZE % (4 * 1024)) != 0)));
7911 7887
7912 niu_debug = netif_msg_init(debug, NIU_MSG_DEFAULT); 7888 niu_debug = netif_msg_init(debug, NIU_MSG_DEFAULT);
7913 7889
diff --git a/drivers/net/ppp_mppe.c b/drivers/net/ppp_mppe.c
index c0b6d19d1457..bcb0885011c8 100644
--- a/drivers/net/ppp_mppe.c
+++ b/drivers/net/ppp_mppe.c
@@ -55,7 +55,7 @@
55#include <linux/mm.h> 55#include <linux/mm.h>
56#include <linux/ppp_defs.h> 56#include <linux/ppp_defs.h>
57#include <linux/ppp-comp.h> 57#include <linux/ppp-comp.h>
58#include <asm/scatterlist.h> 58#include <linux/scatterlist.h>
59 59
60#include "ppp_mppe.h" 60#include "ppp_mppe.h"
61 61
@@ -68,9 +68,7 @@ MODULE_VERSION("1.0.2");
68static unsigned int 68static unsigned int
69setup_sg(struct scatterlist *sg, const void *address, unsigned int length) 69setup_sg(struct scatterlist *sg, const void *address, unsigned int length)
70{ 70{
71 sg[0].page = virt_to_page(address); 71 sg_init_one(sg, address, length);
72 sg[0].offset = offset_in_page(address);
73 sg[0].length = length;
74 return length; 72 return length;
75} 73}
76 74
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 419c00cbe6e9..e8960f294a6e 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -44,7 +44,8 @@
44 printk( "Assertion failed! %s,%s,%s,line=%d\n", \ 44 printk( "Assertion failed! %s,%s,%s,line=%d\n", \
45 #expr,__FILE__,__FUNCTION__,__LINE__); \ 45 #expr,__FILE__,__FUNCTION__,__LINE__); \
46 } 46 }
47#define dprintk(fmt, args...) do { printk(PFX fmt, ## args); } while (0) 47#define dprintk(fmt, args...) \
48 do { printk(KERN_DEBUG PFX fmt, ## args); } while (0)
48#else 49#else
49#define assert(expr) do {} while (0) 50#define assert(expr) do {} while (0)
50#define dprintk(fmt, args...) do {} while (0) 51#define dprintk(fmt, args...) do {} while (0)
@@ -111,19 +112,15 @@ enum mac_version {
111 RTL_GIGA_MAC_VER_05 = 0x05, // 8110SCd 112 RTL_GIGA_MAC_VER_05 = 0x05, // 8110SCd
112 RTL_GIGA_MAC_VER_06 = 0x06, // 8110SCe 113 RTL_GIGA_MAC_VER_06 = 0x06, // 8110SCe
113 RTL_GIGA_MAC_VER_11 = 0x0b, // 8168Bb 114 RTL_GIGA_MAC_VER_11 = 0x0b, // 8168Bb
114 RTL_GIGA_MAC_VER_12 = 0x0c, // 8168Be 8168Bf 115 RTL_GIGA_MAC_VER_12 = 0x0c, // 8168Be
115 RTL_GIGA_MAC_VER_13 = 0x0d, // 8101Eb 8101Ec 116 RTL_GIGA_MAC_VER_13 = 0x0d, // 8101Eb
116 RTL_GIGA_MAC_VER_14 = 0x0e, // 8101 117 RTL_GIGA_MAC_VER_14 = 0x0e, // 8101 ?
117 RTL_GIGA_MAC_VER_15 = 0x0f // 8101 118 RTL_GIGA_MAC_VER_15 = 0x0f, // 8101 ?
118}; 119 RTL_GIGA_MAC_VER_16 = 0x11, // 8101Ec
119 120 RTL_GIGA_MAC_VER_17 = 0x10, // 8168Bf
120enum phy_version { 121 RTL_GIGA_MAC_VER_18 = 0x12, // 8168CP
121 RTL_GIGA_PHY_VER_C = 0x03, /* PHY Reg 0x03 bit0-3 == 0x0000 */ 122 RTL_GIGA_MAC_VER_19 = 0x13, // 8168C
122 RTL_GIGA_PHY_VER_D = 0x04, /* PHY Reg 0x03 bit0-3 == 0x0000 */ 123 RTL_GIGA_MAC_VER_20 = 0x14 // 8168C
123 RTL_GIGA_PHY_VER_E = 0x05, /* PHY Reg 0x03 bit0-3 == 0x0000 */
124 RTL_GIGA_PHY_VER_F = 0x06, /* PHY Reg 0x03 bit0-3 == 0x0001 */
125 RTL_GIGA_PHY_VER_G = 0x07, /* PHY Reg 0x03 bit0-3 == 0x0002 */
126 RTL_GIGA_PHY_VER_H = 0x08, /* PHY Reg 0x03 bit0-3 == 0x0003 */
127}; 124};
128 125
129#define _R(NAME,MAC,MASK) \ 126#define _R(NAME,MAC,MASK) \
@@ -144,7 +141,12 @@ static const struct {
144 _R("RTL8168b/8111b", RTL_GIGA_MAC_VER_12, 0xff7e1880), // PCI-E 141 _R("RTL8168b/8111b", RTL_GIGA_MAC_VER_12, 0xff7e1880), // PCI-E
145 _R("RTL8101e", RTL_GIGA_MAC_VER_13, 0xff7e1880), // PCI-E 8139 142 _R("RTL8101e", RTL_GIGA_MAC_VER_13, 0xff7e1880), // PCI-E 8139
146 _R("RTL8100e", RTL_GIGA_MAC_VER_14, 0xff7e1880), // PCI-E 8139 143 _R("RTL8100e", RTL_GIGA_MAC_VER_14, 0xff7e1880), // PCI-E 8139
147 _R("RTL8100e", RTL_GIGA_MAC_VER_15, 0xff7e1880) // PCI-E 8139 144 _R("RTL8100e", RTL_GIGA_MAC_VER_15, 0xff7e1880), // PCI-E 8139
145 _R("RTL8168b/8111b", RTL_GIGA_MAC_VER_17, 0xff7e1880), // PCI-E
146 _R("RTL8101e", RTL_GIGA_MAC_VER_16, 0xff7e1880), // PCI-E
147 _R("RTL8168cp/8111cp", RTL_GIGA_MAC_VER_18, 0xff7e1880), // PCI-E
148 _R("RTL8168c/8111c", RTL_GIGA_MAC_VER_19, 0xff7e1880), // PCI-E
149 _R("RTL8168c/8111c", RTL_GIGA_MAC_VER_20, 0xff7e1880) // PCI-E
148}; 150};
149#undef _R 151#undef _R
150 152
@@ -165,7 +167,7 @@ static struct pci_device_id rtl8169_pci_tbl[] = {
165 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8168), 0, 0, RTL_CFG_1 }, 167 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8168), 0, 0, RTL_CFG_1 },
166 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8169), 0, 0, RTL_CFG_0 }, 168 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8169), 0, 0, RTL_CFG_0 },
167 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4300), 0, 0, RTL_CFG_0 }, 169 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4300), 0, 0, RTL_CFG_0 },
168 { PCI_DEVICE(0x1259, 0xc107), 0, 0, RTL_CFG_0 }, 170 { PCI_DEVICE(PCI_VENDOR_ID_AT, 0xc107), 0, 0, RTL_CFG_0 },
169 { PCI_DEVICE(0x16ec, 0x0116), 0, 0, RTL_CFG_0 }, 171 { PCI_DEVICE(0x16ec, 0x0116), 0, 0, RTL_CFG_0 },
170 { PCI_VENDOR_ID_LINKSYS, 0x1032, 172 { PCI_VENDOR_ID_LINKSYS, 0x1032,
171 PCI_ANY_ID, 0x0024, 0, 0, RTL_CFG_0 }, 173 PCI_ANY_ID, 0x0024, 0, 0, RTL_CFG_0 },
@@ -277,6 +279,7 @@ enum rtl_register_content {
277 TxDMAShift = 8, /* DMA burst value (0-7) is shift this many bits */ 279 TxDMAShift = 8, /* DMA burst value (0-7) is shift this many bits */
278 280
279 /* Config1 register p.24 */ 281 /* Config1 register p.24 */
282 MSIEnable = (1 << 5), /* Enable Message Signaled Interrupt */
280 PMEnable = (1 << 0), /* Power Management Enable */ 283 PMEnable = (1 << 0), /* Power Management Enable */
281 284
282 /* Config2 register p. 25 */ 285 /* Config2 register p. 25 */
@@ -380,17 +383,20 @@ struct ring_info {
380 u8 __pad[sizeof(void *) - sizeof(u32)]; 383 u8 __pad[sizeof(void *) - sizeof(u32)];
381}; 384};
382 385
386enum features {
387 RTL_FEATURE_WOL = (1 << 0),
388 RTL_FEATURE_MSI = (1 << 1),
389};
390
383struct rtl8169_private { 391struct rtl8169_private {
384 void __iomem *mmio_addr; /* memory map physical address */ 392 void __iomem *mmio_addr; /* memory map physical address */
385 struct pci_dev *pci_dev; /* Index of PCI device */ 393 struct pci_dev *pci_dev; /* Index of PCI device */
386 struct net_device *dev; 394 struct net_device *dev;
387 struct napi_struct napi; 395 struct napi_struct napi;
388 struct net_device_stats stats; /* statistics of net device */
389 spinlock_t lock; /* spin lock flag */ 396 spinlock_t lock; /* spin lock flag */
390 u32 msg_enable; 397 u32 msg_enable;
391 int chipset; 398 int chipset;
392 int mac_version; 399 int mac_version;
393 int phy_version;
394 u32 cur_rx; /* Index into the Rx descriptor buffer of next Rx pkt. */ 400 u32 cur_rx; /* Index into the Rx descriptor buffer of next Rx pkt. */
395 u32 cur_tx; /* Index into the Tx descriptor buffer of next Rx pkt. */ 401 u32 cur_tx; /* Index into the Tx descriptor buffer of next Rx pkt. */
396 u32 dirty_rx; 402 u32 dirty_rx;
@@ -420,7 +426,7 @@ struct rtl8169_private {
420 unsigned int (*phy_reset_pending)(void __iomem *); 426 unsigned int (*phy_reset_pending)(void __iomem *);
421 unsigned int (*link_ok)(void __iomem *); 427 unsigned int (*link_ok)(void __iomem *);
422 struct delayed_work task; 428 struct delayed_work task;
423 unsigned wol_enabled : 1; 429 unsigned features;
424}; 430};
425 431
426MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>"); 432MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>");
@@ -626,7 +632,10 @@ static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
626 632
627 RTL_W8(Cfg9346, Cfg9346_Lock); 633 RTL_W8(Cfg9346, Cfg9346_Lock);
628 634
629 tp->wol_enabled = (wol->wolopts) ? 1 : 0; 635 if (wol->wolopts)
636 tp->features |= RTL_FEATURE_WOL;
637 else
638 tp->features &= ~RTL_FEATURE_WOL;
630 639
631 spin_unlock_irq(&tp->lock); 640 spin_unlock_irq(&tp->lock);
632 641
@@ -707,7 +716,8 @@ static int rtl8169_set_speed_xmii(struct net_device *dev,
707 716
708 /* This tweak comes straight from Realtek's driver. */ 717 /* This tweak comes straight from Realtek's driver. */
709 if ((speed == SPEED_100) && (duplex == DUPLEX_HALF) && 718 if ((speed == SPEED_100) && (duplex == DUPLEX_HALF) &&
710 (tp->mac_version == RTL_GIGA_MAC_VER_13)) { 719 ((tp->mac_version == RTL_GIGA_MAC_VER_13) ||
720 (tp->mac_version == RTL_GIGA_MAC_VER_16))) {
711 auto_nego = ADVERTISE_100HALF | ADVERTISE_CSMA; 721 auto_nego = ADVERTISE_100HALF | ADVERTISE_CSMA;
712 } 722 }
713 } 723 }
@@ -715,7 +725,8 @@ static int rtl8169_set_speed_xmii(struct net_device *dev,
715 /* The 8100e/8101e do Fast Ethernet only. */ 725 /* The 8100e/8101e do Fast Ethernet only. */
716 if ((tp->mac_version == RTL_GIGA_MAC_VER_13) || 726 if ((tp->mac_version == RTL_GIGA_MAC_VER_13) ||
717 (tp->mac_version == RTL_GIGA_MAC_VER_14) || 727 (tp->mac_version == RTL_GIGA_MAC_VER_14) ||
718 (tp->mac_version == RTL_GIGA_MAC_VER_15)) { 728 (tp->mac_version == RTL_GIGA_MAC_VER_15) ||
729 (tp->mac_version == RTL_GIGA_MAC_VER_16)) {
719 if ((giga_ctrl & (ADVERTISE_1000FULL | ADVERTISE_1000HALF)) && 730 if ((giga_ctrl & (ADVERTISE_1000FULL | ADVERTISE_1000HALF)) &&
720 netif_msg_link(tp)) { 731 netif_msg_link(tp)) {
721 printk(KERN_INFO "%s: PHY does not support 1000Mbps.\n", 732 printk(KERN_INFO "%s: PHY does not support 1000Mbps.\n",
@@ -726,7 +737,8 @@ static int rtl8169_set_speed_xmii(struct net_device *dev,
726 737
727 auto_nego |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; 738 auto_nego |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
728 739
729 if (tp->mac_version == RTL_GIGA_MAC_VER_12) { 740 if ((tp->mac_version == RTL_GIGA_MAC_VER_12) ||
741 (tp->mac_version == RTL_GIGA_MAC_VER_17)) {
730 /* Vendor specific (0x1f) and reserved (0x0e) MII registers. */ 742 /* Vendor specific (0x1f) and reserved (0x0e) MII registers. */
731 mdio_write(ioaddr, 0x1f, 0x0000); 743 mdio_write(ioaddr, 0x1f, 0x0000);
732 mdio_write(ioaddr, 0x0e, 0x0000); 744 mdio_write(ioaddr, 0x0e, 0x0000);
@@ -1104,26 +1116,51 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp,
1104 */ 1116 */
1105 const struct { 1117 const struct {
1106 u32 mask; 1118 u32 mask;
1119 u32 val;
1107 int mac_version; 1120 int mac_version;
1108 } mac_info[] = { 1121 } mac_info[] = {
 1109 { 0x38800000, RTL_GIGA_MAC_VER_15 }, 1122 /* 8168C family. */
1110 { 0x38000000, RTL_GIGA_MAC_VER_12 }, 1123 { 0x7c800000, 0x3c800000, RTL_GIGA_MAC_VER_18 },
1111 { 0x34000000, RTL_GIGA_MAC_VER_13 }, 1124 { 0x7cf00000, 0x3c000000, RTL_GIGA_MAC_VER_19 },
1112 { 0x30800000, RTL_GIGA_MAC_VER_14 }, 1125 { 0x7cf00000, 0x3c200000, RTL_GIGA_MAC_VER_20 },
1113 { 0x30000000, RTL_GIGA_MAC_VER_11 }, 1126 { 0x7c800000, 0x3c000000, RTL_GIGA_MAC_VER_20 },
1114 { 0x98000000, RTL_GIGA_MAC_VER_06 }, 1127
1115 { 0x18000000, RTL_GIGA_MAC_VER_05 }, 1128 /* 8168B family. */
1116 { 0x10000000, RTL_GIGA_MAC_VER_04 }, 1129 { 0x7cf00000, 0x38000000, RTL_GIGA_MAC_VER_12 },
1117 { 0x04000000, RTL_GIGA_MAC_VER_03 }, 1130 { 0x7cf00000, 0x38500000, RTL_GIGA_MAC_VER_17 },
1118 { 0x00800000, RTL_GIGA_MAC_VER_02 }, 1131 { 0x7c800000, 0x38000000, RTL_GIGA_MAC_VER_17 },
1119 { 0x00000000, RTL_GIGA_MAC_VER_01 } /* Catch-all */ 1132 { 0x7c800000, 0x30000000, RTL_GIGA_MAC_VER_11 },
1133
1134 /* 8101 family. */
1135 { 0x7cf00000, 0x34000000, RTL_GIGA_MAC_VER_13 },
1136 { 0x7cf00000, 0x34200000, RTL_GIGA_MAC_VER_16 },
1137 { 0x7c800000, 0x34000000, RTL_GIGA_MAC_VER_16 },
1138 /* FIXME: where did these entries come from ? -- FR */
1139 { 0xfc800000, 0x38800000, RTL_GIGA_MAC_VER_15 },
1140 { 0xfc800000, 0x30800000, RTL_GIGA_MAC_VER_14 },
1141
1142 /* 8110 family. */
1143 { 0xfc800000, 0x98000000, RTL_GIGA_MAC_VER_06 },
1144 { 0xfc800000, 0x18000000, RTL_GIGA_MAC_VER_05 },
1145 { 0xfc800000, 0x10000000, RTL_GIGA_MAC_VER_04 },
1146 { 0xfc800000, 0x04000000, RTL_GIGA_MAC_VER_03 },
1147 { 0xfc800000, 0x00800000, RTL_GIGA_MAC_VER_02 },
1148 { 0xfc800000, 0x00000000, RTL_GIGA_MAC_VER_01 },
1149
1150 { 0x00000000, 0x00000000, RTL_GIGA_MAC_VER_01 } /* Catch-all */
1120 }, *p = mac_info; 1151 }, *p = mac_info;
1121 u32 reg; 1152 u32 reg;
1122 1153
1123 reg = RTL_R32(TxConfig) & 0xfc800000; 1154 reg = RTL_R32(TxConfig);
1124 while ((reg & p->mask) != p->mask) 1155 while ((reg & p->mask) != p->val)
1125 p++; 1156 p++;
1126 tp->mac_version = p->mac_version; 1157 tp->mac_version = p->mac_version;
1158
1159 if (p->mask == 0x00000000) {
1160 struct pci_dev *pdev = tp->pci_dev;
1161
1162 dev_info(&pdev->dev, "unknown MAC (%08x)\n", reg);
1163 }
1127} 1164}
1128 1165
1129static void rtl8169_print_mac_version(struct rtl8169_private *tp) 1166static void rtl8169_print_mac_version(struct rtl8169_private *tp)
@@ -1131,54 +1168,21 @@ static void rtl8169_print_mac_version(struct rtl8169_private *tp)
1131 dprintk("mac_version = 0x%02x\n", tp->mac_version); 1168 dprintk("mac_version = 0x%02x\n", tp->mac_version);
1132} 1169}
1133 1170
1134static void rtl8169_get_phy_version(struct rtl8169_private *tp, 1171struct phy_reg {
1135 void __iomem *ioaddr)
1136{
1137 const struct {
1138 u16 mask;
1139 u16 set;
1140 int phy_version;
1141 } phy_info[] = {
1142 { 0x000f, 0x0002, RTL_GIGA_PHY_VER_G },
1143 { 0x000f, 0x0001, RTL_GIGA_PHY_VER_F },
1144 { 0x000f, 0x0000, RTL_GIGA_PHY_VER_E },
1145 { 0x0000, 0x0000, RTL_GIGA_PHY_VER_D } /* Catch-all */
1146 }, *p = phy_info;
1147 u16 reg; 1172 u16 reg;
1173 u16 val;
1174};
1148 1175
1149 reg = mdio_read(ioaddr, MII_PHYSID2) & 0xffff; 1176static void rtl_phy_write(void __iomem *ioaddr, struct phy_reg *regs, int len)
1150 while ((reg & p->mask) != p->set)
1151 p++;
1152 tp->phy_version = p->phy_version;
1153}
1154
1155static void rtl8169_print_phy_version(struct rtl8169_private *tp)
1156{ 1177{
1157 struct { 1178 while (len-- > 0) {
1158 int version; 1179 mdio_write(ioaddr, regs->reg, regs->val);
1159 char *msg; 1180 regs++;
1160 u32 reg;
1161 } phy_print[] = {
1162 { RTL_GIGA_PHY_VER_G, "RTL_GIGA_PHY_VER_G", 0x0002 },
1163 { RTL_GIGA_PHY_VER_F, "RTL_GIGA_PHY_VER_F", 0x0001 },
1164 { RTL_GIGA_PHY_VER_E, "RTL_GIGA_PHY_VER_E", 0x0000 },
1165 { RTL_GIGA_PHY_VER_D, "RTL_GIGA_PHY_VER_D", 0x0000 },
1166 { 0, NULL, 0x0000 }
1167 }, *p;
1168
1169 for (p = phy_print; p->msg; p++) {
1170 if (tp->phy_version == p->version) {
1171 dprintk("phy_version == %s (%04x)\n", p->msg, p->reg);
1172 return;
1173 }
1174 } 1181 }
1175 dprintk("phy_version == Unknown\n");
1176} 1182}
1177 1183
1178static void rtl8169_hw_phy_config(struct net_device *dev) 1184static void rtl8169s_hw_phy_config(void __iomem *ioaddr)
1179{ 1185{
1180 struct rtl8169_private *tp = netdev_priv(dev);
1181 void __iomem *ioaddr = tp->mmio_addr;
1182 struct { 1186 struct {
1183 u16 regs[5]; /* Beware of bit-sign propagation */ 1187 u16 regs[5]; /* Beware of bit-sign propagation */
1184 } phy_magic[5] = { { 1188 } phy_magic[5] = { {
@@ -1211,33 +1215,9 @@ static void rtl8169_hw_phy_config(struct net_device *dev)
1211 }, *p = phy_magic; 1215 }, *p = phy_magic;
1212 unsigned int i; 1216 unsigned int i;
1213 1217
1214 rtl8169_print_mac_version(tp); 1218 mdio_write(ioaddr, 0x1f, 0x0001); //w 31 2 0 1
1215 rtl8169_print_phy_version(tp); 1219 mdio_write(ioaddr, 0x15, 0x1000); //w 21 15 0 1000
1216 1220 mdio_write(ioaddr, 0x18, 0x65c7); //w 24 15 0 65c7
1217 if (tp->mac_version <= RTL_GIGA_MAC_VER_01)
1218 return;
1219 if (tp->phy_version >= RTL_GIGA_PHY_VER_H)
1220 return;
1221
1222 dprintk("MAC version != 0 && PHY version == 0 or 1\n");
1223 dprintk("Do final_reg2.cfg\n");
1224
1225 /* Shazam ! */
1226
1227 if (tp->mac_version == RTL_GIGA_MAC_VER_04) {
1228 mdio_write(ioaddr, 31, 0x0002);
1229 mdio_write(ioaddr, 1, 0x90d0);
1230 mdio_write(ioaddr, 31, 0x0000);
1231 return;
1232 }
1233
1234 if ((tp->mac_version != RTL_GIGA_MAC_VER_02) &&
1235 (tp->mac_version != RTL_GIGA_MAC_VER_03))
1236 return;
1237
1238 mdio_write(ioaddr, 31, 0x0001); //w 31 2 0 1
1239 mdio_write(ioaddr, 21, 0x1000); //w 21 15 0 1000
1240 mdio_write(ioaddr, 24, 0x65c7); //w 24 15 0 65c7
1241 rtl8169_write_gmii_reg_bit(ioaddr, 4, 11, 0); //w 4 11 11 0 1221 rtl8169_write_gmii_reg_bit(ioaddr, 4, 11, 0); //w 4 11 11 0
1242 1222
1243 for (i = 0; i < ARRAY_SIZE(phy_magic); i++, p++) { 1223 for (i = 0; i < ARRAY_SIZE(phy_magic); i++, p++) {
@@ -1250,7 +1230,115 @@ static void rtl8169_hw_phy_config(struct net_device *dev)
1250 rtl8169_write_gmii_reg_bit(ioaddr, 4, 11, 1); //w 4 11 11 1 1230 rtl8169_write_gmii_reg_bit(ioaddr, 4, 11, 1); //w 4 11 11 1
1251 rtl8169_write_gmii_reg_bit(ioaddr, 4, 11, 0); //w 4 11 11 0 1231 rtl8169_write_gmii_reg_bit(ioaddr, 4, 11, 0); //w 4 11 11 0
1252 } 1232 }
1253 mdio_write(ioaddr, 31, 0x0000); //w 31 2 0 0 1233 mdio_write(ioaddr, 0x1f, 0x0000); //w 31 2 0 0
1234}
1235
1236static void rtl8169sb_hw_phy_config(void __iomem *ioaddr)
1237{
1238 struct phy_reg phy_reg_init[] = {
1239 { 0x1f, 0x0002 },
1240 { 0x01, 0x90d0 },
1241 { 0x1f, 0x0000 }
1242 };
1243
1244 rtl_phy_write(ioaddr, phy_reg_init, ARRAY_SIZE(phy_reg_init));
1245}
1246static void rtl8168b_hw_phy_config(void __iomem *ioaddr)
1247{
1248 struct phy_reg phy_reg_init[] = {
1249 { 0x1f, 0x0000 },
1250 { 0x10, 0xf41b },
1251 { 0x1f, 0x0000 }
1252 };
1253
1254 rtl_phy_write(ioaddr, phy_reg_init, ARRAY_SIZE(phy_reg_init));
1255}
1256
1257static void rtl8168cp_hw_phy_config(void __iomem *ioaddr)
1258{
1259 struct phy_reg phy_reg_init[] = {
1260 { 0x1f, 0x0000 },
1261 { 0x1d, 0x0f00 },
1262 { 0x1f, 0x0002 },
1263 { 0x0c, 0x1ec8 },
1264 { 0x1f, 0x0000 }
1265 };
1266
1267 rtl_phy_write(ioaddr, phy_reg_init, ARRAY_SIZE(phy_reg_init));
1268}
1269
1270static void rtl8168c_hw_phy_config(void __iomem *ioaddr)
1271{
1272 struct phy_reg phy_reg_init[] = {
1273 { 0x1f, 0x0001 },
1274 { 0x12, 0x2300 },
1275 { 0x1f, 0x0002 },
1276 { 0x00, 0x88d4 },
1277 { 0x01, 0x82b1 },
1278 { 0x03, 0x7002 },
1279 { 0x08, 0x9e30 },
1280 { 0x09, 0x01f0 },
1281 { 0x0a, 0x5500 },
1282 { 0x0c, 0x00c8 },
1283 { 0x1f, 0x0003 },
1284 { 0x12, 0xc096 },
1285 { 0x16, 0x000a },
1286 { 0x1f, 0x0000 }
1287 };
1288
1289 rtl_phy_write(ioaddr, phy_reg_init, ARRAY_SIZE(phy_reg_init));
1290}
1291
1292static void rtl8168cx_hw_phy_config(void __iomem *ioaddr)
1293{
1294 struct phy_reg phy_reg_init[] = {
1295 { 0x1f, 0x0000 },
1296 { 0x12, 0x2300 },
1297 { 0x1f, 0x0003 },
1298 { 0x16, 0x0f0a },
1299 { 0x1f, 0x0000 },
1300 { 0x1f, 0x0002 },
1301 { 0x0c, 0x7eb8 },
1302 { 0x1f, 0x0000 }
1303 };
1304
1305 rtl_phy_write(ioaddr, phy_reg_init, ARRAY_SIZE(phy_reg_init));
1306}
1307
1308static void rtl_hw_phy_config(struct net_device *dev)
1309{
1310 struct rtl8169_private *tp = netdev_priv(dev);
1311 void __iomem *ioaddr = tp->mmio_addr;
1312
1313 rtl8169_print_mac_version(tp);
1314
1315 switch (tp->mac_version) {
1316 case RTL_GIGA_MAC_VER_01:
1317 break;
1318 case RTL_GIGA_MAC_VER_02:
1319 case RTL_GIGA_MAC_VER_03:
1320 rtl8169s_hw_phy_config(ioaddr);
1321 break;
1322 case RTL_GIGA_MAC_VER_04:
1323 rtl8169sb_hw_phy_config(ioaddr);
1324 break;
1325 case RTL_GIGA_MAC_VER_11:
1326 case RTL_GIGA_MAC_VER_12:
1327 case RTL_GIGA_MAC_VER_17:
1328 rtl8168b_hw_phy_config(ioaddr);
1329 break;
1330 case RTL_GIGA_MAC_VER_18:
1331 rtl8168cp_hw_phy_config(ioaddr);
1332 break;
1333 case RTL_GIGA_MAC_VER_19:
1334 rtl8168c_hw_phy_config(ioaddr);
1335 break;
1336 case RTL_GIGA_MAC_VER_20:
1337 rtl8168cx_hw_phy_config(ioaddr);
1338 break;
1339 default:
1340 break;
1341 }
1254} 1342}
1255 1343
1256static void rtl8169_phy_timer(unsigned long __opaque) 1344static void rtl8169_phy_timer(unsigned long __opaque)
@@ -1262,7 +1350,6 @@ static void rtl8169_phy_timer(unsigned long __opaque)
1262 unsigned long timeout = RTL8169_PHY_TIMEOUT; 1350 unsigned long timeout = RTL8169_PHY_TIMEOUT;
1263 1351
1264 assert(tp->mac_version > RTL_GIGA_MAC_VER_01); 1352 assert(tp->mac_version > RTL_GIGA_MAC_VER_01);
1265 assert(tp->phy_version < RTL_GIGA_PHY_VER_H);
1266 1353
1267 if (!(tp->phy_1000_ctrl_reg & ADVERTISE_1000FULL)) 1354 if (!(tp->phy_1000_ctrl_reg & ADVERTISE_1000FULL))
1268 return; 1355 return;
@@ -1297,8 +1384,7 @@ static inline void rtl8169_delete_timer(struct net_device *dev)
1297 struct rtl8169_private *tp = netdev_priv(dev); 1384 struct rtl8169_private *tp = netdev_priv(dev);
1298 struct timer_list *timer = &tp->timer; 1385 struct timer_list *timer = &tp->timer;
1299 1386
1300 if ((tp->mac_version <= RTL_GIGA_MAC_VER_01) || 1387 if (tp->mac_version <= RTL_GIGA_MAC_VER_01)
1301 (tp->phy_version >= RTL_GIGA_PHY_VER_H))
1302 return; 1388 return;
1303 1389
1304 del_timer_sync(timer); 1390 del_timer_sync(timer);
@@ -1309,8 +1395,7 @@ static inline void rtl8169_request_timer(struct net_device *dev)
1309 struct rtl8169_private *tp = netdev_priv(dev); 1395 struct rtl8169_private *tp = netdev_priv(dev);
1310 struct timer_list *timer = &tp->timer; 1396 struct timer_list *timer = &tp->timer;
1311 1397
1312 if ((tp->mac_version <= RTL_GIGA_MAC_VER_01) || 1398 if (tp->mac_version <= RTL_GIGA_MAC_VER_01)
1313 (tp->phy_version >= RTL_GIGA_PHY_VER_H))
1314 return; 1399 return;
1315 1400
1316 mod_timer(timer, jiffies + RTL8169_PHY_TIMEOUT); 1401 mod_timer(timer, jiffies + RTL8169_PHY_TIMEOUT);
@@ -1362,7 +1447,7 @@ static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)
1362{ 1447{
1363 void __iomem *ioaddr = tp->mmio_addr; 1448 void __iomem *ioaddr = tp->mmio_addr;
1364 1449
1365 rtl8169_hw_phy_config(dev); 1450 rtl_hw_phy_config(dev);
1366 1451
1367 dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n"); 1452 dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
1368 RTL_W8(0x82, 0x01); 1453 RTL_W8(0x82, 0x01);
@@ -1457,6 +1542,7 @@ static const struct rtl_cfg_info {
1457 unsigned int align; 1542 unsigned int align;
1458 u16 intr_event; 1543 u16 intr_event;
1459 u16 napi_event; 1544 u16 napi_event;
1545 unsigned msi;
1460} rtl_cfg_infos [] = { 1546} rtl_cfg_infos [] = {
1461 [RTL_CFG_0] = { 1547 [RTL_CFG_0] = {
1462 .hw_start = rtl_hw_start_8169, 1548 .hw_start = rtl_hw_start_8169,
@@ -1464,7 +1550,8 @@ static const struct rtl_cfg_info {
1464 .align = 0, 1550 .align = 0,
1465 .intr_event = SYSErr | LinkChg | RxOverflow | 1551 .intr_event = SYSErr | LinkChg | RxOverflow |
1466 RxFIFOOver | TxErr | TxOK | RxOK | RxErr, 1552 RxFIFOOver | TxErr | TxOK | RxOK | RxErr,
1467 .napi_event = RxFIFOOver | TxErr | TxOK | RxOK | RxOverflow 1553 .napi_event = RxFIFOOver | TxErr | TxOK | RxOK | RxOverflow,
1554 .msi = 0
1468 }, 1555 },
1469 [RTL_CFG_1] = { 1556 [RTL_CFG_1] = {
1470 .hw_start = rtl_hw_start_8168, 1557 .hw_start = rtl_hw_start_8168,
@@ -1472,7 +1559,8 @@ static const struct rtl_cfg_info {
1472 .align = 8, 1559 .align = 8,
1473 .intr_event = SYSErr | LinkChg | RxOverflow | 1560 .intr_event = SYSErr | LinkChg | RxOverflow |
1474 TxErr | TxOK | RxOK | RxErr, 1561 TxErr | TxOK | RxOK | RxErr,
1475 .napi_event = TxErr | TxOK | RxOK | RxOverflow 1562 .napi_event = TxErr | TxOK | RxOK | RxOverflow,
1563 .msi = RTL_FEATURE_MSI
1476 }, 1564 },
1477 [RTL_CFG_2] = { 1565 [RTL_CFG_2] = {
1478 .hw_start = rtl_hw_start_8101, 1566 .hw_start = rtl_hw_start_8101,
@@ -1480,10 +1568,39 @@ static const struct rtl_cfg_info {
1480 .align = 8, 1568 .align = 8,
1481 .intr_event = SYSErr | LinkChg | RxOverflow | PCSTimeout | 1569 .intr_event = SYSErr | LinkChg | RxOverflow | PCSTimeout |
1482 RxFIFOOver | TxErr | TxOK | RxOK | RxErr, 1570 RxFIFOOver | TxErr | TxOK | RxOK | RxErr,
1483 .napi_event = RxFIFOOver | TxErr | TxOK | RxOK | RxOverflow 1571 .napi_event = RxFIFOOver | TxErr | TxOK | RxOK | RxOverflow,
1572 .msi = RTL_FEATURE_MSI
1484 } 1573 }
1485}; 1574};
1486 1575
1576/* Cfg9346_Unlock assumed. */
1577static unsigned rtl_try_msi(struct pci_dev *pdev, void __iomem *ioaddr,
1578 const struct rtl_cfg_info *cfg)
1579{
1580 unsigned msi = 0;
1581 u8 cfg2;
1582
1583 cfg2 = RTL_R8(Config2) & ~MSIEnable;
1584 if (cfg->msi) {
1585 if (pci_enable_msi(pdev)) {
1586 dev_info(&pdev->dev, "no MSI. Back to INTx.\n");
1587 } else {
1588 cfg2 |= MSIEnable;
1589 msi = RTL_FEATURE_MSI;
1590 }
1591 }
1592 RTL_W8(Config2, cfg2);
1593 return msi;
1594}
1595
1596static void rtl_disable_msi(struct pci_dev *pdev, struct rtl8169_private *tp)
1597{
1598 if (tp->features & RTL_FEATURE_MSI) {
1599 pci_disable_msi(pdev);
1600 tp->features &= ~RTL_FEATURE_MSI;
1601 }
1602}
1603
1487static int __devinit 1604static int __devinit
1488rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) 1605rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1489{ 1606{
@@ -1596,10 +1713,8 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1596 1713
1597 /* Identify chip attached to board */ 1714 /* Identify chip attached to board */
1598 rtl8169_get_mac_version(tp, ioaddr); 1715 rtl8169_get_mac_version(tp, ioaddr);
1599 rtl8169_get_phy_version(tp, ioaddr);
1600 1716
1601 rtl8169_print_mac_version(tp); 1717 rtl8169_print_mac_version(tp);
1602 rtl8169_print_phy_version(tp);
1603 1718
1604 for (i = ARRAY_SIZE(rtl_chip_info) - 1; i >= 0; i--) { 1719 for (i = ARRAY_SIZE(rtl_chip_info) - 1; i >= 0; i--) {
1605 if (tp->mac_version == rtl_chip_info[i].mac_version) 1720 if (tp->mac_version == rtl_chip_info[i].mac_version)
@@ -1619,6 +1734,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1619 RTL_W8(Cfg9346, Cfg9346_Unlock); 1734 RTL_W8(Cfg9346, Cfg9346_Unlock);
1620 RTL_W8(Config1, RTL_R8(Config1) | PMEnable); 1735 RTL_W8(Config1, RTL_R8(Config1) | PMEnable);
1621 RTL_W8(Config5, RTL_R8(Config5) & PMEStatus); 1736 RTL_W8(Config5, RTL_R8(Config5) & PMEStatus);
1737 tp->features |= rtl_try_msi(pdev, ioaddr, cfg);
1622 RTL_W8(Cfg9346, Cfg9346_Lock); 1738 RTL_W8(Cfg9346, Cfg9346_Lock);
1623 1739
1624 if (RTL_R8(PHYstatus) & TBI_Enable) { 1740 if (RTL_R8(PHYstatus) & TBI_Enable) {
@@ -1686,7 +1802,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1686 1802
1687 rc = register_netdev(dev); 1803 rc = register_netdev(dev);
1688 if (rc < 0) 1804 if (rc < 0)
1689 goto err_out_unmap_5; 1805 goto err_out_msi_5;
1690 1806
1691 pci_set_drvdata(pdev, dev); 1807 pci_set_drvdata(pdev, dev);
1692 1808
@@ -1709,7 +1825,8 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1709out: 1825out:
1710 return rc; 1826 return rc;
1711 1827
1712err_out_unmap_5: 1828err_out_msi_5:
1829 rtl_disable_msi(pdev, tp);
1713 iounmap(ioaddr); 1830 iounmap(ioaddr);
1714err_out_free_res_4: 1831err_out_free_res_4:
1715 pci_release_regions(pdev); 1832 pci_release_regions(pdev);
@@ -1730,6 +1847,7 @@ static void __devexit rtl8169_remove_one(struct pci_dev *pdev)
1730 flush_scheduled_work(); 1847 flush_scheduled_work();
1731 1848
1732 unregister_netdev(dev); 1849 unregister_netdev(dev);
1850 rtl_disable_msi(pdev, tp);
1733 rtl8169_release_board(pdev, dev, tp->mmio_addr); 1851 rtl8169_release_board(pdev, dev, tp->mmio_addr);
1734 pci_set_drvdata(pdev, NULL); 1852 pci_set_drvdata(pdev, NULL);
1735} 1853}
@@ -1773,7 +1891,8 @@ static int rtl8169_open(struct net_device *dev)
1773 1891
1774 smp_mb(); 1892 smp_mb();
1775 1893
1776 retval = request_irq(dev->irq, rtl8169_interrupt, IRQF_SHARED, 1894 retval = request_irq(dev->irq, rtl8169_interrupt,
1895 (tp->features & RTL_FEATURE_MSI) ? 0 : IRQF_SHARED,
1777 dev->name, dev); 1896 dev->name, dev);
1778 if (retval < 0) 1897 if (retval < 0)
1779 goto err_release_ring_2; 1898 goto err_release_ring_2;
@@ -1933,7 +2052,7 @@ static void rtl_hw_start_8169(struct net_device *dev)
1933 2052
1934 if ((tp->mac_version == RTL_GIGA_MAC_VER_02) || 2053 if ((tp->mac_version == RTL_GIGA_MAC_VER_02) ||
1935 (tp->mac_version == RTL_GIGA_MAC_VER_03)) { 2054 (tp->mac_version == RTL_GIGA_MAC_VER_03)) {
1936 dprintk(KERN_INFO PFX "Set MAC Reg C+CR Offset 0xE0. " 2055 dprintk("Set MAC Reg C+CR Offset 0xE0. "
1937 "Bit-3 and bit-14 MUST be 1\n"); 2056 "Bit-3 and bit-14 MUST be 1\n");
1938 tp->cp_cmd |= (1 << 14); 2057 tp->cp_cmd |= (1 << 14);
1939 } 2058 }
@@ -2029,7 +2148,8 @@ static void rtl_hw_start_8101(struct net_device *dev)
2029 void __iomem *ioaddr = tp->mmio_addr; 2148 void __iomem *ioaddr = tp->mmio_addr;
2030 struct pci_dev *pdev = tp->pci_dev; 2149 struct pci_dev *pdev = tp->pci_dev;
2031 2150
2032 if (tp->mac_version == RTL_GIGA_MAC_VER_13) { 2151 if ((tp->mac_version == RTL_GIGA_MAC_VER_13) ||
2152 (tp->mac_version == RTL_GIGA_MAC_VER_16)) {
2033 pci_write_config_word(pdev, 0x68, 0x00); 2153 pci_write_config_word(pdev, 0x68, 0x00);
2034 pci_write_config_word(pdev, 0x69, 0x08); 2154 pci_write_config_word(pdev, 0x69, 0x08);
2035 } 2155 }
@@ -2259,7 +2379,7 @@ static void rtl8169_tx_clear(struct rtl8169_private *tp)
2259 dev_kfree_skb(skb); 2379 dev_kfree_skb(skb);
2260 tx_skb->skb = NULL; 2380 tx_skb->skb = NULL;
2261 } 2381 }
2262 tp->stats.tx_dropped++; 2382 tp->dev->stats.tx_dropped++;
2263 } 2383 }
2264 } 2384 }
2265 tp->cur_tx = tp->dirty_tx = 0; 2385 tp->cur_tx = tp->dirty_tx = 0;
@@ -2310,7 +2430,7 @@ static void rtl8169_reinit_task(struct work_struct *work)
2310 ret = rtl8169_open(dev); 2430 ret = rtl8169_open(dev);
2311 if (unlikely(ret < 0)) { 2431 if (unlikely(ret < 0)) {
2312 if (net_ratelimit() && netif_msg_drv(tp)) { 2432 if (net_ratelimit() && netif_msg_drv(tp)) {
2313 printk(PFX KERN_ERR "%s: reinit failure (status = %d)." 2433 printk(KERN_ERR PFX "%s: reinit failure (status = %d)."
2314 " Rescheduling.\n", dev->name, ret); 2434 " Rescheduling.\n", dev->name, ret);
2315 } 2435 }
2316 rtl8169_schedule_work(dev, rtl8169_reinit_task); 2436 rtl8169_schedule_work(dev, rtl8169_reinit_task);
@@ -2340,9 +2460,10 @@ static void rtl8169_reset_task(struct work_struct *work)
2340 rtl8169_init_ring_indexes(tp); 2460 rtl8169_init_ring_indexes(tp);
2341 rtl_hw_start(dev); 2461 rtl_hw_start(dev);
2342 netif_wake_queue(dev); 2462 netif_wake_queue(dev);
2463 rtl8169_check_link_status(dev, tp, tp->mmio_addr);
2343 } else { 2464 } else {
2344 if (net_ratelimit() && netif_msg_intr(tp)) { 2465 if (net_ratelimit() && netif_msg_intr(tp)) {
2345 printk(PFX KERN_EMERG "%s: Rx buffers shortage\n", 2466 printk(KERN_EMERG PFX "%s: Rx buffers shortage\n",
2346 dev->name); 2467 dev->name);
2347 } 2468 }
2348 rtl8169_schedule_work(dev, rtl8169_reset_task); 2469 rtl8169_schedule_work(dev, rtl8169_reset_task);
@@ -2496,7 +2617,7 @@ err_stop:
2496 netif_stop_queue(dev); 2617 netif_stop_queue(dev);
2497 ret = NETDEV_TX_BUSY; 2618 ret = NETDEV_TX_BUSY;
2498err_update_stats: 2619err_update_stats:
2499 tp->stats.tx_dropped++; 2620 dev->stats.tx_dropped++;
2500 goto out; 2621 goto out;
2501} 2622}
2502 2623
@@ -2571,8 +2692,8 @@ static void rtl8169_tx_interrupt(struct net_device *dev,
2571 if (status & DescOwn) 2692 if (status & DescOwn)
2572 break; 2693 break;
2573 2694
2574 tp->stats.tx_bytes += len; 2695 dev->stats.tx_bytes += len;
2575 tp->stats.tx_packets++; 2696 dev->stats.tx_packets++;
2576 2697
2577 rtl8169_unmap_tx_skb(tp->pci_dev, tx_skb, tp->TxDescArray + entry); 2698 rtl8169_unmap_tx_skb(tp->pci_dev, tx_skb, tp->TxDescArray + entry);
2578 2699
@@ -2672,14 +2793,14 @@ static int rtl8169_rx_interrupt(struct net_device *dev,
2672 "%s: Rx ERROR. status = %08x\n", 2793 "%s: Rx ERROR. status = %08x\n",
2673 dev->name, status); 2794 dev->name, status);
2674 } 2795 }
2675 tp->stats.rx_errors++; 2796 dev->stats.rx_errors++;
2676 if (status & (RxRWT | RxRUNT)) 2797 if (status & (RxRWT | RxRUNT))
2677 tp->stats.rx_length_errors++; 2798 dev->stats.rx_length_errors++;
2678 if (status & RxCRC) 2799 if (status & RxCRC)
2679 tp->stats.rx_crc_errors++; 2800 dev->stats.rx_crc_errors++;
2680 if (status & RxFOVF) { 2801 if (status & RxFOVF) {
2681 rtl8169_schedule_work(dev, rtl8169_reset_task); 2802 rtl8169_schedule_work(dev, rtl8169_reset_task);
2682 tp->stats.rx_fifo_errors++; 2803 dev->stats.rx_fifo_errors++;
2683 } 2804 }
2684 rtl8169_mark_to_asic(desc, tp->rx_buf_sz); 2805 rtl8169_mark_to_asic(desc, tp->rx_buf_sz);
2685 } else { 2806 } else {
@@ -2694,8 +2815,8 @@ static int rtl8169_rx_interrupt(struct net_device *dev,
2694 * sized frames. 2815 * sized frames.
2695 */ 2816 */
2696 if (unlikely(rtl8169_fragmented_frame(status))) { 2817 if (unlikely(rtl8169_fragmented_frame(status))) {
2697 tp->stats.rx_dropped++; 2818 dev->stats.rx_dropped++;
2698 tp->stats.rx_length_errors++; 2819 dev->stats.rx_length_errors++;
2699 rtl8169_mark_to_asic(desc, tp->rx_buf_sz); 2820 rtl8169_mark_to_asic(desc, tp->rx_buf_sz);
2700 continue; 2821 continue;
2701 } 2822 }
@@ -2719,8 +2840,8 @@ static int rtl8169_rx_interrupt(struct net_device *dev,
2719 rtl8169_rx_skb(skb); 2840 rtl8169_rx_skb(skb);
2720 2841
2721 dev->last_rx = jiffies; 2842 dev->last_rx = jiffies;
2722 tp->stats.rx_bytes += pkt_size; 2843 dev->stats.rx_bytes += pkt_size;
2723 tp->stats.rx_packets++; 2844 dev->stats.rx_packets++;
2724 } 2845 }
2725 2846
2726 /* Work around for AMD plateform. */ 2847 /* Work around for AMD plateform. */
@@ -2881,7 +3002,7 @@ core_down:
2881 rtl8169_asic_down(ioaddr); 3002 rtl8169_asic_down(ioaddr);
2882 3003
2883 /* Update the error counts. */ 3004 /* Update the error counts. */
2884 tp->stats.rx_missed_errors += RTL_R32(RxMissed); 3005 dev->stats.rx_missed_errors += RTL_R32(RxMissed);
2885 RTL_W32(RxMissed, 0); 3006 RTL_W32(RxMissed, 0);
2886 3007
2887 spin_unlock_irq(&tp->lock); 3008 spin_unlock_irq(&tp->lock);
@@ -2984,7 +3105,9 @@ static void rtl_set_rx_mode(struct net_device *dev)
2984 (tp->mac_version == RTL_GIGA_MAC_VER_12) || 3105 (tp->mac_version == RTL_GIGA_MAC_VER_12) ||
2985 (tp->mac_version == RTL_GIGA_MAC_VER_13) || 3106 (tp->mac_version == RTL_GIGA_MAC_VER_13) ||
2986 (tp->mac_version == RTL_GIGA_MAC_VER_14) || 3107 (tp->mac_version == RTL_GIGA_MAC_VER_14) ||
2987 (tp->mac_version == RTL_GIGA_MAC_VER_15)) { 3108 (tp->mac_version == RTL_GIGA_MAC_VER_15) ||
3109 (tp->mac_version == RTL_GIGA_MAC_VER_16) ||
3110 (tp->mac_version == RTL_GIGA_MAC_VER_17)) {
2988 mc_filter[0] = 0xffffffff; 3111 mc_filter[0] = 0xffffffff;
2989 mc_filter[1] = 0xffffffff; 3112 mc_filter[1] = 0xffffffff;
2990 } 3113 }
@@ -3011,12 +3134,12 @@ static struct net_device_stats *rtl8169_get_stats(struct net_device *dev)
3011 3134
3012 if (netif_running(dev)) { 3135 if (netif_running(dev)) {
3013 spin_lock_irqsave(&tp->lock, flags); 3136 spin_lock_irqsave(&tp->lock, flags);
3014 tp->stats.rx_missed_errors += RTL_R32(RxMissed); 3137 dev->stats.rx_missed_errors += RTL_R32(RxMissed);
3015 RTL_W32(RxMissed, 0); 3138 RTL_W32(RxMissed, 0);
3016 spin_unlock_irqrestore(&tp->lock, flags); 3139 spin_unlock_irqrestore(&tp->lock, flags);
3017 } 3140 }
3018 3141
3019 return &tp->stats; 3142 return &dev->stats;
3020} 3143}
3021 3144
3022#ifdef CONFIG_PM 3145#ifdef CONFIG_PM
@@ -3037,14 +3160,15 @@ static int rtl8169_suspend(struct pci_dev *pdev, pm_message_t state)
3037 3160
3038 rtl8169_asic_down(ioaddr); 3161 rtl8169_asic_down(ioaddr);
3039 3162
3040 tp->stats.rx_missed_errors += RTL_R32(RxMissed); 3163 dev->stats.rx_missed_errors += RTL_R32(RxMissed);
3041 RTL_W32(RxMissed, 0); 3164 RTL_W32(RxMissed, 0);
3042 3165
3043 spin_unlock_irq(&tp->lock); 3166 spin_unlock_irq(&tp->lock);
3044 3167
3045out_pci_suspend: 3168out_pci_suspend:
3046 pci_save_state(pdev); 3169 pci_save_state(pdev);
3047 pci_enable_wake(pdev, pci_choose_state(pdev, state), tp->wol_enabled); 3170 pci_enable_wake(pdev, pci_choose_state(pdev, state),
3171 (tp->features & RTL_FEATURE_WOL) ? 1 : 0);
3048 pci_set_power_state(pdev, pci_choose_state(pdev, state)); 3172 pci_set_power_state(pdev, pci_choose_state(pdev, state));
3049 3173
3050 return 0; 3174 return 0;
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 014dc2cfe4d6..09440d783e65 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -64,8 +64,8 @@
64 64
65#define DRV_MODULE_NAME "tg3" 65#define DRV_MODULE_NAME "tg3"
66#define PFX DRV_MODULE_NAME ": " 66#define PFX DRV_MODULE_NAME ": "
67#define DRV_MODULE_VERSION "3.84" 67#define DRV_MODULE_VERSION "3.85"
68#define DRV_MODULE_RELDATE "October 12, 2007" 68#define DRV_MODULE_RELDATE "October 18, 2007"
69 69
70#define TG3_DEF_MAC_MODE 0 70#define TG3_DEF_MAC_MODE 0
71#define TG3_DEF_RX_MODE 0 71#define TG3_DEF_RX_MODE 0
@@ -200,6 +200,7 @@ static struct pci_device_id tg3_pci_tbl[] = {
200 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)}, 200 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
201 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)}, 201 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
202 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)}, 202 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
203 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
203 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)}, 204 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
204 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)}, 205 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
205 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)}, 206 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
@@ -5028,10 +5029,7 @@ static int tg3_poll_fw(struct tg3 *tp)
5028/* Save PCI command register before chip reset */ 5029/* Save PCI command register before chip reset */
5029static void tg3_save_pci_state(struct tg3 *tp) 5030static void tg3_save_pci_state(struct tg3 *tp)
5030{ 5031{
5031 u32 val; 5032 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
5032
5033 pci_read_config_dword(tp->pdev, TG3PCI_COMMAND, &val);
5034 tp->pci_cmd = val;
5035} 5033}
5036 5034
5037/* Restore PCI state after chip reset */ 5035/* Restore PCI state after chip reset */
@@ -5054,7 +5052,7 @@ static void tg3_restore_pci_state(struct tg3 *tp)
5054 PCISTATE_ALLOW_APE_SHMEM_WR; 5052 PCISTATE_ALLOW_APE_SHMEM_WR;
5055 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val); 5053 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
5056 5054
5057 pci_write_config_dword(tp->pdev, TG3PCI_COMMAND, tp->pci_cmd); 5055 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
5058 5056
5059 if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) { 5057 if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
5060 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, 5058 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
@@ -10820,9 +10818,24 @@ out_not_found:
10820 strcpy(tp->board_part_number, "none"); 10818 strcpy(tp->board_part_number, "none");
10821} 10819}
10822 10820
10821static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
10822{
10823 u32 val;
10824
10825 if (tg3_nvram_read_swab(tp, offset, &val) ||
10826 (val & 0xfc000000) != 0x0c000000 ||
10827 tg3_nvram_read_swab(tp, offset + 4, &val) ||
10828 val != 0)
10829 return 0;
10830
10831 return 1;
10832}
10833
10823static void __devinit tg3_read_fw_ver(struct tg3 *tp) 10834static void __devinit tg3_read_fw_ver(struct tg3 *tp)
10824{ 10835{
10825 u32 val, offset, start; 10836 u32 val, offset, start;
10837 u32 ver_offset;
10838 int i, bcnt;
10826 10839
10827 if (tg3_nvram_read_swab(tp, 0, &val)) 10840 if (tg3_nvram_read_swab(tp, 0, &val))
10828 return; 10841 return;
@@ -10835,29 +10848,71 @@ static void __devinit tg3_read_fw_ver(struct tg3 *tp)
10835 return; 10848 return;
10836 10849
10837 offset = tg3_nvram_logical_addr(tp, offset); 10850 offset = tg3_nvram_logical_addr(tp, offset);
10838 if (tg3_nvram_read_swab(tp, offset, &val)) 10851
10852 if (!tg3_fw_img_is_valid(tp, offset) ||
10853 tg3_nvram_read_swab(tp, offset + 8, &ver_offset))
10839 return; 10854 return;
10840 10855
10841 if ((val & 0xfc000000) == 0x0c000000) { 10856 offset = offset + ver_offset - start;
10842 u32 ver_offset, addr; 10857 for (i = 0; i < 16; i += 4) {
10843 int i; 10858 if (tg3_nvram_read(tp, offset + i, &val))
10859 return;
10844 10860
10845 if (tg3_nvram_read_swab(tp, offset + 4, &val) || 10861 val = le32_to_cpu(val);
10846 tg3_nvram_read_swab(tp, offset + 8, &ver_offset)) 10862 memcpy(tp->fw_ver + i, &val, 4);
10863 }
10864
10865 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
10866 (tp->tg3_flags & TG3_FLG3_ENABLE_APE))
10867 return;
10868
10869 for (offset = TG3_NVM_DIR_START;
10870 offset < TG3_NVM_DIR_END;
10871 offset += TG3_NVM_DIRENT_SIZE) {
10872 if (tg3_nvram_read_swab(tp, offset, &val))
10847 return; 10873 return;
10848 10874
10849 if (val != 0) 10875 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
10876 break;
10877 }
10878
10879 if (offset == TG3_NVM_DIR_END)
10880 return;
10881
10882 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
10883 start = 0x08000000;
10884 else if (tg3_nvram_read_swab(tp, offset - 4, &start))
10885 return;
10886
10887 if (tg3_nvram_read_swab(tp, offset + 4, &offset) ||
10888 !tg3_fw_img_is_valid(tp, offset) ||
10889 tg3_nvram_read_swab(tp, offset + 8, &val))
10890 return;
10891
10892 offset += val - start;
10893
10894 bcnt = strlen(tp->fw_ver);
10895
10896 tp->fw_ver[bcnt++] = ',';
10897 tp->fw_ver[bcnt++] = ' ';
10898
10899 for (i = 0; i < 4; i++) {
10900 if (tg3_nvram_read(tp, offset, &val))
10850 return; 10901 return;
10851 10902
10852 addr = offset + ver_offset - start; 10903 val = le32_to_cpu(val);
10853 for (i = 0; i < 16; i += 4) { 10904 offset += sizeof(val);
10854 if (tg3_nvram_read(tp, addr + i, &val))
10855 return;
10856 10905
10857 val = cpu_to_le32(val); 10906 if (bcnt > TG3_VER_SIZE - sizeof(val)) {
10858 memcpy(tp->fw_ver + i, &val, 4); 10907 memcpy(&tp->fw_ver[bcnt], &val, TG3_VER_SIZE - bcnt);
10908 break;
10859 } 10909 }
10910
10911 memcpy(&tp->fw_ver[bcnt], &val, sizeof(val));
10912 bcnt += sizeof(val);
10860 } 10913 }
10914
10915 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
10861} 10916}
10862 10917
10863static struct pci_dev * __devinit tg3_find_peer(struct tg3 *); 10918static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index 6dbdad2b8f88..1d5b2a3dd29d 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -1540,6 +1540,12 @@
1540#define TG3_EEPROM_MAGIC_HW 0xabcd 1540#define TG3_EEPROM_MAGIC_HW 0xabcd
1541#define TG3_EEPROM_MAGIC_HW_MSK 0xffff 1541#define TG3_EEPROM_MAGIC_HW_MSK 0xffff
1542 1542
1543#define TG3_NVM_DIR_START 0x18
1544#define TG3_NVM_DIR_END 0x78
1545#define TG3_NVM_DIRENT_SIZE 0xc
1546#define TG3_NVM_DIRTYPE_SHIFT 24
1547#define TG3_NVM_DIRTYPE_ASFINI 1
1548
1543/* 32K Window into NIC internal memory */ 1549/* 32K Window into NIC internal memory */
1544#define NIC_SRAM_WIN_BASE 0x00008000 1550#define NIC_SRAM_WIN_BASE 0x00008000
1545 1551
@@ -2415,10 +2421,11 @@ struct tg3 {
2415#define PHY_REV_BCM5411_X0 0x1 /* Found on Netgear GA302T */ 2421#define PHY_REV_BCM5411_X0 0x1 /* Found on Netgear GA302T */
2416 2422
2417 u32 led_ctrl; 2423 u32 led_ctrl;
2418 u32 pci_cmd; 2424 u16 pci_cmd;
2419 2425
2420 char board_part_number[24]; 2426 char board_part_number[24];
2421 char fw_ver[16]; 2427#define TG3_VER_SIZE 32
2428 char fw_ver[TG3_VER_SIZE];
2422 u32 nic_sram_data_cfg; 2429 u32 nic_sram_data_cfg;
2423 u32 pci_clock_ctrl; 2430 u32 pci_clock_ctrl;
2424 struct pci_dev *pdev_peer; 2431 struct pci_dev *pdev_peer;
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
new file mode 100644
index 000000000000..e396c9d2af8d
--- /dev/null
+++ b/drivers/net/virtio_net.c
@@ -0,0 +1,435 @@
1/* A simple network driver using virtio.
2 *
3 * Copyright 2007 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */
19//#define DEBUG
20#include <linux/netdevice.h>
21#include <linux/etherdevice.h>
22#include <linux/module.h>
23#include <linux/virtio.h>
24#include <linux/virtio_net.h>
25#include <linux/scatterlist.h>
26
27/* FIXME: MTU in config. */
28#define MAX_PACKET_LEN (ETH_HLEN+ETH_DATA_LEN)
29
30struct virtnet_info
31{
32 struct virtio_device *vdev;
33 struct virtqueue *rvq, *svq;
34 struct net_device *dev;
35 struct napi_struct napi;
36
37 /* Number of input buffers, and max we've ever had. */
38 unsigned int num, max;
39
40 /* Receive & send queues. */
41 struct sk_buff_head recv;
42 struct sk_buff_head send;
43};
44
45static inline struct virtio_net_hdr *skb_vnet_hdr(struct sk_buff *skb)
46{
47 return (struct virtio_net_hdr *)skb->cb;
48}
49
50static inline void vnet_hdr_to_sg(struct scatterlist *sg, struct sk_buff *skb)
51{
52 sg_init_one(sg, skb_vnet_hdr(skb), sizeof(struct virtio_net_hdr));
53}
54
55static bool skb_xmit_done(struct virtqueue *rvq)
56{
57 struct virtnet_info *vi = rvq->vdev->priv;
58
59 /* In case we were waiting for output buffers. */
60 netif_wake_queue(vi->dev);
61 return true;
62}
63
64static void receive_skb(struct net_device *dev, struct sk_buff *skb,
65 unsigned len)
66{
67 struct virtio_net_hdr *hdr = skb_vnet_hdr(skb);
68
69 if (unlikely(len < sizeof(struct virtio_net_hdr) + ETH_HLEN)) {
70 pr_debug("%s: short packet %i\n", dev->name, len);
71 dev->stats.rx_length_errors++;
72 goto drop;
73 }
74 len -= sizeof(struct virtio_net_hdr);
75 BUG_ON(len > MAX_PACKET_LEN);
76
77 skb_trim(skb, len);
78 skb->protocol = eth_type_trans(skb, dev);
79 pr_debug("Receiving skb proto 0x%04x len %i type %i\n",
80 ntohs(skb->protocol), skb->len, skb->pkt_type);
81 dev->stats.rx_bytes += skb->len;
82 dev->stats.rx_packets++;
83
84 if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
85 pr_debug("Needs csum!\n");
86 skb->ip_summed = CHECKSUM_PARTIAL;
87 skb->csum_start = hdr->csum_start;
88 skb->csum_offset = hdr->csum_offset;
89 if (skb->csum_start > skb->len - 2
90 || skb->csum_offset > skb->len - 2) {
91 if (net_ratelimit())
92 printk(KERN_WARNING "%s: csum=%u/%u len=%u\n",
93 dev->name, skb->csum_start,
94 skb->csum_offset, skb->len);
95 goto frame_err;
96 }
97 }
98
99 if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
100 pr_debug("GSO!\n");
101 switch (hdr->gso_type) {
102 case VIRTIO_NET_HDR_GSO_TCPV4:
103 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
104 break;
105 case VIRTIO_NET_HDR_GSO_TCPV4_ECN:
106 skb_shinfo(skb)->gso_type = SKB_GSO_TCP_ECN;
107 break;
108 case VIRTIO_NET_HDR_GSO_UDP:
109 skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
110 break;
111 case VIRTIO_NET_HDR_GSO_TCPV6:
112 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
113 break;
114 default:
115 if (net_ratelimit())
116 printk(KERN_WARNING "%s: bad gso type %u.\n",
117 dev->name, hdr->gso_type);
118 goto frame_err;
119 }
120
121 skb_shinfo(skb)->gso_size = hdr->gso_size;
122 if (skb_shinfo(skb)->gso_size == 0) {
123 if (net_ratelimit())
124 printk(KERN_WARNING "%s: zero gso size.\n",
125 dev->name);
126 goto frame_err;
127 }
128
129 /* Header must be checked, and gso_segs computed. */
130 skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
131 skb_shinfo(skb)->gso_segs = 0;
132 }
133
134 netif_receive_skb(skb);
135 return;
136
137frame_err:
138 dev->stats.rx_frame_errors++;
139drop:
140 dev_kfree_skb(skb);
141}
142
143static void try_fill_recv(struct virtnet_info *vi)
144{
145 struct sk_buff *skb;
146 struct scatterlist sg[1+MAX_SKB_FRAGS];
147 int num, err;
148
149 for (;;) {
150 skb = netdev_alloc_skb(vi->dev, MAX_PACKET_LEN);
151 if (unlikely(!skb))
152 break;
153
154 skb_put(skb, MAX_PACKET_LEN);
155 vnet_hdr_to_sg(sg, skb);
156 num = skb_to_sgvec(skb, sg+1, 0, skb->len) + 1;
157 skb_queue_head(&vi->recv, skb);
158
159 err = vi->rvq->vq_ops->add_buf(vi->rvq, sg, 0, num, skb);
160 if (err) {
161 skb_unlink(skb, &vi->recv);
162 kfree_skb(skb);
163 break;
164 }
165 vi->num++;
166 }
167 if (unlikely(vi->num > vi->max))
168 vi->max = vi->num;
169 vi->rvq->vq_ops->kick(vi->rvq);
170}
171
172static bool skb_recv_done(struct virtqueue *rvq)
173{
174 struct virtnet_info *vi = rvq->vdev->priv;
175 netif_rx_schedule(vi->dev, &vi->napi);
176 /* Suppress further interrupts. */
177 return false;
178}
179
180static int virtnet_poll(struct napi_struct *napi, int budget)
181{
182 struct virtnet_info *vi = container_of(napi, struct virtnet_info, napi);
183 struct sk_buff *skb = NULL;
184 unsigned int len, received = 0;
185
186again:
187 while (received < budget &&
188 (skb = vi->rvq->vq_ops->get_buf(vi->rvq, &len)) != NULL) {
189 __skb_unlink(skb, &vi->recv);
190 receive_skb(vi->dev, skb, len);
191 vi->num--;
192 received++;
193 }
194
195 /* FIXME: If we oom and completely run out of inbufs, we need
196 * to start a timer trying to fill more. */
197 if (vi->num < vi->max / 2)
198 try_fill_recv(vi);
199
200 /* All done? */
201 if (!skb) {
202 netif_rx_complete(vi->dev, napi);
203 if (unlikely(!vi->rvq->vq_ops->restart(vi->rvq))
204 && netif_rx_reschedule(vi->dev, napi))
205 goto again;
206 }
207
208 return received;
209}
210
211static void free_old_xmit_skbs(struct virtnet_info *vi)
212{
213 struct sk_buff *skb;
214 unsigned int len;
215
216 while ((skb = vi->svq->vq_ops->get_buf(vi->svq, &len)) != NULL) {
217 pr_debug("Sent skb %p\n", skb);
218 __skb_unlink(skb, &vi->send);
219 vi->dev->stats.tx_bytes += len;
220 vi->dev->stats.tx_packets++;
221 kfree_skb(skb);
222 }
223}
224
225static int start_xmit(struct sk_buff *skb, struct net_device *dev)
226{
227 struct virtnet_info *vi = netdev_priv(dev);
228 int num, err;
229 struct scatterlist sg[1+MAX_SKB_FRAGS];
230 struct virtio_net_hdr *hdr;
231 const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;
232 DECLARE_MAC_BUF(mac);
233
234 pr_debug("%s: xmit %p %s\n", dev->name, skb, print_mac(mac, dest));
235
236 free_old_xmit_skbs(vi);
237
238 /* Encode metadata header at front. */
239 hdr = skb_vnet_hdr(skb);
240 if (skb->ip_summed == CHECKSUM_PARTIAL) {
241 hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
242 hdr->csum_start = skb->csum_start - skb_headroom(skb);
243 hdr->csum_offset = skb->csum_offset;
244 } else {
245 hdr->flags = 0;
246 hdr->csum_offset = hdr->csum_start = 0;
247 }
248
249 if (skb_is_gso(skb)) {
250 hdr->gso_size = skb_shinfo(skb)->gso_size;
251 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_ECN)
252 hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4_ECN;
253 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
254 hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
255 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
256 hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
257 else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
258 hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP;
259 else
260 BUG();
261 } else {
262 hdr->gso_type = VIRTIO_NET_HDR_GSO_NONE;
263 hdr->gso_size = 0;
264 }
265
266 vnet_hdr_to_sg(sg, skb);
267 num = skb_to_sgvec(skb, sg+1, 0, skb->len) + 1;
268 __skb_queue_head(&vi->send, skb);
269 err = vi->svq->vq_ops->add_buf(vi->svq, sg, num, 0, skb);
270 if (err) {
271 pr_debug("%s: virtio not prepared to send\n", dev->name);
272 skb_unlink(skb, &vi->send);
273 netif_stop_queue(dev);
274 return NETDEV_TX_BUSY;
275 }
276 vi->svq->vq_ops->kick(vi->svq);
277
278 return 0;
279}
280
281static int virtnet_open(struct net_device *dev)
282{
283 struct virtnet_info *vi = netdev_priv(dev);
284
285 try_fill_recv(vi);
286
287 /* If we didn't even get one input buffer, we're useless. */
288 if (vi->num == 0)
289 return -ENOMEM;
290
291 napi_enable(&vi->napi);
292 return 0;
293}
294
295static int virtnet_close(struct net_device *dev)
296{
297 struct virtnet_info *vi = netdev_priv(dev);
298 struct sk_buff *skb;
299
300 napi_disable(&vi->napi);
301
302 /* networking core has neutered skb_xmit_done/skb_recv_done, so don't
303 * worry about races vs. get(). */
304 vi->rvq->vq_ops->shutdown(vi->rvq);
305 while ((skb = __skb_dequeue(&vi->recv)) != NULL) {
306 kfree_skb(skb);
307 vi->num--;
308 }
309 vi->svq->vq_ops->shutdown(vi->svq);
310 while ((skb = __skb_dequeue(&vi->send)) != NULL)
311 kfree_skb(skb);
312
313 BUG_ON(vi->num != 0);
314 return 0;
315}
316
317static int virtnet_probe(struct virtio_device *vdev)
318{
319 int err;
320 unsigned int len;
321 struct net_device *dev;
322 struct virtnet_info *vi;
323 void *token;
324
325 /* Allocate ourselves a network device with room for our info */
326 dev = alloc_etherdev(sizeof(struct virtnet_info));
327 if (!dev)
328 return -ENOMEM;
329
330 /* Set up network device as normal. */
331 ether_setup(dev);
332 dev->open = virtnet_open;
333 dev->stop = virtnet_close;
334 dev->hard_start_xmit = start_xmit;
335 dev->features = NETIF_F_HIGHDMA;
336 SET_NETDEV_DEV(dev, &vdev->dev);
337
338 /* Do we support "hardware" checksums? */
339 token = vdev->config->find(vdev, VIRTIO_CONFIG_NET_F, &len);
340 if (virtio_use_bit(vdev, token, len, VIRTIO_NET_F_NO_CSUM)) {
341 /* This opens up the world of extra features. */
342 dev->features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST;
343 if (virtio_use_bit(vdev, token, len, VIRTIO_NET_F_TSO4))
344 dev->features |= NETIF_F_TSO;
345 if (virtio_use_bit(vdev, token, len, VIRTIO_NET_F_UFO))
346 dev->features |= NETIF_F_UFO;
347 if (virtio_use_bit(vdev, token, len, VIRTIO_NET_F_TSO4_ECN))
348 dev->features |= NETIF_F_TSO_ECN;
349 if (virtio_use_bit(vdev, token, len, VIRTIO_NET_F_TSO6))
350 dev->features |= NETIF_F_TSO6;
351 }
352
353 /* Configuration may specify what MAC to use. Otherwise random. */
354 token = vdev->config->find(vdev, VIRTIO_CONFIG_NET_MAC_F, &len);
355 if (token) {
356 dev->addr_len = len;
357 vdev->config->get(vdev, token, dev->dev_addr, len);
358 } else
359 random_ether_addr(dev->dev_addr);
360
361 /* Set up our device-specific information */
362 vi = netdev_priv(dev);
363 netif_napi_add(dev, &vi->napi, virtnet_poll, 16);
364 vi->dev = dev;
365 vi->vdev = vdev;
366
367 /* We expect two virtqueues, receive then send. */
368 vi->rvq = vdev->config->find_vq(vdev, skb_recv_done);
369 if (IS_ERR(vi->rvq)) {
370 err = PTR_ERR(vi->rvq);
371 goto free;
372 }
373
374 vi->svq = vdev->config->find_vq(vdev, skb_xmit_done);
375 if (IS_ERR(vi->svq)) {
376 err = PTR_ERR(vi->svq);
377 goto free_recv;
378 }
379
380 /* Initialize our empty receive and send queues. */
381 skb_queue_head_init(&vi->recv);
382 skb_queue_head_init(&vi->send);
383
384 err = register_netdev(dev);
385 if (err) {
386 pr_debug("virtio_net: registering device failed\n");
387 goto free_send;
388 }
389 pr_debug("virtnet: registered device %s\n", dev->name);
390 vdev->priv = vi;
391 return 0;
392
393free_send:
394 vdev->config->del_vq(vi->svq);
395free_recv:
396 vdev->config->del_vq(vi->rvq);
397free:
398 free_netdev(dev);
399 return err;
400}
401
402static void virtnet_remove(struct virtio_device *vdev)
403{
404 unregister_netdev(vdev->priv);
405 free_netdev(vdev->priv);
406}
407
408static struct virtio_device_id id_table[] = {
409 { VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID },
410 { 0 },
411};
412
413static struct virtio_driver virtio_net = {
414 .driver.name = KBUILD_MODNAME,
415 .driver.owner = THIS_MODULE,
416 .id_table = id_table,
417 .probe = virtnet_probe,
418 .remove = __devexit_p(virtnet_remove),
419};
420
421static int __init init(void)
422{
423 return register_virtio_driver(&virtio_net);
424}
425
426static void __exit fini(void)
427{
428 unregister_virtio_driver(&virtio_net);
429}
430module_init(init);
431module_exit(fini);
432
433MODULE_DEVICE_TABLE(virtio, id_table);
434MODULE_DESCRIPTION("Virtio network driver");
435MODULE_LICENSE("GPL");